import os
from pathlib import Path
# Resolve every output location relative to this script's own directory.
root = Path(__file__).resolve().parent
md_out = root / "CODE_LAYOUT.md"
html_out = root / "docs" / "code_layout.html"
layout_dir = root / "docs" / "layout_parts"
layout_dir.mkdir(parents=True, exist_ok=True)

# Per-section markdown fragments written under docs/layout_parts/.
md_overview = layout_dir / "code_layout_overview.md"
md_core = layout_dir / "code_layout_core.md"
md_experiments = layout_dir / "code_layout_experiments.md"
md_interface = layout_dir / "code_layout_interface.md"
md_output = layout_dir / "code_layout_output.md"
md_entry = layout_dir / "code_layout_entrypoint.md"

html_out.parent.mkdir(parents=True, exist_ok=True)
def tree_lines(p: Path, prefix=""):
    """Yield the lines of an ASCII directory tree rooted at *p*.

    Hidden entries, ``__pycache__`` and ``.pyc`` files are skipped.
    Directories are suffixed with "/".

    BUG FIX: the original recursed with ``prefix + tee``, so every
    descendant accumulated connector glyphs ("├── ├── name") instead of
    continuation bars.  Children now get the connector on their first
    line only and "│   " / "    " continuation prefixes below it.
    """
    if "__pycache__" in p.parts or p.name.startswith('.') or p.suffix == ".pyc":
        return
    yield prefix + p.name + ("/" if p.is_dir() else "")
    if p.is_dir():
        kids = [k for k in sorted(p.iterdir())
                if "__pycache__" not in k.parts and not k.name.startswith('.') and k.suffix != ".pyc"]
        for i, k in enumerate(kids):
            last = i == len(kids) - 1
            head = "└── " if last else "├── "
            cont = "    " if last else "│   "
            for j, sub in enumerate(tree_lines(k)):
                yield prefix + (head if j == 0 else cont) + sub
# --- Markdown output ---
# Writes CODE_LAYOUT.md: a fenced directory tree followed by one fenced
# code section per .py file in the project.
with md_out.open("w", encoding="utf-8") as f:
    f.write("# Project Layout (`Vireon`)\n\n```\n")
    for line in tree_lines(root):
        f.write(line + "\n")
    f.write("```\n\n")
    for py in root.rglob("*.py"):
        # Skip bytecode caches; tree_lines applies the same filter.
        if "__pycache__" in py.parts or py.suffix == ".pyc":
            continue
        rel = py.relative_to(root)
        try:
            code = py.read_text(encoding="utf-8")
        except Exception:
            # Best-effort: note unreadable files instead of aborting the dump.
            f.write(f"---\n### `{rel}` – *skipped (unreadable)*\n\n")
            continue
        f.write(f"---\n### `{rel}`\n\n```python\n{code}\n```\n\n")
# --- HTML output ---
html_top = """
Vireon Project Layout
Vireon Project Layout
"""
html_bottom = ""
with html_out.open("w", encoding="utf-8") as f:
f.write(html_top)
f.write("
\n")
for line in tree_lines(root):
depth = line.count("├") + line.count("└")
is_file = not line.endswith("/")
tag = "file" if is_file else ""
f.write(f"
{line}
\n")
f.write("
\n")
for py in root.rglob("*.py"):
if "__pycache__" in py.parts or py.suffix == ".pyc":
continue
rel = py.relative_to(root)
try:
code = py.read_text(encoding="utf-8")
except Exception:
f.write(f"
{rel}
Skipped (unreadable)
")
continue
f.write(f"
{rel}
\n
{code}
\n")
f.write(html_bottom)
print(f"✅ Wrote CODE_LAYOUT.md and docs/code_layout.html")
# ===== main.py =====
import os
from core.engine.lab_runner import run_lab
def list_experiments(base_path="experiments"):
    """Return the names of subdirectories of *base_path* that contain a logic.py."""
    found = []
    for entry in os.listdir(base_path):
        exp_dir = os.path.join(base_path, entry)
        if os.path.isdir(exp_dir) and os.path.exists(os.path.join(exp_dir, "logic.py")):
            found.append(entry)
    return found
def main():
    """Interactive entry point: list available experiments and launch the chosen one.

    Accepts either the experiment's 1-based number or its exact folder name.
    """
    print("🧪 Welcome to Vireon Virtual Lab")
    experiments = list_experiments()
    if not experiments:
        print("⚠️ No experiments found.")
        return
    # BUG FIX: the original used "\\n", printing a literal backslash-n
    # instead of a blank line (likely a copy/escaping artifact).
    print("\nAvailable Experiments:")
    for i, exp in enumerate(experiments, 1):
        print(f" {i}. {exp.replace('_', ' ').title()}")
    choice = input("\nEnter experiment number or name: ").strip()
    if choice.isdigit():
        idx = int(choice) - 1
        if 0 <= idx < len(experiments):
            run_lab(experiments[idx])
        else:
            print("❌ Invalid number.")
    elif choice in experiments:
        run_lab(choice)
    else:
        print("❌ Invalid selection.")

if __name__ == "__main__":
    main()
# ===== __init__.py =====
# ===== ai\analysis.py =====
import numpy as np
import matplotlib.pyplot as plt
import os
def analyze_force_data(force_values):
    """Return summary statistics (max/min/average/std_dev) for a sequence of forces.

    An empty input yields an empty dict.
    """
    if not force_values:
        return {}
    count = len(force_values)
    return {
        "max": max(force_values),
        "min": min(force_values),
        "average": sum(force_values) / count,
        "std_dev": np.std(force_values),
    }

def analyze_force_csv(csv_path, plot=True):
    """Load a one-column force CSV (header row skipped), print statistics,
    optionally save a plot next to the CSV, and return the stats dict.

    Any failure is reported and an empty dict is returned (best-effort).
    """
    try:
        samples = np.loadtxt(csv_path, delimiter=",", skiprows=1)
        stats = analyze_force_data(samples.tolist())
        print(f"🔍 Analysis of {csv_path}")
        for k, v in stats.items():
            print(f"{k.capitalize()}: {v:.4e} N")
        if plot:
            plt.figure()
            plt.plot(samples, label="|F|")
            plt.axhline(stats['average'], color='r', linestyle='--', label='Mean')
            plt.title("Force Magnitude Over Time")
            plt.xlabel("Step")
            plt.ylabel("|F| (N)")
            plt.legend()
            plt.grid(True)
            plot_path = csv_path.replace(".csv", "_analysis.png")
            plt.savefig(plot_path)
            plt.close()
            print(f"📈 Plot saved to {plot_path}")
        return stats
    except Exception as e:
        print(f"⚠️ Failed to analyze file: {e}")
        return {}
# ===== ai\__init__.py =====
# ===== core\config.py =====
# Global simulation constants (overridable via config_loader)
# These are fallback defaults; experiments normally read their own
# config.yaml through core.config_loader.load_config.
K = 1.0               # Coupling constant for F = k(Q·f)M
DEFAULT_CHARGE = 1.0  # Coulombs
DEFAULT_FREQ = 5.0    # Hz
DEFAULT_MASS = 1.0    # kg
# ===== core\config_loader.py =====
import yaml
def load_config(path="config.yaml"):
    """Parse a YAML file and return its contents.

    On any failure (missing file, bad YAML, ...) a warning is printed and
    an empty dict is returned so callers can fall back to defaults.
    """
    try:
        with open(path, 'r') as cfg_file:
            parsed = yaml.safe_load(cfg_file)
        return parsed
    except Exception as exc:
        print(f"⚠️ Failed to load config file: {exc}")
        return {}
# ===== core\logger.py =====
import csv
import json
import os
def save_csv(data, headers, filepath):
    """Write *headers* then the rows of *data* to *filepath* as CSV,
    creating parent directories as needed.

    BUG FIX: ``os.makedirs(os.path.dirname(filepath))`` raised when
    *filepath* was a bare filename (dirname == ""); guard added.
    """
    parent = os.path.dirname(filepath)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(filepath, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(headers)
        writer.writerows(data)

def save_json(summary, filepath):
    """Serialize *summary* to *filepath* as indented JSON, creating parent
    directories as needed (same bare-filename guard as save_csv)."""
    parent = os.path.dirname(filepath)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(filepath, "w") as f:
        json.dump(summary, f, indent=4)
# ===== core\particle_engine.py =====
import numpy as np
import matplotlib.pyplot as plt
def update_particles(positions, velocities, accelerations, dt):
    """Advance one explicit-Euler integration step.

    NOTE: mutates *positions* and *velocities* in place (``+=`` on the
    arrays) and also returns them for convenience.
    """
    velocities += accelerations * dt
    positions += velocities * dt
    return positions, velocities
def demo_run_simulation():
    """Standalone visual demo: 2-D particles driven by a global sinusoidal force.

    Opens an interactive matplotlib window (``plt.pause``/``plt.show``);
    intended for manual runs, not for the experiment pipeline.
    """
    num_particles = 100
    steps = 500
    field_freq = 5.0
    positions = np.random.rand(num_particles, 2)
    velocities = np.zeros_like(positions)
    for t in range(steps):
        # Uniform scalar drive applied equally to every particle.
        force = np.sin(2 * np.pi * field_freq * (t / steps))
        velocities += force * 0.01
        positions += velocities * 0.01
        if t % 50 == 0:
            # Redraw every 50th step only, to keep the animation responsive.
            plt.clf()
            plt.scatter(positions[:, 0], positions[:, 1], c='red')
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            plt.title(f"Step {t}")
            plt.pause(0.01)
    plt.show()
# ===== core\physics.py =====
import numpy as np
def calculate_force(Q, f, M, k=1.0):
    """Scalar Vireon force law: F = k·(Q·f)·M."""
    coupling = Q * f
    return k * coupling * M
def compute_force(k, Q, f, M, r_vec, t, omega, waveform_func):
    """Inverse-square force vector along *r_vec*, modulated by |waveform_func(f, t)|.

    A small epsilon on the distance avoids division by zero at r = 0.
    *omega* is accepted but not used here (kept for interface compatibility).
    """
    distance = np.linalg.norm(r_vec) + 1e-9
    direction = r_vec / distance
    envelope = np.abs(waveform_func(f, t))
    magnitude = k * Q * f * M * envelope
    return magnitude * direction / distance ** 2
# ===== core\visualizer.py =====
import matplotlib.pyplot as plt
import os
def save_plot(x, y, title, xlabel, ylabel, filepath):
    """Plot *y* vs. *x* as a line chart and save it to *filepath*,
    creating parent directories as needed.

    BUG FIX: ``os.makedirs(os.path.dirname(filepath))`` raised for a bare
    filename (dirname == ""); guard added, matching core.logger.
    """
    parent = os.path.dirname(filepath)
    if parent:
        os.makedirs(parent, exist_ok=True)
    plt.figure()
    plt.plot(x, y)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid(True)
    plt.savefig(filepath)
    plt.close()
def plot_line(x, y, xlabel="X", ylabel="Y", title="Plot", path=None):
    """Plot *y* vs. *x* with point markers; save to *path* when given.

    FIX: the figure is now closed unconditionally — previously, when
    *path* was None the figure was never closed, leaking matplotlib
    figures across repeated calls.
    """
    plt.figure(figsize=(8, 5))
    plt.plot(x, y, marker='o')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.grid(True)
    if path:
        plt.savefig(path)
    plt.close()
import importlib

def run_lab(experiment_name):
    """Dynamically import ``experiments.<name>.logic`` and invoke its run().

    Prints a friendly error (and returns None) when the experiment folder
    is missing or its module has no run() function.
    """
    target = f"experiments.{experiment_name}.logic"
    try:
        importlib.import_module(target).run()
    except ModuleNotFoundError:
        print(f"❌ Experiment '{experiment_name}' not found. Check the folder name.")
    except AttributeError:
        print(f"❌ Experiment '{experiment_name}' does not contain a run() function.")
# ===== core\engine\__init__.py =====
# ===== experiments\charge_frequency_sweep\logic.py =====
import numpy as np
import matplotlib.pyplot as plt
import os
from core.config_loader import load_config
def get_input(prompt, default):
    """Prompt the user for a value; fall back to *default* on empty input.

    SECURITY NOTE(review): eval() on raw user input executes arbitrary
    code — tolerable only for a trusted local CLI.  Consider
    ast.literal_eval for literal values (numbers, lists).
    """
    val = input(f"{prompt} [default: {default}]: ")
    return eval(val) if val.strip() else default
def run():
    """Sweep charge × frequency, saving a force-magnitude heatmap and CSV.

    Parameters come from config.yaml with interactive overrides.
    Outputs force_matrix.png and force_matrix.csv under output_dir.
    """
    print("\n🔬 Running Charge–Frequency Sweep")
    cfg = load_config("experiments/charge_frequency_sweep/config.yaml")
    charges = get_input("Enter array of charges (C)", cfg.get("charges"))
    freqs = get_input("Enter array of frequencies (Hz)", cfg.get("frequencies"))
    M = get_input("Enter test mass (kg)", cfg.get("mass"))
    k = get_input("Enter proportionality constant k", cfg.get("default_k"))
    out_dir = cfg.get("output_dir")
    force_matrix = np.zeros((len(charges), len(freqs)))
    os.makedirs(out_dir, exist_ok=True)
    # F = k·Q·f·M for every (charge, frequency) pair.
    for i, Q in enumerate(charges):
        for j, f in enumerate(freqs):
            F = k * Q * f * M
            force_matrix[i][j] = F
    fig, ax = plt.subplots()
    im = ax.imshow(force_matrix, cmap='plasma', origin='lower')
    ax.set_xticks(np.arange(len(freqs)))
    ax.set_yticks(np.arange(len(charges)))
    ax.set_xticklabels([f"{f:.0e}" for f in freqs])
    ax.set_yticklabels([f"{q:.0e}" for q in charges])
    ax.set_xlabel("Frequency (Hz)")
    ax.set_ylabel("Charge (C)")
    ax.set_title("Force Magnitude (N)")
    fig.colorbar(im, ax=ax)
    plt.tight_layout()
    plt.savefig(f"{out_dir}/force_matrix.png")
    plt.close()
    np.savetxt(f"{out_dir}/force_matrix.csv", force_matrix, delimiter=",", fmt="%.4e")
    print(f"✅ Output saved to {out_dir}")
# ===== experiments\charge_frequency_sweep\__init__.py =====
# ===== experiments\field_orientation_variance\logic.py =====
import numpy as np
import os
from core.config_loader import load_config
from core.waveforms import sine
from core.physics import compute_force
from core.visualizer import save_plot
def get_input(prompt, default):
    """Prompt the user for a value; fall back to *default* on empty input.

    SECURITY NOTE(review): eval() on raw user input executes arbitrary
    code — tolerable only for a trusted local CLI.  Consider
    ast.literal_eval for literal values (numbers, lists).
    """
    val = input(f"{prompt} [default: {default}]: ")
    return eval(val) if val.strip() else default
def run():
    """Simulate net force at the origin from a cubic grid of phased sources.

    Phase modes: "coherent" (all zero), "random" (uniform 0..2π), or
    "linear" (ramp 0..2π across sources).  Saves a force-vs-time CSV and plot.
    """
    print("\n🔬 Running Field Orientation Variance Simulation")
    cfg = load_config("experiments/field_orientation_variance/config.yaml")
    N = get_input("Enter grid size (NxNxN)", cfg.get("grid_size"))
    f = get_input("Enter frequency (Hz)", cfg.get("frequency"))
    Q = get_input("Enter charge per particle (C)", cfg.get("charge"))
    M = get_input("Enter test mass (kg)", cfg.get("mass"))
    mode = get_input("Enter phase mode (coherent, random, linear)", cfg.get("mode"))
    k = get_input("Enter proportionality constant k", cfg.get("default_k"))
    out_dir = cfg.get("output_dir")
    os.makedirs(out_dir, exist_ok=True)
    grid = np.linspace(-0.5, 0.5, N)
    sources = np.array([(x, y, z) for x in grid for y in grid for z in grid])
    test_point = np.array([0.0, 0.0, 0.0])
    steps = 1000
    dt = 1e-4
    t_arr = np.arange(steps) * dt
    force_trace = []
    # Phase assignment
    if mode == "coherent":
        phases = np.zeros(len(sources))
    elif mode == "random":
        phases = np.random.uniform(0, 2 * np.pi, len(sources))
    elif mode == "linear":
        phases = np.linspace(0, 2 * np.pi, len(sources))
    else:
        print("❌ Invalid mode. Using 'coherent'.")
        phases = np.zeros(len(sources))
    for idx, t in enumerate(t_arr):
        F_total = np.zeros(3)
        for i, src in enumerate(sources):
            r_vec = test_point - src
            # Per-source phase offset expressed as a time shift of the waveform.
            shifted_t = t + phases[i] / (2 * np.pi * f)
            F = compute_force(k, Q, f, M, r_vec, shifted_t, omega=2*np.pi*f, waveform_func=sine)
            F_total += F
        force_trace.append(np.linalg.norm(F_total))
    # Output
    np.savetxt(f"{out_dir}/field_variance_force_trace.csv", force_trace, delimiter=",", fmt="%.4e")
    save_plot(t_arr, force_trace, "Force vs. Time", "Time (s)", "|F| (N)", f"{out_dir}/field_variance_plot.png")
    print(f"✅ Output saved to {out_dir}")
import numpy as np
import os
import time
from core.config_loader import load_config
from core.waveforms import sine
from core.physics import compute_force
from core.particle_engine import update_particles
from core.visualizer import save_plot
from core.logger import save_csv
def get_input(prompt, default, cast_type=float):
    """Prompt for a value and cast it with *cast_type*; fall back to the
    (cast) *default* on empty or invalid input.

    BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; narrowed to the cast failures it is meant to handle.
    """
    val = input(f"{prompt} [default: {default}]: ")
    try:
        return cast_type(val) if val.strip() else cast_type(default)
    except (TypeError, ValueError):
        print("⚠️ Invalid input, using default.")
        return cast_type(default)
def run():
    """N-body simulation of mutually-forcing particles under the sine waveform.

    Seeded (np.random.seed(42)) for reproducibility.  Records mean squared
    speed per step ("kinetic" trace) and saves a plot and a CSV.
    O(N²) pairwise force loop per step — expect long runtimes for large N.
    """
    print("\n🔬 Running Plasma Resonance Collapse Simulation")
    cfg = load_config("experiments/plasma_resonance_collapse/config.yaml")
    print("🔎 Loaded config:", cfg)
    N = get_input("Enter number of particles", cfg.get("particles"), int)
    R = get_input("Enter initial spatial radius (m)", cfg.get("radius"), float)
    f = get_input("Enter frequency (Hz)", cfg.get("frequency"), float)
    Q = get_input("Enter charge per particle (C)", cfg.get("charge"), float)
    M = get_input("Enter particle mass (kg)", cfg.get("mass"), float)
    k = get_input("Enter proportionality constant k", cfg.get("default_k"), float)
    steps = get_input("Enter number of steps", cfg.get("steps"), int)
    dt = get_input("Enter time step (s)", cfg.get("dt"), float)
    out_dir = cfg.get("output_dir")
    os.makedirs(out_dir, exist_ok=True)
    print("💾 Output directory:", out_dir)
    print(f"🧾 Parameters: N={N}, R={R}, f={f}, Q={Q}, M={M}, k={k}, steps={steps}, dt={dt}")
    print("✅ All parameters received. Starting simulation loop...")
    # Initialize particles
    np.random.seed(42)
    positions = np.random.uniform(-R, R, size=(N, 3))
    velocities = np.zeros_like(positions)
    force_trace = []
    start_time = time.time()
    for t_idx in range(steps):
        t = t_idx * dt
        accelerations = np.zeros_like(positions)
        # Pairwise forces; skip self-interaction (i == j).
        for i in range(N):
            net_F = np.zeros(3)
            for j in range(N):
                if i == j:
                    continue
                r_vec = positions[i] - positions[j]
                F = compute_force(k, Q, f, M, r_vec, t, omega=2 * np.pi * f, waveform_func=sine)
                net_F += F
            accelerations[i] = net_F / M
        positions, velocities = update_particles(positions, velocities, accelerations, dt)
        # Mean squared speed as a kinetic-energy proxy (mass factored out).
        net_energy = np.mean(np.linalg.norm(velocities, axis=1) ** 2)
        force_trace.append(net_energy)
        if t_idx % 100 == 0 or t_idx == steps - 1:
            print(f" Step {t_idx + 1}/{steps} complete...")
    duration = round(time.time() - start_time, 2)
    save_plot(range(steps), force_trace,
              "Average Kinetic Energy", "Step", "Velocity² (Arb)",
              f"{out_dir}/collapse_trace.png")
    save_csv([[i, f] for i, f in enumerate(force_trace)],
             ["Step", "Kinetic"], f"{out_dir}/collapse_trace.csv")
    print(f"\n✅ Simulation complete in {duration} seconds.")
    print(f"📊 Output saved to: {out_dir}")
# ===== experiments\plasma_resonance_collapse\__init__.py =====
# ===== experiments\resonant_particle_field\logic.py =====
import numpy as np
import os
from core.config_loader import load_config
from core.waveforms import sine
from core.physics import compute_force
from core.visualizer import save_plot
def get_input(prompt, default):
    """Prompt the user for a value; fall back to *default* on empty input.

    SECURITY NOTE(review): eval() on raw user input executes arbitrary
    code — tolerable only for a trusted local CLI.  Consider
    ast.literal_eval for literal values (numbers, lists).
    """
    val = input(f"{prompt} [default: {default}]: ")
    return eval(val) if val.strip() else default
def run():
    """Net force at the origin from an N×N×N grid of in-phase sine sources.

    Saves a force-vs-time CSV and plot under output_dir.
    """
    print("\n🔬 Running Resonant Particle Field Simulation")
    cfg = load_config("experiments/resonant_particle_field/config.yaml")
    N = get_input("Enter grid size (NxNxN)", cfg.get("grid_size"))
    f = get_input("Enter frequency (Hz)", cfg.get("frequency"))
    Q = get_input("Enter charge per particle (C)", cfg.get("charge"))
    M = get_input("Enter test mass (kg)", cfg.get("mass"))
    k = get_input("Enter proportionality constant k", cfg.get("default_k"))
    out_dir = cfg.get("output_dir")
    os.makedirs(out_dir, exist_ok=True)
    # Setup 3D grid of source charges
    grid = np.linspace(-0.5, 0.5, N)
    sources = np.array([(x, y, z) for x in grid for y in grid for z in grid])
    test_point = np.array([0.0, 0.0, 0.0])
    steps = 1000
    dt = 1e-4
    t_arr = np.arange(steps) * dt
    force_trace = []
    for t in t_arr:
        F_total = np.zeros(3)
        for src in sources:
            r_vec = test_point - src
            F = compute_force(k, Q, f, M, r_vec, t, omega=2*np.pi*f, waveform_func=sine)
            F_total += F
        force_magnitude = np.linalg.norm(F_total)
        force_trace.append(force_magnitude)
    # Save results
    np.savetxt(f"{out_dir}/resonance_force_trace.csv", force_trace, delimiter=",", fmt="%.4e")
    save_plot(t_arr, force_trace, "Force vs. Time", "Time (s)", "|F| (N)", f"{out_dir}/resonance_force_plot.png")
    print(f"✅ Output saved to {out_dir}")
# ===== experiments\resonant_particle_field\__init__.py =====
# ===== experiments\toroidal_field_rotation\logic.py =====
import numpy as np
import os
from core.config_loader import load_config
from core.waveforms import sine
from core.physics import compute_force
from core.visualizer import save_plot
def get_input(prompt, default):
    """Prompt the user for a value; fall back to *default* on empty input.

    SECURITY NOTE(review): eval() on raw user input executes arbitrary
    code — tolerable only for a trusted local CLI.  Consider
    ast.literal_eval for literal values (numbers, lists).
    """
    val = input(f"{prompt} [default: {default}]: ")
    return eval(val) if val.strip() else default
def run():
    """Net force at the origin from charges rotating on a ring of given radius.

    Each charge starts at an evenly-spaced angle and advances by
    omega_rot·t per step.  Saves a force-vs-time CSV and plot.
    """
    print("\n🔬 Running Toroidal Field Rotation Simulation")
    cfg = load_config("experiments/toroidal_field_rotation/config.yaml")
    radius = get_input("Enter ring radius (m)", cfg.get("radius"))
    num_charges = get_input("Enter number of rotating charges", cfg.get("num_charges"))
    omega_rot = get_input("Enter rotational angular speed (rad/s)", cfg.get("omega_rot"))
    f = get_input("Enter oscillation frequency (Hz)", cfg.get("frequency"))
    Q = get_input("Enter charge per particle (C)", cfg.get("charge"))
    M = get_input("Enter test mass (kg)", cfg.get("mass"))
    k = get_input("Enter proportionality constant k", cfg.get("default_k"))
    out_dir = cfg.get("output_dir")
    os.makedirs(out_dir, exist_ok=True)
    angles = np.linspace(0, 2*np.pi, num_charges, endpoint=False)
    test_point = np.array([0.0, 0.0, 0.0])
    steps = 1000
    dt = 1e-4
    t_arr = np.arange(steps) * dt
    force_trace = []
    for t in t_arr:
        F_total = np.zeros(3)
        for theta in angles:
            # Rotate position
            angle = theta + omega_rot * t
            x = radius * np.cos(angle)
            y = radius * np.sin(angle)
            src = np.array([x, y, 0.0])
            r_vec = test_point - src
            F = compute_force(k, Q, f, M, r_vec, t, omega=2*np.pi*f, waveform_func=sine)
            F_total += F
        force_trace.append(np.linalg.norm(F_total))
    np.savetxt(f"{out_dir}/toroidal_force_trace.csv", force_trace, delimiter=",", fmt="%.4e")
    save_plot(t_arr, force_trace, "Toroidal Force vs. Time", "Time (s)", "|F| (N)", f"{out_dir}/toroidal_force_plot.png")
    print(f"✅ Output saved to {out_dir}")
# Copyright (c) 2010-2024 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
# Vendored copy of the `six` compatibility library (Py2/Py3 shims).
# NOTE(review): the author string appears truncated in this copy (the
# email address was likely stripped along with angle brackets) — confirm
# against upstream six 1.17.0.
__author__ = "Benjamin Peterson "
__version__ = "1.17.0"

# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
# Version-dependent type aliases and MAXSIZE detection.  Order matters:
# later code (MovedModule etc.) reads PY3 and these aliases at import time.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X

if PY34:
    from importlib.util import spec_from_loader
else:
    spec_from_loader = None
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves a moved object on first access, caches it on
    the instance, then deletes itself from the class so it never runs again."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name  # same name on Python 3 unless overridden
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Import on first attribute access and cache the attribute locally.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose attributes are _LazyDescr instances resolved on demand."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved (module and/or name) between
    Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                # Default the new attribute name to old_attr, then to name.
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("<six>.moves.x") to module objects.
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register *mod* under each "<six>.<fullname>" key.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # Legacy (pre-PEP451) finder protocol.
        if fullname in self.known_modules:
            return self
        return None

    def find_spec(self, fullname, path, target=None):
        # PEP451 finder protocol (Python 3.4+).
        if fullname in self.known_modules:
            return spec_from_loader(fullname, self)
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code

    def create_module(self, spec):
        return self.load_module(spec.name)

    def exec_module(self, module):
        pass
# Single importer instance registered for this six module; the tables below
# populate it with every known moved module/attribute.
_importer = _SixMetaPathImporter(__name__)


class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package


# Table of (six name, Python 2 location, Python 3 location) renames.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("getoutput", "commands", "subprocess"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]

# Install every moved item as a lazy attribute of _MovedItems and register
# moved modules with the importer.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


# Attributes split between Py2's urlparse/urllib and Py3's urllib.parse.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("splitvalue", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
# URLopener/FancyURLopener were removed from urllib.request in Python 3.14.
if sys.version_info[:2] < (3, 14):
    _urllib_request_moved_attributes.extend(
        [
            MovedAttribute("URLopener", "urllib", "urllib.request"),
            MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
        ]
    )
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
# Expose each moved attribute on the class; the MovedAttribute descriptor
# resolves the real object lazily on first access.
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    # Python 2's top-level robotparser module became urllib.robotparser.
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Bind the five previously-registered lazy submodules as attributes so
    # `six.moves.urllib.parse` etc. work like the Python 3 urllib package.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Only the five submodules form the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']


_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    *move* is a MovedAttribute or MovedModule instance; it becomes available
    as ``six.moves.<move.name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if *name* is neither on the _MovedItems class nor
    in the instantiated moves module's __dict__.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The item may have been added directly to the moves module instance
        # rather than the class.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Function/method introspection attribute names were renamed in Python 3
# (e.g. im_func -> __func__).  Store the version-appropriate names for the
# attrgetter-based accessors defined below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    advance_iterator = next
except NameError:
    # Very old Python (< 2.6) has no next() builtin; fall back to calling
    # the iterator's .next() method directly.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # callable() was absent in Python 3.0/3.1; emulate it by looking for
    # __call__ anywhere in the type's MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions are used directly.
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        # A plain function serves as an unbound method in Python 3.
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):
        # Base class mapping Python 2's next() protocol onto __next__ so
        # subclasses only need to define __next__.
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Accessors for function/method internals, built from the version-specific
# attribute names selected above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
    # Python 3: keys()/values()/items() return views; wrap in iter() to
    # present the Python 2 iterator API.
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        # Only works for multidict-style mappings providing a lists() method.
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-sequence accessors whose semantics
# differ between Python 2 (str is bytes) and Python 3 (str is text).
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    del io
    _assertCountEqual = "assertCountEqual"
    # unittest renamed several assertion methods in 3.2; keep the old names
    # for 3.0/3.1.
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
        _assertNotRegex = "assertNotRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
        _assertNotRegex = "assertNotRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
    _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# Forwarders that dispatch to the version-appropriate unittest assertion
# method name chosen above (e.g. assertCountEqual vs assertItemsEqual).
# They are called with a TestCase instance as the first argument.
def assertCountEqual(self, *args, **kwargs):
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)


def assertNotRegex(self, *args, **kwargs):
    return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
    # Python 3: exec is a real builtin function.
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        try:
            if value is None:
                value = tp()
            if value.__traceback__ is not tb:
                raise value.with_traceback(tb)
            raise value
        finally:
            # Drop local references so the traceback's frames do not form a
            # reference cycle keeping them alive.
            value = None
            tb = None

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # The 3-argument raise is a syntax error in Python 3, so the Python 2
    # version of reraise must be defined via exec_.
    exec_("""def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")
# NOTE(review): upstream six compares against (3, 2) here; this copy uses
# (3,), which also selects the exec_ path on 3.0-3.2 -- confirm intended.
if sys.version_info[:2] > (3,):
    # `raise ... from ...` is a syntax error on Python 2, so define via exec_.
    exec_("""def raise_from(value, from_value):
    try:
        raise value from from_value
    finally:
        value = None
""")
else:
    def raise_from(value, from_value):
        # Python 2 has no exception chaining; just raise the value.
        raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # If any argument, sep or end is unicode, the whole output is written
        # as unicode to match Python 3 print semantics.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # print() gained the flush keyword in 3.3; wrap older versions to add it.
    _print = print_

    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()

_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # This does exactly the same what the :func:`py3:functools.update_wrapper`
    # function does on Python versions after 3.2. It sets the ``__wrapped__``
    # attribute on ``wrapper`` object and it doesn't raise an error if any of
    # the attributes mentioned in ``assigned`` and ``updated`` are missing on
    # ``wrapped`` object.
    def _update_wrapper(wrapper, wrapped,
                        assigned=functools.WRAPPER_ASSIGNMENTS,
                        updated=functools.WRAPPER_UPDATES):
        for attr in assigned:
            try:
                value = getattr(wrapped, attr)
            except AttributeError:
                # Tolerate missing attributes (e.g. partials lack __name__).
                continue
            else:
                setattr(wrapper, attr, value)
        for attr in updated:
            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
        wrapper.__wrapped__ = wrapped
        return wrapper
    _update_wrapper.__doc__ = functools.update_wrapper.__doc__

    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        return functools.partial(_update_wrapper, wrapped=wrapped,
                                 assigned=assigned, updated=updated)
    wraps.__doc__ = functools.wraps.__doc__

else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):

        def __new__(cls, name, this_bases, d):
            if sys.version_info[:2] >= (3, 7):
                # This version introduced PEP 560 that requires a bit
                # of extra care (we mimic what is done by __build_class__).
                resolved_bases = types.resolve_bases(bases)
                if resolved_bases is not bases:
                    d['__orig_bases__'] = bases
            else:
                resolved_bases = bases
            return meta(name, resolved_bases, d)

        @classmethod
        def __prepare__(cls, name, this_bases):
            # Delegate namespace preparation to the real metaclass.
            return meta.__prepare__(name, bases)
    # The temporary class is discarded once the real class is created by
    # metaclass.__new__ above.
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Re-create the class through the metaclass using a copy of its
        # namespace, stripping entries that the class machinery recreates.
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            # Slot descriptors are regenerated by the new class.
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        if hasattr(cls, '__qualname__'):
            orig_vars['__qualname__'] = cls.__qualname__
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
    """Coerce **s** to six.binary_type.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`

    For Python 3:
      - `str` -> encoded to `bytes`
      - `bytes` -> `bytes`

    Raises TypeError for any other type.
    """
    if isinstance(s, binary_type):
        return s
    elif isinstance(s, text_type):
        return s.encode(encoding, errors)
    else:
        raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to `str`.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`

    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`

    Raises TypeError for any other type.
    """
    # Fast path: an exact native str needs no conversion.
    if type(s) is str:
        return s
    if PY2 and isinstance(s, text_type):
        return s.encode(encoding, errors)
    if PY3 and isinstance(s, binary_type):
        return s.decode(encoding, errors)
    if isinstance(s, (text_type, binary_type)):
        # A str/bytes subclass already in the native text form.
        return s
    raise TypeError("not expecting type '%s'" % type(s))
def ensure_text(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to six.text_type.

    For Python 2:
      - `unicode` -> `unicode`
      - `str` -> `unicode`

    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`

    Raises TypeError for any other type.
    """
    if isinstance(s, text_type):
        return s
    if isinstance(s, binary_type):
        return s.decode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
    """
    A class decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.

    Raises ValueError if the decorated class does not define __str__ itself.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On Python 2, __str__ must return bytes; reuse the text-returning
        # __str__ as __unicode__ and encode it for __str__.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable

# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
venv\Lib\site-packages\contourpy\array.py
from __future__ import annotations
from itertools import chain, pairwise
from typing import TYPE_CHECKING
import numpy as np
from contourpy.typecheck import check_code_array, check_offset_array, check_point_array
from contourpy.types import CLOSEPOLY, LINETO, MOVETO, code_dtype, offset_dtype, point_dtype
if TYPE_CHECKING:
import contourpy._contourpy as cpy
def codes_from_offsets(offsets: cpy.OffsetArray) -> cpy.CodeArray:
    """Build a code array for offsets, assuming every polygon is closed.

    Each polygon starts with MOVETO and ends with CLOSEPOLY; every other
    vertex is LINETO.
    """
    check_offset_array(offsets)

    total = offsets[-1]
    starts = offsets[:-1]
    ends = offsets[1:] - 1
    ret = np.full(total, LINETO, dtype=code_dtype)
    ret[starts] = MOVETO
    ret[ends] = CLOSEPOLY
    return ret
def codes_from_offsets_and_points(
    offsets: cpy.OffsetArray,
    points: cpy.PointArray,
) -> cpy.CodeArray:
    """Build a code array from offsets and points.

    A line is marked closed (CLOSEPOLY on its last vertex) only when its
    first and last points are equal; otherwise the last vertex is LINETO.
    """
    check_offset_array(offsets)
    check_point_array(points)

    starts = offsets[:-1]
    ends = offsets[1:] - 1
    ret = np.full(len(points), LINETO, dtype=code_dtype)
    ret[starts] = MOVETO
    is_closed = np.all(points[starts] == points[ends], axis=1)
    ret[ends[is_closed]] = CLOSEPOLY
    return ret
def codes_from_points(points: cpy.PointArray) -> cpy.CodeArray:
    """Build a code array for a single line.

    The line gets a trailing CLOSEPOLY only when its first and last points
    coincide.
    """
    check_point_array(points)

    count = len(points)
    ret = np.full(count, LINETO, dtype=code_dtype)
    ret[0] = MOVETO
    if np.all(points[0] == points[-1]):
        ret[-1] = CLOSEPOLY
    return ret
def concat_codes(list_of_codes: list[cpy.CodeArray]) -> cpy.CodeArray:
    """Join a list of code arrays into one code array.

    Raises ValueError if the list is empty.
    """
    if list_of_codes:
        return np.concatenate(list_of_codes, dtype=code_dtype)
    raise ValueError("Empty list passed to concat_codes")
def concat_codes_or_none(list_of_codes_or_none: list[cpy.CodeArray | None]) -> cpy.CodeArray | None:
    """Join a list of code arrays, ignoring None entries.

    Returns None when every entry is None (or the list is empty).
    """
    kept = [c for c in list_of_codes_or_none if c is not None]
    return concat_codes(kept) if kept else None
def concat_offsets(list_of_offsets: list[cpy.OffsetArray]) -> cpy.OffsetArray:
    """Join a list of offset arrays into one offset array.

    Each subsequent array is shifted by the running total of points so the
    combined offsets index into the concatenated point array.  Raises
    ValueError if the list is empty.
    """
    if not list_of_offsets:
        raise ValueError("Empty list passed to concat_offsets")

    # Running point totals; totals[i] shifts list_of_offsets[i+1].
    totals = np.cumsum([offs[-1] for offs in list_of_offsets], dtype=offset_dtype)
    pieces = [list_of_offsets[0]]
    for shift, offs in zip(totals[:-1], list_of_offsets[1:]):
        # Drop the leading 0 of each later array to avoid duplicates.
        pieces.append(offs[1:] + shift)
    ret: cpy.OffsetArray = np.concatenate(pieces, dtype=offset_dtype)
    return ret
def concat_offsets_or_none(
    list_of_offsets_or_none: list[cpy.OffsetArray | None],
) -> cpy.OffsetArray | None:
    """Join a list of offset arrays, ignoring None entries.

    Returns None when every entry is None (or the list is empty).
    """
    kept = [o for o in list_of_offsets_or_none if o is not None]
    return concat_offsets(kept) if kept else None
def concat_points(list_of_points: list[cpy.PointArray]) -> cpy.PointArray:
    """Join a list of point arrays into one point array.

    Raises ValueError if the list is empty.
    """
    if list_of_points:
        return np.concatenate(list_of_points, dtype=point_dtype)
    raise ValueError("Empty list passed to concat_points")
def concat_points_or_none(
    list_of_points_or_none: list[cpy.PointArray | None],
) -> cpy.PointArray | None:
    """Join a list of point arrays, ignoring None entries.

    Returns None when every entry is None (or the list is empty).
    """
    kept = [p for p in list_of_points_or_none if p is not None]
    return concat_points(kept) if kept else None
def concat_points_or_none_with_nan(
    list_of_points_or_none: list[cpy.PointArray | None],
) -> cpy.PointArray | None:
    """Join point arrays with NaN separators, ignoring None entries.

    Returns None when every entry is None (or the list is empty).
    """
    kept = [p for p in list_of_points_or_none if p is not None]
    return concat_points_with_nan(kept) if kept else None
def concat_points_with_nan(list_of_points: list[cpy.PointArray]) -> cpy.PointArray:
    """Join point arrays into one, inserting a NaN row between lines.

    Raises ValueError if the list is empty.  A single-element list is
    returned as-is (no copy).
    """
    if not list_of_points:
        raise ValueError("Empty list passed to concat_points_with_nan")
    if len(list_of_points) == 1:
        return list_of_points[0]

    nan_spacer = np.full((1, 2), np.nan, dtype=point_dtype)
    interleaved = [list_of_points[0]]
    for pts in list_of_points[1:]:
        interleaved.append(nan_spacer)
        interleaved.append(pts)
    return concat_points(interleaved)
def insert_nan_at_offsets(points: cpy.PointArray, offsets: cpy.OffsetArray) -> cpy.PointArray:
    """Insert NaN rows into a point array at the interior offsets.

    With two or fewer offsets there is a single line and nothing to insert,
    so the input array is returned unchanged.
    """
    check_point_array(points)
    check_offset_array(offsets)

    if len(offsets) <= 2:
        return points

    nan_spacer = np.array([np.nan, np.nan], dtype=point_dtype)
    # Cast to int64: np.insert rejects mixed signed/unsigned index types.
    inner = offsets[1:-1].astype(np.int64)
    return np.insert(points, inner, nan_spacer, axis=0)
def offsets_from_codes(codes: cpy.CodeArray) -> cpy.OffsetArray:
    """Derive offsets from a code array: each MOVETO begins a new line.

    The final offset is the total number of codes.
    """
    check_code_array(codes)

    moveto_indices = np.nonzero(codes == MOVETO)[0]
    return np.append(moveto_indices, len(codes)).astype(offset_dtype)
def offsets_from_lengths(list_of_points: list[cpy.PointArray]) -> cpy.OffsetArray:
    """Derive offsets from the lengths of a list of point arrays.

    Raises ValueError if the list is empty.
    """
    if not list_of_points:
        raise ValueError("Empty list passed to offsets_from_lengths")

    lengths = [len(line) for line in list_of_points]
    return np.cumsum([0] + lengths, dtype=offset_dtype)
def outer_offsets_from_list_of_codes(list_of_codes: list[cpy.CodeArray]) -> cpy.OffsetArray:
    """Derive outer offsets by counting MOVETO codes per code array.

    Raises ValueError if the list is empty.
    """
    if not list_of_codes:
        raise ValueError("Empty list passed to outer_offsets_from_list_of_codes")

    counts = [np.count_nonzero(codes == MOVETO) for codes in list_of_codes]
    return np.cumsum([0] + counts, dtype=offset_dtype)
def outer_offsets_from_list_of_offsets(list_of_offsets: list[cpy.OffsetArray]) -> cpy.OffsetArray:
    """Derive outer offsets from a list of offset arrays.

    Each offset array of length k describes k-1 lines.  Raises ValueError if
    the list is empty.
    """
    if not list_of_offsets:
        raise ValueError("Empty list passed to outer_offsets_from_list_of_offsets")

    line_counts = [len(offsets) - 1 for offsets in list_of_offsets]
    return np.cumsum([0] + line_counts, dtype=offset_dtype)
def remove_nan(points: cpy.PointArray) -> tuple[cpy.PointArray, cpy.OffsetArray]:
    """Strip NaN separator rows from a point array.

    Returns the cleaned points plus the offsets of the lines that the NaN
    rows previously separated.  NaN is detected via the x-coordinate only.
    """
    check_point_array(points)

    nan_idx = np.nonzero(np.isnan(points[:, 0]))[0]
    if len(nan_idx) == 0:
        # No separators: a single line covering everything.
        return points, np.array([0, len(points)], dtype=offset_dtype)

    cleaned = np.delete(points, nan_idx, axis=0)
    # Account for rows removed before each NaN position.
    nan_idx -= np.arange(len(nan_idx))
    offsets: cpy.OffsetArray = np.empty(len(nan_idx) + 2, dtype=offset_dtype)
    offsets[0] = 0
    offsets[1:-1] = nan_idx
    offsets[-1] = len(cleaned)
    return cleaned, offsets
def split_codes_by_offsets(codes: cpy.CodeArray, offsets: cpy.OffsetArray) -> list[cpy.CodeArray]:
    """Split a code array into pieces at the interior offsets.

    With two or fewer offsets the whole array is a single piece.
    """
    check_code_array(codes)
    check_offset_array(offsets)

    if len(offsets) <= 2:
        return [codes]
    return np.split(codes, offsets[1:-1])
def split_points_by_offsets(
    points: cpy.PointArray,
    offsets: cpy.OffsetArray,
) -> list[cpy.PointArray]:
    """Split a point array into pieces at the interior offsets.

    With two or fewer offsets the whole array is a single piece.
    """
    check_point_array(points)
    check_offset_array(offsets)

    if len(offsets) <= 2:
        return [points]
    return np.split(points, offsets[1:-1])
def split_points_at_nan(points: cpy.PointArray) -> list[cpy.PointArray]:
    """Split a point array into separate lines at NaN separator rows.

    NaN is detected via the x-coordinate only.  An array with no NaNs is
    returned as a single-element list.
    """
    check_point_array(points)

    nan_idx = np.nonzero(np.isnan(points[:, 0]))[0]
    if len(nan_idx) == 0:
        return [points]

    # Sentinels at both ends so each (a, b) pair brackets one line.
    bounds = np.concatenate(([-1], nan_idx, [len(points)]))
    return [points[a + 1:b] for a, b in pairwise(bounds)]
venv\Lib\site-packages\contourpy\chunk.py
from __future__ import annotations
import math
def calc_chunk_sizes(
    chunk_size: int | tuple[int, int] | None,
    chunk_count: int | tuple[int, int] | None,
    total_chunk_count: int | None,
    ny: int,
    nx: int,
) -> tuple[int, int]:
    """Calculate chunk sizes.

    Args:
        chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same
            size in both directions if only one is specified. Cannot be negative.
        chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the
            same count in both directions if only one is specified. If less than 1, set to 1.
        total_chunk_count (int, optional): Total number of chunks. If less than 1, set to 1.
        ny (int): Number of grid points in y-direction.
        nx (int): Number of grid points in x-direction.

    Return:
        tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size).

    Note:
        Zero or one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` should be
        specified.
    """
    if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1:
        raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set")

    if nx < 2 or ny < 2:
        raise ValueError(f"(ny, nx) must be at least (2, 2), not ({ny}, {nx})")

    if total_chunk_count is not None:
        # The grid has (nx-1)*(ny-1) quads, the upper bound on chunk count.
        max_chunk_count = (nx-1)*(ny-1)
        total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count)
        if total_chunk_count == 1:
            # chunk_size of 0 means a single chunk covering the whole domain.
            chunk_size = 0
        elif total_chunk_count == max_chunk_count:
            chunk_size = (1, 1)
        else:
            # Split the requested count into two factors; the larger factor
            # goes to the longer grid direction.
            factors = two_factors(total_chunk_count)
            if ny > nx:
                chunk_count = factors
            else:
                chunk_count = (factors[1], factors[0])

    if chunk_count is not None:
        if isinstance(chunk_count, tuple):
            y_chunk_count, x_chunk_count = chunk_count
        else:
            y_chunk_count = x_chunk_count = chunk_count
        x_chunk_count = min(max(x_chunk_count, 1), nx-1)
        y_chunk_count = min(max(y_chunk_count, 1), ny-1)
        # Convert counts to sizes, rounding up so the chunks cover the grid.
        chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count))

    if chunk_size is None:
        y_chunk_size = x_chunk_size = 0
    elif isinstance(chunk_size, tuple):
        y_chunk_size, x_chunk_size = chunk_size
    else:
        y_chunk_size = x_chunk_size = chunk_size

    if x_chunk_size < 0 or y_chunk_size < 0:
        raise ValueError("chunk_size cannot be negative")

    return y_chunk_size, x_chunk_size
def two_factors(n: int) -> tuple[int, int]:
    """Split an integer into two integer factors.

    The two factors will be as close as possible to the sqrt of n, and are returned in decreasing
    order. Worst case returns (n, 1).

    Args:
        n (int): The integer to factorize, must be positive.

    Return:
        tuple(int, int): The two factors of n, in decreasing order.

    Raises:
        ValueError: If n is not positive. (The previous guard only rejected
            negative n, so n == 0 fell through to a ZeroDivisionError in the
            search loop below.)
    """
    # Guard must reject 0 as well: ceil(sqrt(0)) == 0 would make `n % i`
    # divide by zero.
    if n <= 0:
        raise ValueError(f"two_factors expects positive integer not {n}")
    # Search downward from ceil(sqrt(n)) for the largest factor <= sqrt(n)'s
    # neighborhood; guaranteed to terminate at i == 1.
    i = math.ceil(math.sqrt(n))
    while n % i != 0:
        i -= 1
    j = n // i
    # Return the larger factor first.
    if i > j:
        return i, j
    else:
        return j, i
venv\Lib\site-packages\contourpy\convert.py
from __future__ import annotations
from itertools import pairwise
from typing import TYPE_CHECKING, cast
import numpy as np
from contourpy._contourpy import FillType, LineType
import contourpy.array as arr
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.typecheck import check_filled, check_lines
from contourpy.types import MOVETO, offset_dtype
if TYPE_CHECKING:
import contourpy._contourpy as cpy
def _convert_filled_from_OuterCode(
    filled: cpy.FillReturn_OuterCode,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    """Convert filled contours from OuterCode to the requested fill type.

    Returns the input unchanged when converting to OuterCode itself.
    Raises ValueError for an unrecognised target fill type.
    """
    if fill_type_to == FillType.OuterCode:
        return filled
    elif fill_type_to == FillType.OuterOffset:
        return (filled[0], [arr.offsets_from_codes(codes) for codes in filled[1]])

    # Remaining targets are chunked formats: combine all outer boundaries
    # into single point/code arrays, or None when there are none at all.
    if len(filled[0]) > 0:
        points = arr.concat_points(filled[0])
        codes = arr.concat_codes(filled[1])
    else:
        points = None
        codes = None

    if fill_type_to == FillType.ChunkCombinedCode:
        return ([points], [codes])
    elif fill_type_to == FillType.ChunkCombinedOffset:
        return ([points], [None if codes is None else arr.offsets_from_codes(codes)])
    elif fill_type_to == FillType.ChunkCombinedCodeOffset:
        # Outer offsets come from the lengths of the original outer arrays.
        outer_offsets = None if points is None else arr.offsets_from_lengths(filled[0])
        ret1: cpy.FillReturn_ChunkCombinedCodeOffset = ([points], [codes], [outer_offsets])
        return ret1
    elif fill_type_to == FillType.ChunkCombinedOffsetOffset:
        if codes is None:
            ret2: cpy.FillReturn_ChunkCombinedOffsetOffset = ([None], [None], [None])
        else:
            offsets = arr.offsets_from_codes(codes)
            outer_offsets = arr.outer_offsets_from_list_of_codes(filled[1])
            ret2 = ([points], [offsets], [outer_offsets])
        return ret2
    else:
        raise ValueError(f"Invalid FillType {fill_type_to}")
def _convert_filled_from_OuterOffset(
    filled: cpy.FillReturn_OuterOffset,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    """Convert filled contours from OuterOffset to the requested fill type.

    Returns the input unchanged when converting to OuterOffset itself.
    Raises ValueError for an unrecognised target fill type.
    """
    if fill_type_to == FillType.OuterCode:
        # Filled boundaries are closed polygons, so codes follow directly
        # from the offsets.
        separate_codes = [arr.codes_from_offsets(offsets) for offsets in filled[1]]
        return (filled[0], separate_codes)
    elif fill_type_to == FillType.OuterOffset:
        return filled

    # Remaining targets are chunked formats: combine everything into single
    # arrays, or None when there are no boundaries at all.
    if len(filled[0]) > 0:
        points = arr.concat_points(filled[0])
        offsets = arr.concat_offsets(filled[1])
    else:
        points = None
        offsets = None

    if fill_type_to == FillType.ChunkCombinedCode:
        return ([points], [None if offsets is None else arr.codes_from_offsets(offsets)])
    elif fill_type_to == FillType.ChunkCombinedOffset:
        return ([points], [offsets])
    elif fill_type_to == FillType.ChunkCombinedCodeOffset:
        if offsets is None:
            ret1: cpy.FillReturn_ChunkCombinedCodeOffset = ([None], [None], [None])
        else:
            codes = arr.codes_from_offsets(offsets)
            outer_offsets = arr.offsets_from_lengths(filled[0])
            ret1 = ([points], [codes], [outer_offsets])
        return ret1
    elif fill_type_to == FillType.ChunkCombinedOffsetOffset:
        if points is None:
            ret2: cpy.FillReturn_ChunkCombinedOffsetOffset = ([None], [None], [None])
        else:
            outer_offsets = arr.outer_offsets_from_list_of_offsets(filled[1])
            ret2 = ([points], [offsets], [outer_offsets])
        return ret2
    else:
        raise ValueError(f"Invalid FillType {fill_type_to}")
def _convert_filled_from_ChunkCombinedCode(
    filled: cpy.FillReturn_ChunkCombinedCode,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    """Convert filled contours from ChunkCombinedCode.

    Only chunked targets are supported; any other target raises ValueError.
    """
    if fill_type_to == FillType.ChunkCombinedCode:
        return filled
    if fill_type_to == FillType.ChunkCombinedOffset:
        all_offsets = [
            arr.offsets_from_codes(codes) if codes is not None else None
            for codes in filled[1]
        ]
        return (filled[0], all_offsets)
    raise ValueError(
        f"Conversion from {FillType.ChunkCombinedCode} to {fill_type_to} not supported")
def _convert_filled_from_ChunkCombinedOffset(
    filled: cpy.FillReturn_ChunkCombinedOffset,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    """Convert filled contours from ChunkCombinedOffset.

    Only chunked targets are supported; any other target raises ValueError.
    """
    if fill_type_to == FillType.ChunkCombinedCode:
        chunk_codes: list[cpy.CodeArray | None] = []
        for points, offsets in zip(*filled):
            if points is None:
                # Empty chunk stays empty.
                chunk_codes.append(None)
            else:
                if TYPE_CHECKING:
                    assert offsets is not None
                # Start/end point equality decides closed vs open boundaries.
                chunk_codes.append(arr.codes_from_offsets_and_points(offsets, points))
        return (filled[0], chunk_codes)
    elif fill_type_to == FillType.ChunkCombinedOffset:
        return filled
    else:
        raise ValueError(
            f"Conversion from {FillType.ChunkCombinedOffset} to {fill_type_to} not supported")
def _convert_filled_from_ChunkCombinedCodeOffset(
    filled: cpy.FillReturn_ChunkCombinedCodeOffset,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    """Convert filled contours from ChunkCombinedCodeOffset to any fill type.

    Raises ValueError for an unrecognised target fill type.
    """
    if fill_type_to == FillType.OuterCode:
        separate_points = []
        separate_codes = []
        for points, codes, outer_offsets in zip(*filled):
            if points is not None:
                if TYPE_CHECKING:
                    assert codes is not None
                    assert outer_offsets is not None
                # Each outer boundary becomes its own points/codes pair.
                separate_points += arr.split_points_by_offsets(points, outer_offsets)
                separate_codes += arr.split_codes_by_offsets(codes, outer_offsets)
        return (separate_points, separate_codes)
    elif fill_type_to == FillType.OuterOffset:
        separate_points = []
        separate_offsets = []
        for points, codes, outer_offsets in zip(*filled):
            if points is not None:
                if TYPE_CHECKING:
                    assert codes is not None
                    assert outer_offsets is not None
                separate_points += arr.split_points_by_offsets(points, outer_offsets)
                # Per-boundary codes are an intermediate used to derive the
                # per-boundary offsets.
                separate_codes = arr.split_codes_by_offsets(codes, outer_offsets)
                separate_offsets += [arr.offsets_from_codes(codes) for codes in separate_codes]
        return (separate_points, separate_offsets)
    elif fill_type_to == FillType.ChunkCombinedCode:
        ret1: cpy.FillReturn_ChunkCombinedCode = (filled[0], filled[1])
        return ret1
    elif fill_type_to == FillType.ChunkCombinedOffset:
        all_offsets = [None if codes is None else arr.offsets_from_codes(codes)
                       for codes in filled[1]]
        ret2: cpy.FillReturn_ChunkCombinedOffset = (filled[0], all_offsets)
        return ret2
    elif fill_type_to == FillType.ChunkCombinedCodeOffset:
        return filled
    elif fill_type_to == FillType.ChunkCombinedOffsetOffset:
        chunk_offsets: list[cpy.OffsetArray | None] = []
        chunk_outer_offsets: list[cpy.OffsetArray | None] = []
        for codes, outer_offsets in zip(*filled[1:]):
            if codes is None:
                chunk_offsets.append(None)
                chunk_outer_offsets.append(None)
            else:
                if TYPE_CHECKING:
                    assert outer_offsets is not None
                offsets = arr.offsets_from_codes(codes)
                # Re-express outer offsets as indices into the offsets array
                # rather than into the points array.
                outer_offsets = np.array([np.nonzero(offsets == oo)[0][0] for oo in outer_offsets],
                                         dtype=offset_dtype)
                chunk_offsets.append(offsets)
                chunk_outer_offsets.append(outer_offsets)
        ret3: cpy.FillReturn_ChunkCombinedOffsetOffset = (
            filled[0], chunk_offsets, chunk_outer_offsets,
        )
        return ret3
    else:
        raise ValueError(f"Invalid FillType {fill_type_to}")
def _convert_filled_from_ChunkCombinedOffsetOffset(
    filled: cpy.FillReturn_ChunkCombinedOffsetOffset,
    fill_type_to: FillType,
) -> cpy.FillReturn:
    # Convert filled contours from FillType.ChunkCombinedOffsetOffset to fill_type_to.
    # In the source representation each chunk holds a combined points array, an
    # offsets array delimiting the individual boundaries within points, and an
    # outer_offsets array delimiting outer boundaries (each with its holes) within
    # offsets.  A None points entry means an empty chunk.
    # Raises ValueError for an unrecognised fill_type_to.
    if fill_type_to == FillType.OuterCode:
        separate_points = []
        separate_codes = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is not None:
                if TYPE_CHECKING:
                    assert offsets is not None
                    assert outer_offsets is not None
                codes = arr.codes_from_offsets_and_points(offsets, points)
                # Remap outer_offsets from indices into offsets to indices into points.
                outer_offsets = offsets[outer_offsets]
                separate_points += arr.split_points_by_offsets(points, outer_offsets)
                separate_codes += arr.split_codes_by_offsets(codes, outer_offsets)
        return (separate_points, separate_codes)
    elif fill_type_to == FillType.OuterOffset:
        separate_points = []
        separate_offsets = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is not None:
                if TYPE_CHECKING:
                    assert offsets is not None
                    assert outer_offsets is not None
                if len(outer_offsets) > 2:
                    # Multiple outer boundaries: rebase each outer polygon's slice of
                    # offsets so that each starts at zero.
                    separate_offsets += [offsets[s:e+1] - offsets[s] for s, e in
                                         pairwise(outer_offsets)]
                else:
                    # Single outer boundary: offsets can be reused unchanged.
                    separate_offsets.append(offsets)
                separate_points += arr.split_points_by_offsets(points, offsets[outer_offsets])
        return (separate_points, separate_offsets)
    elif fill_type_to == FillType.ChunkCombinedCode:
        chunk_codes: list[cpy.CodeArray | None] = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is None:
                chunk_codes.append(None)
            else:
                if TYPE_CHECKING:
                    assert offsets is not None
                    assert outer_offsets is not None
                chunk_codes.append(arr.codes_from_offsets_and_points(offsets, points))
        ret1: cpy.FillReturn_ChunkCombinedCode = (filled[0], chunk_codes)
        return ret1
    elif fill_type_to == FillType.ChunkCombinedOffset:
        # Outer boundary information is simply dropped.
        return (filled[0], filled[1])
    elif fill_type_to == FillType.ChunkCombinedCodeOffset:
        chunk_codes = []
        chunk_outer_offsets: list[cpy.OffsetArray | None] = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is None:
                chunk_codes.append(None)
                chunk_outer_offsets.append(None)
            else:
                if TYPE_CHECKING:
                    assert offsets is not None
                    assert outer_offsets is not None
                chunk_codes.append(arr.codes_from_offsets_and_points(offsets, points))
                # Target format indexes outer boundaries by point/code index,
                # i.e. offsets[outer_offsets].
                chunk_outer_offsets.append(offsets[outer_offsets])
        ret2: cpy.FillReturn_ChunkCombinedCodeOffset = (filled[0], chunk_codes, chunk_outer_offsets)
        return ret2
    elif fill_type_to == FillType.ChunkCombinedOffsetOffset:
        # Identity conversion.
        return filled
    else:
        raise ValueError(f"Invalid FillType {fill_type_to}")
def convert_filled(
    filled: cpy.FillReturn,
    fill_type_from: FillType | str,
    fill_type_to: FillType | str,
) -> cpy.FillReturn:
    """Convert filled contours from one :class:`~.FillType` to another.

    Args:
        filled (sequence of arrays): Filled contour polygons to convert, such as those returned by
            :meth:`.ContourGenerator.filled`.
        fill_type_from (FillType or str): :class:`~.FillType` to convert from as enum or
            string equivalent.
        fill_type_to (FillType or str): :class:`~.FillType` to convert to as enum or string
            equivalent.

    Return:
        Converted filled contour polygons.

    When a non-chunked fill type (``FillType.OuterCode`` or ``FillType.OuterOffset``) is converted
    to a chunked one, all polygons end up in the first chunk. Converting the other way discards
    all chunk information. Converting a fill type that does not record the relationship between
    outer boundaries and contained holes (``FillType.ChunkCombinedCode`` or
    ``FillType.ChunkCombinedOffset``) to one that does will raise a ``ValueError``.

    .. versionadded:: 1.2.0
    """
    fill_type_from = as_fill_type(fill_type_from)
    fill_type_to = as_fill_type(fill_type_to)
    check_filled(filled, fill_type_from)

    # check_filled() has already raised for any invalid fill_type_from, so a
    # lookup miss here can only occur for a genuinely unknown FillType.
    dispatch = {
        FillType.OuterCode: _convert_filled_from_OuterCode,
        FillType.OuterOffset: _convert_filled_from_OuterOffset,
        FillType.ChunkCombinedCode: _convert_filled_from_ChunkCombinedCode,
        FillType.ChunkCombinedOffset: _convert_filled_from_ChunkCombinedOffset,
        FillType.ChunkCombinedCodeOffset: _convert_filled_from_ChunkCombinedCodeOffset,
        FillType.ChunkCombinedOffsetOffset: _convert_filled_from_ChunkCombinedOffsetOffset,
    }
    try:
        converter = dispatch[fill_type_from]
    except KeyError:
        raise ValueError(f"Invalid FillType {fill_type_from}") from None
    return converter(filled, fill_type_to)  # type: ignore[arg-type]
def _convert_lines_from_Separate(
    lines: cpy.LineReturn_Separate,
    line_type_to: LineType,
) -> cpy.LineReturn:
    # Convert contour lines from LineType.Separate (a plain list of point
    # arrays, one per line) to the requested line type.  An empty input list
    # maps to a single empty (None) chunk for the chunked line types.
    if line_type_to == LineType.Separate:
        return lines
    if line_type_to == LineType.SeparateCode:
        codes = [arr.codes_from_points(line) for line in lines]
        return (lines, codes)
    if line_type_to == LineType.ChunkCombinedCode:
        if not lines:
            return ([None], [None])
        combined = arr.concat_points(lines)
        combined_offsets = arr.offsets_from_lengths(lines)
        combined_codes = arr.codes_from_offsets_and_points(combined_offsets, combined)
        return ([combined], [combined_codes])
    if line_type_to == LineType.ChunkCombinedOffset:
        if not lines:
            return ([None], [None])
        return ([arr.concat_points(lines)], [arr.offsets_from_lengths(lines)])
    if line_type_to == LineType.ChunkCombinedNan:
        if not lines:
            return ([None],)
        return ([arr.concat_points_with_nan(lines)],)
    raise ValueError(f"Invalid LineType {line_type_to}")
def _convert_lines_from_SeparateCode(
    lines: cpy.LineReturn_SeparateCode,
    line_type_to: LineType,
) -> cpy.LineReturn:
    # Convert contour lines from LineType.SeparateCode, i.e. a pair of
    # (list of point arrays, list of code arrays), to the requested line type.
    point_list = lines[0]
    if line_type_to == LineType.Separate:
        # Drop codes.
        return point_list
    if line_type_to == LineType.SeparateCode:
        return lines
    if line_type_to == LineType.ChunkCombinedCode:
        if not point_list:
            return ([None], [None])
        return ([arr.concat_points(point_list)], [arr.concat_codes(lines[1])])
    if line_type_to == LineType.ChunkCombinedOffset:
        if not point_list:
            return ([None], [None])
        return ([arr.concat_points(point_list)], [arr.offsets_from_lengths(point_list)])
    if line_type_to == LineType.ChunkCombinedNan:
        if not point_list:
            return ([None],)
        return ([arr.concat_points_with_nan(point_list)],)
    raise ValueError(f"Invalid LineType {line_type_to}")
def _convert_lines_from_ChunkCombinedCode(
    lines: cpy.LineReturn_ChunkCombinedCode,
    line_type_to: LineType,
) -> cpy.LineReturn:
    # Convert contour lines from LineType.ChunkCombinedCode to line_type_to.
    # Each chunk holds a combined points array and a matching array of kind
    # codes; a None points entry means an empty chunk.
    if line_type_to in (LineType.Separate, LineType.SeparateCode):
        separate_lines = []
        for points, codes in zip(*lines):
            if points is not None:
                if TYPE_CHECKING:
                    assert codes is not None
                # Each MOVETO code marks the start of a new line.
                split_at = np.nonzero(codes == MOVETO)[0]
                if len(split_at) > 1:
                    separate_lines += np.split(points, split_at[1:])
                else:
                    separate_lines.append(points)
        if line_type_to == LineType.Separate:
            return separate_lines
        else:
            separate_codes = [arr.codes_from_points(line) for line in separate_lines]
            return (separate_lines, separate_codes)
    elif line_type_to == LineType.ChunkCombinedCode:
        # Identity conversion.
        return lines
    elif line_type_to == LineType.ChunkCombinedOffset:
        chunk_offsets = [None if codes is None else arr.offsets_from_codes(codes)
                         for codes in lines[1]]
        return (lines[0], chunk_offsets)
    elif line_type_to == LineType.ChunkCombinedNan:
        points_nan: list[cpy.PointArray | None] = []
        for points, codes in zip(*lines):
            if points is None:
                points_nan.append(None)
            else:
                if TYPE_CHECKING:
                    assert codes is not None
                # Lines within a chunk become NaN-separated runs of points.
                offsets = arr.offsets_from_codes(codes)
                points_nan.append(arr.insert_nan_at_offsets(points, offsets))
        return (points_nan,)
    else:
        raise ValueError(f"Invalid LineType {line_type_to}")
def _convert_lines_from_ChunkCombinedOffset(
    lines: cpy.LineReturn_ChunkCombinedOffset,
    line_type_to: LineType,
) -> cpy.LineReturn:
    # Convert contour lines from LineType.ChunkCombinedOffset to line_type_to.
    # Each chunk holds a combined points array plus offsets delimiting the
    # individual lines within it; a None points entry means an empty chunk.
    if line_type_to in (LineType.Separate, LineType.SeparateCode):
        separate_lines = []
        for points, offsets in zip(*lines):
            if points is not None:
                if TYPE_CHECKING:
                    assert offsets is not None
                separate_lines += arr.split_points_by_offsets(points, offsets)
        if line_type_to == LineType.Separate:
            return separate_lines
        else:
            separate_codes = [arr.codes_from_points(line) for line in separate_lines]
            return (separate_lines, separate_codes)
    elif line_type_to == LineType.ChunkCombinedCode:
        chunk_codes: list[cpy.CodeArray | None] = []
        for points, offsets in zip(*lines):
            if points is None:
                chunk_codes.append(None)
            else:
                if TYPE_CHECKING:
                    assert offsets is not None
                chunk_codes.append(arr.codes_from_offsets_and_points(offsets, points))
        return (lines[0], chunk_codes)
    elif line_type_to == LineType.ChunkCombinedOffset:
        # Identity conversion.
        return lines
    elif line_type_to == LineType.ChunkCombinedNan:
        points_nan: list[cpy.PointArray | None] = []
        for points, offsets in zip(*lines):
            if points is None:
                points_nan.append(None)
            else:
                if TYPE_CHECKING:
                    assert offsets is not None
                # Lines within a chunk become NaN-separated runs of points.
                points_nan.append(arr.insert_nan_at_offsets(points, offsets))
        return (points_nan,)
    else:
        raise ValueError(f"Invalid LineType {line_type_to}")
def _convert_lines_from_ChunkCombinedNan(
    lines: cpy.LineReturn_ChunkCombinedNan,
    line_type_to: LineType,
) -> cpy.LineReturn:
    # Convert contour lines from LineType.ChunkCombinedNan to line_type_to.
    # Each chunk is a single points array in which NaN rows separate the
    # individual lines; a None entry means an empty chunk.
    if line_type_to in (LineType.Separate, LineType.SeparateCode):
        separate_lines = []
        for points in lines[0]:
            if points is not None:
                separate_lines += arr.split_points_at_nan(points)
        if line_type_to == LineType.Separate:
            return separate_lines
        else:
            separate_codes = [arr.codes_from_points(points) for points in separate_lines]
            return (separate_lines, separate_codes)
    elif line_type_to == LineType.ChunkCombinedCode:
        chunk_points: list[cpy.PointArray | None] = []
        chunk_codes: list[cpy.CodeArray | None] = []
        for points in lines[0]:
            if points is None:
                chunk_points.append(None)
                chunk_codes.append(None)
            else:
                # Strip NaN separators; returned offsets mark line boundaries.
                points, offsets = arr.remove_nan(points)
                chunk_points.append(points)
                chunk_codes.append(arr.codes_from_offsets_and_points(offsets, points))
        return (chunk_points, chunk_codes)
    elif line_type_to == LineType.ChunkCombinedOffset:
        chunk_points = []
        chunk_offsets: list[cpy.OffsetArray | None] = []
        for points in lines[0]:
            if points is None:
                chunk_points.append(None)
                chunk_offsets.append(None)
            else:
                points, offsets = arr.remove_nan(points)
                chunk_points.append(points)
                chunk_offsets.append(offsets)
        return (chunk_points, chunk_offsets)
    elif line_type_to == LineType.ChunkCombinedNan:
        # Identity conversion.
        return lines
    else:
        raise ValueError(f"Invalid LineType {line_type_to}")
def convert_lines(
    lines: cpy.LineReturn,
    line_type_from: LineType | str,
    line_type_to: LineType | str,
) -> cpy.LineReturn:
    """Convert contour lines from one :class:`~.LineType` to another.

    Args:
        lines (sequence of arrays): Contour lines to convert, such as those returned by
            :meth:`.ContourGenerator.lines`.
        line_type_from (LineType or str): :class:`~.LineType` to convert from as enum or
            string equivalent.
        line_type_to (LineType or str): :class:`~.LineType` to convert to as enum or string
            equivalent.

    Return:
        Converted contour lines.

    When converting non-chunked line types (``LineType.Separate`` or ``LineType.SeparateCode``) to
    chunked ones (``LineType.ChunkCombinedCode``, ``LineType.ChunkCombinedOffset`` or
    ``LineType.ChunkCombinedNan``), all lines are placed in the first chunk. When converting in the
    other direction, all chunk information is discarded.

    .. versionadded:: 1.2.0
    """
    line_type_from = as_line_type(line_type_from)
    line_type_to = as_line_type(line_type_to)
    check_lines(lines, line_type_from)

    # check_lines() has already raised for any invalid line_type_from, so a
    # lookup miss here can only occur for a genuinely unknown LineType.
    dispatch = {
        LineType.Separate: _convert_lines_from_Separate,
        LineType.SeparateCode: _convert_lines_from_SeparateCode,
        LineType.ChunkCombinedCode: _convert_lines_from_ChunkCombinedCode,
        LineType.ChunkCombinedOffset: _convert_lines_from_ChunkCombinedOffset,
        LineType.ChunkCombinedNan: _convert_lines_from_ChunkCombinedNan,
    }
    try:
        converter = dispatch[line_type_from]
    except KeyError:
        raise ValueError(f"Invalid LineType {line_type_from}") from None
    return converter(lines, line_type_to)  # type: ignore[arg-type]
def convert_multi_filled(
    multi_filled: list[cpy.FillReturn],
    fill_type_from: FillType | str,
    fill_type_to: FillType | str,
) -> list[cpy.FillReturn]:
    """Convert multiple sets of filled contours from one :class:`~.FillType` to another.

    Args:
        multi_filled (nested sequence of arrays): Filled contour polygons to convert, such as
            those returned by :meth:`.ContourGenerator.multi_filled`.
        fill_type_from (FillType or str): :class:`~.FillType` to convert from as enum or
            string equivalent.
        fill_type_to (FillType or str): :class:`~.FillType` to convert to as enum or string
            equivalent.

    Return:
        Converted sets of filled contour polygons.

    Chunking caveats are the same as for :func:`convert_filled`: converting a non-chunked fill
    type (``FillType.OuterCode`` or ``FillType.OuterOffset``) to a chunked one places all polygons
    in the first chunk, converting the other way discards all chunk information, and converting
    from a fill type unaware of the outer-boundary/hole relationship (``FillType.ChunkCombinedCode``
    or ``FillType.ChunkCombinedOffset``) to one that is aware raises a ``ValueError``.

    .. versionadded:: 1.3.0
    """
    # Coerce once up front so each per-set conversion skips repeated string parsing.
    from_enum = as_fill_type(fill_type_from)
    to_enum = as_fill_type(fill_type_to)
    return [convert_filled(single, from_enum, to_enum) for single in multi_filled]
def convert_multi_lines(
    multi_lines: list[cpy.LineReturn],
    line_type_from: LineType | str,
    line_type_to: LineType | str,
) -> list[cpy.LineReturn]:
    """Convert multiple sets of contour lines from one :class:`~.LineType` to another.

    Args:
        multi_lines (nested sequence of arrays): Contour lines to convert, such as those returned
            by :meth:`.ContourGenerator.multi_lines`.
        line_type_from (LineType or str): :class:`~.LineType` to convert from as enum or
            string equivalent.
        line_type_to (LineType or str): :class:`~.LineType` to convert to as enum or string
            equivalent.

    Return:
        Converted sets of contour lines.

    Chunking caveats are the same as for :func:`convert_lines`: converting a non-chunked line type
    (``LineType.Separate`` or ``LineType.SeparateCode``) to a chunked one places all lines in the
    first chunk, and converting the other way discards all chunk information.

    .. versionadded:: 1.3.0
    """
    # Coerce once up front so each per-set conversion skips repeated string parsing.
    from_enum = as_line_type(line_type_from)
    to_enum = as_line_type(line_type_to)
    return [convert_lines(single, from_enum, to_enum) for single in multi_lines]
venv\Lib\site-packages\contourpy\dechunk.py
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from contourpy._contourpy import FillType, LineType
from contourpy.array import (
concat_codes_or_none,
concat_offsets_or_none,
concat_points_or_none,
concat_points_or_none_with_nan,
)
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.typecheck import check_filled, check_lines
if TYPE_CHECKING:
import contourpy._contourpy as cpy
def dechunk_filled(filled: cpy.FillReturn, fill_type: FillType | str) -> cpy.FillReturn:
    """Return the specified filled contours with chunked data moved into the first chunk.

    Filled contours that are not chunked (``FillType.OuterCode`` and ``FillType.OuterOffset``) and
    those that are but only contain a single chunk are returned unmodified. Individual polygons are
    unchanged, they are not geometrically combined.

    Args:
        filled (sequence of arrays): Filled contour data, such as returned by
            :meth:`.ContourGenerator.filled`.
        fill_type (FillType or str): Type of :meth:`~.ContourGenerator.filled` as enum or string
            equivalent.

    Return:
        Filled contours in a single chunk.

    .. versionadded:: 1.2.0
    """
    fill_type = as_fill_type(fill_type)
    if fill_type in (FillType.OuterCode, FillType.OuterOffset):
        # No-op if fill_type is not chunked.
        return filled
    check_filled(filled, fill_type)
    if len(filled[0]) < 2:
        # No-op if just one chunk.
        return filled
    if TYPE_CHECKING:
        filled = cast(cpy.FillReturn_Chunk, filled)
    # Combined points of all chunks, or None if every chunk is empty.
    points = concat_points_or_none(filled[0])
    if fill_type == FillType.ChunkCombinedCode:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedCode, filled)
        if points is None:
            # All chunks empty: collapse to a single empty chunk.
            ret1: cpy.FillReturn_ChunkCombinedCode = ([None], [None])
        else:
            ret1 = ([points], [concat_codes_or_none(filled[1])])
        return ret1
    elif fill_type == FillType.ChunkCombinedOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedOffset, filled)
        if points is None:
            ret2: cpy.FillReturn_ChunkCombinedOffset = ([None], [None])
        else:
            ret2 = ([points], [concat_offsets_or_none(filled[1])])
        return ret2
    elif fill_type == FillType.ChunkCombinedCodeOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedCodeOffset, filled)
        if points is None:
            ret3: cpy.FillReturn_ChunkCombinedCodeOffset = ([None], [None], [None])
        else:
            outer_offsets = concat_offsets_or_none(filled[2])
            ret3 = ([points], [concat_codes_or_none(filled[1])], [outer_offsets])
        return ret3
    elif fill_type == FillType.ChunkCombinedOffsetOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedOffsetOffset, filled)
        if points is None:
            ret4: cpy.FillReturn_ChunkCombinedOffsetOffset = ([None], [None], [None])
        else:
            outer_offsets = concat_offsets_or_none(filled[2])
            ret4 = ([points], [concat_offsets_or_none(filled[1])], [outer_offsets])
        return ret4
    else:
        raise ValueError(f"Invalid FillType {fill_type}")
def dechunk_lines(lines: cpy.LineReturn, line_type: LineType | str) -> cpy.LineReturn:
    """Return the specified contour lines with chunked data moved into the first chunk.

    Contour lines that are not chunked (``LineType.Separate`` and ``LineType.SeparateCode``) and
    those that are but only contain a single chunk are returned unmodified. Individual lines are
    unchanged, they are not geometrically combined.

    Args:
        lines (sequence of arrays): Contour line data, such as returned by
            :meth:`.ContourGenerator.lines`.
        line_type (LineType or str): Type of :meth:`~.ContourGenerator.lines` as enum or string
            equivalent.

    Return:
        Contour lines in a single chunk.

    .. versionadded:: 1.2.0
    """
    line_type = as_line_type(line_type)
    if line_type in (LineType.Separate, LineType.SeparateCode):
        # No-op if line_type is not chunked.
        return lines
    check_lines(lines, line_type)
    if len(lines[0]) < 2:
        # No-op if just one chunk.
        return lines
    if TYPE_CHECKING:
        lines = cast(cpy.LineReturn_Chunk, lines)
    if line_type == LineType.ChunkCombinedCode:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedCode, lines)
        points = concat_points_or_none(lines[0])
        if points is None:
            # All chunks empty: collapse to a single empty chunk.
            ret1: cpy.LineReturn_ChunkCombinedCode = ([None], [None])
        else:
            ret1 = ([points], [concat_codes_or_none(lines[1])])
        return ret1
    elif line_type == LineType.ChunkCombinedOffset:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedOffset, lines)
        points = concat_points_or_none(lines[0])
        if points is None:
            ret2: cpy.LineReturn_ChunkCombinedOffset = ([None], [None])
        else:
            ret2 = ([points], [concat_offsets_or_none(lines[1])])
        return ret2
    elif line_type == LineType.ChunkCombinedNan:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedNan, lines)
        # NaN-separated representation: concatenation handles the separators.
        points = concat_points_or_none_with_nan(lines[0])
        ret3: cpy.LineReturn_ChunkCombinedNan = ([points],)
        return ret3
    else:
        raise ValueError(f"Invalid LineType {line_type}")
def dechunk_multi_filled(
    multi_filled: list[cpy.FillReturn],
    fill_type: FillType | str,
) -> list[cpy.FillReturn]:
    """Return multiple sets of filled contours with chunked data moved into the first chunks.

    Filled contours that are not chunked (``FillType.OuterCode`` and ``FillType.OuterOffset``) and
    those that are but only contain a single chunk are returned unmodified. Individual polygons are
    unchanged, they are not geometrically combined.

    Args:
        multi_filled (nested sequence of arrays): Filled contour data, such as returned by
            :meth:`.ContourGenerator.multi_filled`.
        fill_type (FillType or str): Type of :meth:`~.ContourGenerator.filled` as enum or string
            equivalent.

    Return:
        Multiple sets of filled contours in a single chunk.

    .. versionadded:: 1.3.0
    """
    fill_type = as_fill_type(fill_type)
    is_chunked = fill_type not in (FillType.OuterCode, FillType.OuterOffset)
    if not is_chunked:
        # Nothing to move for unchunked fill types.
        return multi_filled
    return [dechunk_filled(single, fill_type) for single in multi_filled]
def dechunk_multi_lines(
    multi_lines: list[cpy.LineReturn],
    line_type: LineType | str,
) -> list[cpy.LineReturn]:
    """Return multiple sets of contour lines with all chunked data moved into the first chunks.

    Contour lines that are not chunked (``LineType.Separate`` and ``LineType.SeparateCode``) and
    those that are but only contain a single chunk are returned unmodified. Individual lines are
    unchanged, they are not geometrically combined.

    Args:
        multi_lines (nested sequence of arrays): Contour line data, such as returned by
            :meth:`.ContourGenerator.multi_lines`.
        line_type (LineType or str): Type of :meth:`~.ContourGenerator.lines` as enum or string
            equivalent.

    Return:
        Multiple sets of contour lines in a single chunk.

    .. versionadded:: 1.3.0
    """
    line_type = as_line_type(line_type)
    is_chunked = line_type not in (LineType.Separate, LineType.SeparateCode)
    if not is_chunked:
        # Nothing to move for unchunked line types.
        return multi_lines
    return [dechunk_lines(single, line_type) for single in multi_lines]
venv\Lib\site-packages\contourpy\enum_util.py
from __future__ import annotations
from contourpy._contourpy import FillType, LineType, ZInterp
def as_fill_type(fill_type: FillType | str) -> FillType:
    """Coerce a FillType or string value to a FillType.

    Args:
        fill_type (FillType or str): Value to convert.

    Return:
        FillType: Converted value.
    """
    # Non-string values are assumed to already be FillType members.
    if not isinstance(fill_type, str):
        return fill_type
    try:
        return FillType.__members__[fill_type]
    except KeyError as e:
        raise ValueError(f"'{fill_type}' is not a valid FillType") from e
def as_line_type(line_type: LineType | str) -> LineType:
    """Coerce a LineType or string value to a LineType.

    Args:
        line_type (LineType or str): Value to convert.

    Return:
        LineType: Converted value.
    """
    # Non-string values are assumed to already be LineType members.
    if not isinstance(line_type, str):
        return line_type
    try:
        return LineType.__members__[line_type]
    except KeyError as e:
        raise ValueError(f"'{line_type}' is not a valid LineType") from e
def as_z_interp(z_interp: ZInterp | str) -> ZInterp:
    """Coerce a ZInterp or string value to a ZInterp.

    Args:
        z_interp (ZInterp or str): Value to convert.

    Return:
        ZInterp: Converted value.
    """
    # Non-string values are assumed to already be ZInterp members.
    if not isinstance(z_interp, str):
        return z_interp
    try:
        return ZInterp.__members__[z_interp]
    except KeyError as e:
        raise ValueError(f"'{z_interp}' is not a valid ZInterp") from e
venv\Lib\site-packages\contourpy\typecheck.py
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
import numpy as np
from contourpy import FillType, LineType
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.types import MOVETO, code_dtype, offset_dtype, point_dtype
if TYPE_CHECKING:
import contourpy._contourpy as cpy
# Minimalist array-checking functions that check dtype, ndims and shape only.
# They do not walk the arrays to check the contents for performance reasons.
def check_code_array(codes: Any) -> None:
    # Cheap structural validation of a code array: dtype, dimensionality and
    # leading MOVETO only; array contents are not walked, for performance.
    if not isinstance(codes, np.ndarray):
        raise TypeError(f"Expected numpy array not {type(codes)}")
    if codes.dtype != code_dtype:
        raise ValueError(f"Expected numpy array of dtype {code_dtype} not {codes.dtype}")
    if codes.ndim != 1 or len(codes) <= 1:
        raise ValueError(f"Expected numpy array of shape (?,) not {codes.shape}")
    if codes[0] != MOVETO:
        raise ValueError(f"First element of code array must be {MOVETO}, not {codes[0]}")
def check_offset_array(offsets: Any) -> None:
    # Cheap structural validation of an offset array: dtype, dimensionality and
    # leading zero only; array contents are not walked, for performance.
    if not isinstance(offsets, np.ndarray):
        raise TypeError(f"Expected numpy array not {type(offsets)}")
    if offsets.dtype != offset_dtype:
        raise ValueError(f"Expected numpy array of dtype {offset_dtype} not {offsets.dtype}")
    if offsets.ndim != 1 or len(offsets) <= 1:
        raise ValueError(f"Expected numpy array of shape (?,) not {offsets.shape}")
    if offsets[0] != 0:
        raise ValueError(f"First element of offset array must be 0, not {offsets[0]}")
def check_point_array(points: Any) -> None:
    # Cheap structural validation of a point array: dtype and (?, 2) shape with
    # at least two points; array contents are not walked, for performance.
    if not isinstance(points, np.ndarray):
        raise TypeError(f"Expected numpy array not {type(points)}")
    if points.dtype != point_dtype:
        raise ValueError(f"Expected numpy array of dtype {point_dtype} not {points.dtype}")
    if points.ndim != 2 or points.shape[1] != 2 or points.shape[0] <= 1:
        raise ValueError(f"Expected numpy array of shape (?, 2) not {points.shape}")
def _check_tuple_of_lists_with_same_length(
maybe_tuple: Any,
tuple_length: int,
allow_empty_lists: bool = True,
) -> None:
if not isinstance(maybe_tuple, tuple):
raise TypeError(f"Expected tuple not {type(maybe_tuple)}")
if len(maybe_tuple) != tuple_length:
raise ValueError(f"Expected tuple of length {tuple_length} not {len(maybe_tuple)}")
for maybe_list in maybe_tuple:
if not isinstance(maybe_list, list):
msg = f"Expected tuple to contain {tuple_length} lists but found a {type(maybe_list)}"
raise TypeError(msg)
lengths = [len(item) for item in maybe_tuple]
if len(set(lengths)) != 1:
msg = f"Expected {tuple_length} lists with same length but lengths are {lengths}"
raise ValueError(msg)
if not allow_empty_lists and lengths[0] == 0:
raise ValueError(f"Expected {tuple_length} non-empty lists")
def check_filled(filled: cpy.FillReturn, fill_type: FillType | str) -> None:
    # Validate that filled has the structure required by fill_type: correct
    # tuple/list nesting, array dtypes and shapes, and consistent lengths
    # between the points, codes/offsets and outer_offsets arrays.
    # Raises TypeError or ValueError describing the first problem found.
    fill_type = as_fill_type(fill_type)
    if fill_type == FillType.OuterCode:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_OuterCode, filled)
        _check_tuple_of_lists_with_same_length(filled, 2)
        for i, (points, codes) in enumerate(zip(*filled)):
            check_point_array(points)
            check_code_array(codes)
            if len(points) != len(codes):
                raise ValueError(f"Points and codes have different lengths in polygon {i}")
    elif fill_type == FillType.OuterOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_OuterOffset, filled)
        _check_tuple_of_lists_with_same_length(filled, 2)
        for i, (points, offsets) in enumerate(zip(*filled)):
            check_point_array(points)
            check_offset_array(offsets)
            # Final offset must be one-past-the-end of the points array.
            if offsets[-1] != len(points):
                raise ValueError(f"Inconsistent points and offsets in polygon {i}")
    elif fill_type == FillType.ChunkCombinedCode:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedCode, filled)
        _check_tuple_of_lists_with_same_length(filled, 2, allow_empty_lists=False)
        for chunk, (points_or_none, codes_or_none) in enumerate(zip(*filled)):
            if points_or_none is not None and codes_or_none is not None:
                check_point_array(points_or_none)
                check_code_array(codes_or_none)
                if len(points_or_none) != len(codes_or_none):
                    raise ValueError(f"Points and codes have different lengths in chunk {chunk}")
            elif not (points_or_none is None and codes_or_none is None):
                # Within a chunk the arrays must be all None or all set.
                raise ValueError(f"Inconsistent Nones in chunk {chunk}")
    elif fill_type == FillType.ChunkCombinedOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedOffset, filled)
        _check_tuple_of_lists_with_same_length(filled, 2, allow_empty_lists=False)
        for chunk, (points_or_none, offsets_or_none) in enumerate(zip(*filled)):
            if points_or_none is not None and offsets_or_none is not None:
                check_point_array(points_or_none)
                check_offset_array(offsets_or_none)
                if offsets_or_none[-1] != len(points_or_none):
                    raise ValueError(f"Inconsistent points and offsets in chunk {chunk}")
            elif not (points_or_none is None and offsets_or_none is None):
                raise ValueError(f"Inconsistent Nones in chunk {chunk}")
    elif fill_type == FillType.ChunkCombinedCodeOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedCodeOffset, filled)
        _check_tuple_of_lists_with_same_length(filled, 3, allow_empty_lists=False)
        for i, (points_or_none, codes_or_none, outer_offsets_or_none) in enumerate(zip(*filled)):
            if (points_or_none is not None and codes_or_none is not None and
                    outer_offsets_or_none is not None):
                check_point_array(points_or_none)
                check_code_array(codes_or_none)
                check_offset_array(outer_offsets_or_none)
                if len(codes_or_none) != len(points_or_none):
                    raise ValueError(f"Points and codes have different lengths in chunk {i}")
                # For this fill type, outer_offsets index into the codes array.
                if outer_offsets_or_none[-1] != len(codes_or_none):
                    raise ValueError(f"Inconsistent codes and outer_offsets in chunk {i}")
            elif not (points_or_none is None and codes_or_none is None and
                      outer_offsets_or_none is None):
                raise ValueError(f"Inconsistent Nones in chunk {i}")
    elif fill_type == FillType.ChunkCombinedOffsetOffset:
        if TYPE_CHECKING:
            filled = cast(cpy.FillReturn_ChunkCombinedOffsetOffset, filled)
        _check_tuple_of_lists_with_same_length(filled, 3, allow_empty_lists=False)
        for i, (points_or_none, offsets_or_none, outer_offsets_or_none) in enumerate(zip(*filled)):
            if (points_or_none is not None and offsets_or_none is not None and
                    outer_offsets_or_none is not None):
                check_point_array(points_or_none)
                check_offset_array(offsets_or_none)
                check_offset_array(outer_offsets_or_none)
                if offsets_or_none[-1] != len(points_or_none):
                    raise ValueError(f"Inconsistent points and offsets in chunk {i}")
                # For this fill type, outer_offsets index into the offsets array,
                # hence the final value is len(offsets) - 1, not len(points).
                if outer_offsets_or_none[-1] != len(offsets_or_none) - 1:
                    raise ValueError(f"Inconsistent offsets and outer_offsets in chunk {i}")
            elif not (points_or_none is None and offsets_or_none is None and
                      outer_offsets_or_none is None):
                raise ValueError(f"Inconsistent Nones in chunk {i}")
    else:
        raise ValueError(f"Invalid FillType {fill_type}")
def check_lines(lines: cpy.LineReturn, line_type: LineType | str) -> None:
    # Validate that lines has the structure required by line_type: correct
    # tuple/list nesting, array dtypes and shapes, and consistent lengths
    # between the points and codes/offsets arrays.
    # Raises TypeError or ValueError describing the first problem found.
    line_type = as_line_type(line_type)
    if line_type == LineType.Separate:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_Separate, lines)
        if not isinstance(lines, list):
            raise TypeError(f"Expected list not {type(lines)}")
        for points in lines:
            check_point_array(points)
    elif line_type == LineType.SeparateCode:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_SeparateCode, lines)
        _check_tuple_of_lists_with_same_length(lines, 2)
        for i, (points, codes) in enumerate(zip(*lines)):
            check_point_array(points)
            check_code_array(codes)
            if len(points) != len(codes):
                raise ValueError(f"Points and codes have different lengths in line {i}")
    elif line_type == LineType.ChunkCombinedCode:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedCode, lines)
        _check_tuple_of_lists_with_same_length(lines, 2, allow_empty_lists=False)
        for chunk, (points_or_none, codes_or_none) in enumerate(zip(*lines)):
            if points_or_none is not None and codes_or_none is not None:
                check_point_array(points_or_none)
                check_code_array(codes_or_none)
                if len(points_or_none) != len(codes_or_none):
                    raise ValueError(f"Points and codes have different lengths in chunk {chunk}")
            elif not (points_or_none is None and codes_or_none is None):
                # Within a chunk the arrays must be all None or all set.
                raise ValueError(f"Inconsistent Nones in chunk {chunk}")
    elif line_type == LineType.ChunkCombinedOffset:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedOffset, lines)
        _check_tuple_of_lists_with_same_length(lines, 2, allow_empty_lists=False)
        for chunk, (points_or_none, offsets_or_none) in enumerate(zip(*lines)):
            if points_or_none is not None and offsets_or_none is not None:
                check_point_array(points_or_none)
                check_offset_array(offsets_or_none)
                # Final offset must be one-past-the-end of the points array.
                if offsets_or_none[-1] != len(points_or_none):
                    raise ValueError(f"Inconsistent points and offsets in chunk {chunk}")
            elif not (points_or_none is None and offsets_or_none is None):
                raise ValueError(f"Inconsistent Nones in chunk {chunk}")
    elif line_type == LineType.ChunkCombinedNan:
        if TYPE_CHECKING:
            lines = cast(cpy.LineReturn_ChunkCombinedNan, lines)
        _check_tuple_of_lists_with_same_length(lines, 1, allow_empty_lists=False)
        for _chunk, points_or_none in enumerate(lines[0]):
            if points_or_none is not None:
                check_point_array(points_or_none)
    else:
        raise ValueError(f"Invalid LineType {line_type}")
venv\Lib\site-packages\contourpy\types.py
from __future__ import annotations

import numpy as np

# dtypes of arrays returned by ContourPy.
point_dtype = np.float64   # (x, y) coordinate arrays
code_dtype = np.uint8      # Matplotlib Path kind-code arrays
offset_dtype = np.uint32   # offset (index) arrays into point arrays

# Kind codes used in Matplotlib Paths.
MOVETO = 1
LINETO = 2
CLOSEPOLY = 79
venv\Lib\site-packages\contourpy\_version.py
# Version string of the installed contourpy distribution.
__version__ = "1.3.3"
venv\Lib\site-packages\contourpy\__init__.py
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from contourpy._contourpy import (
ContourGenerator,
FillType,
LineType,
Mpl2005ContourGenerator,
Mpl2014ContourGenerator,
SerialContourGenerator,
ThreadedContourGenerator,
ZInterp,
max_threads,
)
from contourpy._version import __version__
from contourpy.chunk import calc_chunk_sizes
from contourpy.convert import (
convert_filled,
convert_lines,
convert_multi_filled,
convert_multi_lines,
)
from contourpy.dechunk import (
dechunk_filled,
dechunk_lines,
dechunk_multi_filled,
dechunk_multi_lines,
)
from contourpy.enum_util import as_fill_type, as_line_type, as_z_interp
if TYPE_CHECKING:
from typing import Any
from numpy.typing import ArrayLike
from ._contourpy import CoordinateArray, MaskArray
# Names exported as the public API of the contourpy package.
__all__ = [
    "__version__",
    "contour_generator",
    "convert_filled",
    "convert_lines",
    "convert_multi_filled",
    "convert_multi_lines",
    "dechunk_filled",
    "dechunk_lines",
    "dechunk_multi_filled",
    "dechunk_multi_lines",
    "max_threads",
    "FillType",
    "LineType",
    "ContourGenerator",
    "Mpl2005ContourGenerator",
    "Mpl2014ContourGenerator",
    "SerialContourGenerator",
    "ThreadedContourGenerator",
    "ZInterp",
]

# Simple mapping of algorithm name to class name.
# Used by contour_generator() to pick the concrete generator class.
_class_lookup: dict[str, type[ContourGenerator]] = {
    "mpl2005": Mpl2005ContourGenerator,
    "mpl2014": Mpl2014ContourGenerator,
    "serial": SerialContourGenerator,
    "threaded": ThreadedContourGenerator,
}
def _remove_z_mask(
z: ArrayLike | np.ma.MaskedArray[Any, Any] | None,
) -> tuple[CoordinateArray, MaskArray | None]:
# Preserve mask if present.
z_array = np.ma.asarray(z, dtype=np.float64) # type: ignore[no-untyped-call]
z_masked = np.ma.masked_invalid(z_array, copy=False) # type: ignore[no-untyped-call]
if np.ma.is_masked(z_masked):
mask = np.ma.getmask(z_masked)
else:
mask = None
return np.ma.getdata(z_masked), mask # type: ignore[no-untyped-call]
def contour_generator(
    x: ArrayLike | None = None,
    y: ArrayLike | None = None,
    z: ArrayLike | np.ma.MaskedArray[Any, Any] | None = None,
    *,
    name: str = "serial",
    corner_mask: bool | None = None,
    line_type: LineType | str | None = None,
    fill_type: FillType | str | None = None,
    chunk_size: int | tuple[int, int] | None = None,
    chunk_count: int | tuple[int, int] | None = None,
    total_chunk_count: int | None = None,
    quad_as_tri: bool = False,
    z_interp: ZInterp | str | None = ZInterp.Linear,
    thread_count: int = 0,
) -> ContourGenerator:
    """Create and return a :class:`~.ContourGenerator` object.

    The class and properties of the returned :class:`~.ContourGenerator` are determined by the
    function arguments, with sensible defaults.

    Args:
        x (array-like of shape (ny, nx) or (nx,), optional): The x-coordinates of the ``z`` values.
            May be 2D with the same shape as ``z.shape``, or 1D with length ``nx = z.shape[1]``.
            If not specified are assumed to be ``np.arange(nx)``. Must be ordered monotonically.
        y (array-like of shape (ny, nx) or (ny,), optional): The y-coordinates of the ``z`` values.
            May be 2D with the same shape as ``z.shape``, or 1D with length ``ny = z.shape[0]``.
            If not specified are assumed to be ``np.arange(ny)``. Must be ordered monotonically.
        z (array-like of shape (ny, nx), may be a masked array): The 2D gridded values to calculate
            the contours of. May be a masked array, and any invalid values (``np.inf`` or
            ``np.nan``) will also be masked out.
        name (str): Algorithm name, one of ``"serial"``, ``"threaded"``, ``"mpl2005"`` or
            ``"mpl2014"``, default ``"serial"``.
        corner_mask (bool, optional): Enable/disable corner masking, which only has an effect if
            ``z`` is a masked array. If ``False``, any quad touching a masked point is masked out.
            If ``True``, only the triangular corners of quads nearest these points are always masked
            out, other triangular corners comprising three unmasked points are contoured as usual.
            If not specified, uses the default provided by the algorithm ``name``.
        line_type (LineType or str, optional): The format of contour line data returned from calls
            to :meth:`~.ContourGenerator.lines`, specified either as a :class:`~.LineType` or its
            string equivalent such as ``"SeparateCode"``.
            If not specified, uses the default provided by the algorithm ``name``.
            The relationship between the :class:`~.LineType` enum and the data format returned from
            :meth:`~.ContourGenerator.lines` is explained at :ref:`line_type`.
        fill_type (FillType or str, optional): The format of filled contour data returned from calls
            to :meth:`~.ContourGenerator.filled`, specified either as a :class:`~.FillType` or its
            string equivalent such as ``"OuterOffset"``.
            If not specified, uses the default provided by the algorithm ``name``.
            The relationship between the :class:`~.FillType` enum and the data format returned from
            :meth:`~.ContourGenerator.filled` is explained at :ref:`fill_type`.
        chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same
            size in both directions if only one value is specified.
        chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the
            same count in both directions if only one value is specified.
        total_chunk_count (int, optional): Total number of chunks.
        quad_as_tri (bool): Enable/disable treating quads as 4 triangles, default ``False``.
            If ``False``, a contour line within a quad is a straight line between points on two of
            its edges. If ``True``, each full quad is divided into 4 triangles using a virtual point
            at the centre (mean x, y of the corner points) and a contour line is piecewise linear
            within those triangles. Corner-masked triangles are not affected by this setting, only
            full unmasked quads.
        z_interp (ZInterp or str, optional): How to interpolate ``z`` values when determining where
            contour lines intersect the edges of quads and the ``z`` values of the central points of
            quads, specified either as a :class:`~contourpy.ZInterp` or its string equivalent such
            as ``"Log"``. Default is ``ZInterp.Linear``.
        thread_count (int): Number of threads to use for contour calculation, default 0. Threads can
            only be used with an algorithm ``name`` that supports threads (currently only
            ``name="threaded"``) and there must be at least the same number of chunks as threads.
            If ``thread_count=0`` and ``name="threaded"`` then it uses the maximum number of threads
            as determined by the C++11 call ``std::thread::hardware_concurrency()``. If ``name`` is
            something other than ``"threaded"`` then the ``thread_count`` will be set to ``1``.

    Return:
        :class:`~.ContourGenerator`.

    Note:
        A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be
        specified.

    Warning:
        The ``name="mpl2005"`` algorithm does not implement chunking for contour lines.
    """
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    z, mask = _remove_z_mask(z)

    # Check arguments: z.
    if z.ndim != 2:
        raise TypeError(f"Input z must be 2D, not {z.ndim}D")
    if z.shape[0] < 2 or z.shape[1] < 2:
        raise TypeError(f"Input z must be at least a (2, 2) shaped array, but has shape {z.shape}")
    ny, nx = z.shape

    # Check arguments: x and y.
    if x.ndim != y.ndim:
        raise TypeError(f"Number of dimensions of x ({x.ndim}) and y ({y.ndim}) do not match")
    if x.ndim == 0:
        # np.asarray(None) gives a 0-d array, i.e. x and y were not specified:
        # default to index-based coordinate grids.
        x = np.arange(nx, dtype=np.float64)
        y = np.arange(ny, dtype=np.float64)
        x, y = np.meshgrid(x, y)
    elif x.ndim == 1:
        if len(x) != nx:
            raise TypeError(f"Length of x ({len(x)}) must match number of columns in z ({nx})")
        if len(y) != ny:
            raise TypeError(f"Length of y ({len(y)}) must match number of rows in z ({ny})")
        # Broadcast 1D coordinates up to full 2D grids.
        x, y = np.meshgrid(x, y)
    elif x.ndim == 2:
        if x.shape != z.shape:
            raise TypeError(f"Shapes of x {x.shape} and z {z.shape} do not match")
        if y.shape != z.shape:
            raise TypeError(f"Shapes of y {y.shape} and z {z.shape} do not match")
    else:
        raise TypeError(f"Inputs x and y must be None, 1D or 2D, not {x.ndim}D")

    # Check mask shape just in case.
    if mask is not None and mask.shape != z.shape:
        raise ValueError("If mask is set it must be a 2D array with the same shape as z")

    # Check arguments: name.
    if name not in _class_lookup:
        raise ValueError(f"Unrecognised contour generator name: {name}")

    # Check arguments: chunk_size, chunk_count and total_chunk_count.
    y_chunk_size, x_chunk_size = calc_chunk_sizes(
        chunk_size, chunk_count, total_chunk_count, ny, nx)

    cls = _class_lookup[name]

    # Check arguments: corner_mask.
    if corner_mask is None:
        # Set it to default, which is True if the algorithm supports it.
        corner_mask = cls.supports_corner_mask()
    elif corner_mask and not cls.supports_corner_mask():
        raise ValueError(f"{name} contour generator does not support corner_mask=True")

    # Check arguments: line_type.
    if line_type is None:
        line_type = cls.default_line_type
    else:
        line_type = as_line_type(line_type)
    if not cls.supports_line_type(line_type):
        raise ValueError(f"{name} contour generator does not support line_type {line_type}")

    # Check arguments: fill_type.
    if fill_type is None:
        fill_type = cls.default_fill_type
    else:
        fill_type = as_fill_type(fill_type)
    if not cls.supports_fill_type(fill_type):
        raise ValueError(f"{name} contour generator does not support fill_type {fill_type}")

    # Check arguments: quad_as_tri.
    if quad_as_tri and not cls.supports_quad_as_tri():
        raise ValueError(f"{name} contour generator does not support quad_as_tri=True")

    # Check arguments: z_interp.
    if z_interp is None:
        z_interp = ZInterp.Linear
    else:
        z_interp = as_z_interp(z_interp)
    if z_interp != ZInterp.Linear and not cls.supports_z_interp():
        raise ValueError(f"{name} contour generator does not support z_interp {z_interp}")

    # Check arguments: thread_count.
    if thread_count not in (0, 1) and not cls.supports_threads():
        raise ValueError(f"{name} contour generator does not support thread_count {thread_count}")

    # Prepare args and kwargs for contour generator constructor.
    # Only keyword arguments supported by the chosen class are passed.
    args = [x, y, z, mask]
    kwargs: dict[str, int | bool | LineType | FillType | ZInterp] = {
        "x_chunk_size": x_chunk_size,
        "y_chunk_size": y_chunk_size,
    }
    if name not in ("mpl2005", "mpl2014"):
        kwargs["line_type"] = line_type
        kwargs["fill_type"] = fill_type
    if cls.supports_corner_mask():
        kwargs["corner_mask"] = corner_mask
    if cls.supports_quad_as_tri():
        kwargs["quad_as_tri"] = quad_as_tri
    if cls.supports_z_interp():
        kwargs["z_interp"] = z_interp
    if cls.supports_threads():
        kwargs["thread_count"] = thread_count

    # Create contour generator.
    return cls(*args, **kwargs)
venv\Lib\site-packages\cycler\__init__.py
"""
Cycler
======
Cycling through combinations of values, producing dictionaries.
You can add cyclers::
from cycler import cycler
cc = (cycler(color=list('rgb')) +
cycler(linestyle=['-', '--', '-.']))
for d in cc:
print(d)
Results in::
{'color': 'r', 'linestyle': '-'}
{'color': 'g', 'linestyle': '--'}
{'color': 'b', 'linestyle': '-.'}
You can multiply cyclers::
from cycler import cycler
cc = (cycler(color=list('rgb')) *
cycler(linestyle=['-', '--', '-.']))
for d in cc:
print(d)
Results in::
{'color': 'r', 'linestyle': '-'}
{'color': 'r', 'linestyle': '--'}
{'color': 'r', 'linestyle': '-.'}
{'color': 'g', 'linestyle': '-'}
{'color': 'g', 'linestyle': '--'}
{'color': 'g', 'linestyle': '-.'}
{'color': 'b', 'linestyle': '-'}
{'color': 'b', 'linestyle': '--'}
{'color': 'b', 'linestyle': '-.'}
"""
from __future__ import annotations
from collections.abc import Hashable, Iterable, Generator
import copy
from functools import reduce
from itertools import product, cycle
from operator import mul, add
# Dict, List, Union required for runtime cast calls
from typing import TypeVar, Generic, Callable, Union, Dict, List, Any, overload, cast
# Package version (PEP 440).
__version__ = "0.12.1"

# Type variables: K/L are cycler keys (must be hashable), V/U are values.
K = TypeVar("K", bound=Hashable)
L = TypeVar("L", bound=Hashable)
V = TypeVar("V")
U = TypeVar("U")
def _process_keys(
left: Cycler[K, V] | Iterable[dict[K, V]],
right: Cycler[K, V] | Iterable[dict[K, V]] | None,
) -> set[K]:
"""
Helper function to compose cycler keys.
Parameters
----------
left, right : iterable of dictionaries or None
The cyclers to be composed.
Returns
-------
keys : set
The keys in the composition of the two cyclers.
"""
l_peek: dict[K, V] = next(iter(left)) if left != [] else {}
r_peek: dict[K, V] = next(iter(right)) if right is not None else {}
l_key: set[K] = set(l_peek.keys())
r_key: set[K] = set(r_peek.keys())
if l_key & r_key:
raise ValueError("Can not compose overlapping cycles")
return l_key | r_key
def concat(left: Cycler[K, V], right: Cycler[K, U]) -> Cycler[K, V | U]:
    r"""
    Concatenate `Cycler`\s, as if chained using `itertools.chain`.

    The keys must match exactly.

    Examples
    --------
    >>> num = cycler('a', range(3))
    >>> let = cycler('a', 'abc')
    >>> num.concat(let)
    cycler('a', [0, 1, 2, 'a', 'b', 'c'])

    Returns
    -------
    `Cycler`
        The concatenated cycler.
    """
    if left.keys != right.keys:
        message = (
            "Keys do not match:\n"
            "\tIntersection: {both!r}\n"
            "\tDisjoint: {just_one!r}"
        ).format(both=left.keys & right.keys, just_one=left.keys ^ right.keys)
        raise ValueError(message)
    # Transpose each side to key -> list-of-values, then join per key.
    left_values = cast(Dict[K, List[Union[V, U]]], left.by_key())
    right_values = cast(Dict[K, List[Union[V, U]]], right.by_key())
    per_key = (_cycler(key, left_values[key] + right_values[key]) for key in left.keys)
    return reduce(add, per_key)
class Cycler(Generic[K, V]):
    """
    Composable cycles.

    This class has compositions methods:

    ``+``
      for 'inner' products (zip)

    ``+=``
      in-place ``+``

    ``*``
      for outer products (`itertools.product`) and integer multiplication

    ``*=``
      in-place ``*``

    and supports basic slicing via ``[]``.

    Parameters
    ----------
    left, right : Cycler or None
        The 'left' and 'right' cyclers.
    op : func or None
        Function which composes the 'left' and 'right' cyclers.
    """

    def __call__(self):
        # Iterating a Cycler is finite; calling it returns an endless cycle.
        return cycle(self)

    def __init__(
        self,
        left: Cycler[K, V] | Iterable[dict[K, V]] | None,
        right: Cycler[K, V] | None = None,
        op: Any = None,
    ):
        """
        Semi-private init.

        Do not use this directly, use `cycler` function instead.
        """
        if isinstance(left, Cycler):
            self._left: Cycler[K, V] | list[dict[K, V]] = Cycler(
                left._left, left._right, left._op
            )
        elif left is not None:
            # Need to copy the dictionary or else that will be a residual
            # mutable that could lead to strange errors
            self._left = [copy.copy(v) for v in left]
        else:
            self._left = []

        if isinstance(right, Cycler):
            self._right: Cycler[K, V] | None = Cycler(
                right._left, right._right, right._op
            )
        else:
            self._right = None

        self._keys: set[K] = _process_keys(self._left, self._right)
        self._op: Any = op

    def __contains__(self, k):
        return k in self._keys

    @property
    def keys(self) -> set[K]:
        """The keys this Cycler knows about."""
        # Return a copy so callers cannot mutate internal state.
        return set(self._keys)

    def change_key(self, old: K, new: K) -> None:
        """
        Change a key in this cycler to a new name.
        Modification is performed in-place.

        Does nothing if the old key is the same as the new key.
        Raises a ValueError if the new key is already a key.
        Raises a KeyError if the old key isn't a key.
        """
        if old == new:
            return
        if new in self._keys:
            raise ValueError(
                f"Can't replace {old} with {new}, {new} is already a key"
            )
        if old not in self._keys:
            raise KeyError(
                f"Can't replace {old} with {new}, {old} is not a key"
            )

        self._keys.remove(old)
        self._keys.add(new)

        if self._right is not None and old in self._right.keys:
            self._right.change_key(old, new)
        # self._left should always be non-None
        # if self._keys is non-empty.
        elif isinstance(self._left, Cycler):
            self._left.change_key(old, new)
        else:
            # It should be completely safe at this point to
            # assume that the old key can be found in each
            # iteration.
            self._left = [{new: entry[old]} for entry in self._left]

    @classmethod
    def _from_iter(cls, label: K, itr: Iterable[V]) -> Cycler[K, V]:
        """
        Class method to create 'base' Cycler objects
        that do not have a 'right' or 'op' and for which
        the 'left' object is not another Cycler.

        Parameters
        ----------
        label : hashable
            The property key.

        itr : iterable
            Finite length iterable of the property values.

        Returns
        -------
        `Cycler`
            New 'base' cycler.
        """
        ret: Cycler[K, V] = cls(None)
        ret._left = list({label: v} for v in itr)
        ret._keys = {label}
        return ret

    def __getitem__(self, key: slice) -> Cycler[K, V]:
        # TODO : maybe add numpy style fancy slicing
        if isinstance(key, slice):
            trans = self.by_key()
            return reduce(add, (_cycler(k, v[key]) for k, v in trans.items()))
        else:
            raise ValueError("Can only use slices with Cycler.__getitem__")

    def __iter__(self) -> Generator[dict[K, V], None, None]:
        if self._right is None:
            for left in self._left:
                yield dict(left)
        else:
            if self._op is None:
                raise TypeError(
                    "Operation cannot be None when both left and right are defined"
                )
            # Compose left and right entries via the stored operation
            # (zip for '+', itertools.product for '*').
            for a, b in self._op(self._left, self._right):
                out = {}
                out.update(a)
                out.update(b)
                yield out

    def __add__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        """
        Pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if len(self) != len(other):
            raise ValueError(
                f"Can only add equal length cycles, not {len(self)} and {len(other)}"
            )
        return Cycler(
            cast(Cycler[Union[K, L], Union[V, U]], self),
            cast(Cycler[Union[K, L], Union[V, U]], other),
            zip
        )

    @overload
    def __mul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        ...

    @overload
    def __mul__(self, other: int) -> Cycler[K, V]:
        ...

    def __mul__(self, other):
        """
        Outer product of two cyclers (`itertools.product`) or integer
        multiplication.

        Parameters
        ----------
        other : Cycler or int
        """
        if isinstance(other, Cycler):
            return Cycler(
                cast(Cycler[Union[K, L], Union[V, U]], self),
                cast(Cycler[Union[K, L], Union[V, U]], other),
                product
            )
        elif isinstance(other, int):
            # Integer multiplication repeats each key's value list.
            trans = self.by_key()
            return reduce(
                add, (_cycler(k, v * other) for k, v in trans.items())
            )
        else:
            return NotImplemented

    @overload
    def __rmul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        ...

    @overload
    def __rmul__(self, other: int) -> Cycler[K, V]:
        ...

    def __rmul__(self, other):
        return self * other

    def __len__(self) -> int:
        # zip truncates to the shorter side; product multiplies lengths.
        op_dict: dict[Callable, Callable[[int, int], int]] = {zip: min, product: mul}
        if self._right is None:
            return len(self._left)
        l_len = len(self._left)
        r_len = len(self._right)
        return op_dict[self._op](l_len, r_len)

    # iadd and imul do not expand the type as the returns must be consistent with
    # self, thus they flag as inconsistent with add/mul
    def __iadd__(self, other: Cycler[K, V]) -> Cycler[K, V]:  # type: ignore[misc]
        """
        In-place pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot += with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = zip
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __imul__(self, other: Cycler[K, V] | int) -> Cycler[K, V]:  # type: ignore[misc]
        """
        In-place outer product of two cyclers (`itertools.product`).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot *= with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = product
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Cycler):
            return False
        if len(self) != len(other):
            return False
        if self.keys ^ other.keys:
            return False
        return all(a == b for a, b in zip(self, other))

    __hash__ = None  # type: ignore

    def __repr__(self) -> str:
        op_map = {zip: "+", product: "*"}
        if self._right is None:
            lab = self.keys.pop()
            itr = list(v[lab] for v in self)
            return f"cycler({lab!r}, {itr!r})"
        else:
            op = op_map.get(self._op, "?")
            msg = "({left!r} {op} {right!r})"
            return msg.format(left=self._left, op=op, right=self._right)

    def _repr_html_(self) -> str:
        # An HTML table showing the value of each key through a full cycle.
        # NOTE(review): the HTML tag literals were stripped from the dumped
        # source (leaving broken strings); reconstructed per cycler 0.12.1.
        output = "<table>"
        sorted_keys = sorted(self.keys, key=repr)
        for key in sorted_keys:
            output += f"<th>{key!r}</th>"
        for d in iter(self):
            output += "<tr>"
            for k in sorted_keys:
                output += f"<td>{d[k]!r}</td>"
            output += "</tr>"
        output += "</table>"
        return output

    def by_key(self) -> dict[K, list[V]]:
        """
        Values by key.

        This returns the transposed values of the cycler. Iterating
        over a `Cycler` yields dicts with a single value for each key,
        this method returns a `dict` of `list` which are the values
        for the given key.

        The returned value can be used to create an equivalent `Cycler`
        using only `+`.

        Returns
        -------
        transpose : dict
            dict of lists of the values for each key.
        """
        # TODO : sort out if this is a bottle neck, if there is a better way
        # and if we care.
        keys = self.keys
        out: dict[K, list[V]] = {k: list() for k in keys}
        for d in self:
            for k in keys:
                out[k].append(d[k])
        return out

    # for back compatibility
    _transpose = by_key

    def simplify(self) -> Cycler[K, V]:
        """
        Simplify the cycler into a sum (but no products) of cyclers.

        Returns
        -------
        simple : Cycler
        """
        # TODO: sort out if it is worth the effort to make sure this is
        # balanced.  Currently it is
        # (((a + b) + c) + d) vs
        # ((a + b) + (c + d))
        # I would believe that there is some performance implications
        trans = self.by_key()
        return reduce(add, (_cycler(k, v) for k, v in trans.items()))

    concat = concat
@overload
def cycler(arg: Cycler[K, V]) -> Cycler[K, V]:
    ...


@overload
def cycler(**kwargs: Iterable[V]) -> Cycler[str, V]:
    ...


@overload
def cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]:
    ...


def cycler(*args, **kwargs):
    """
    Create a new `Cycler` object from a single positional argument,
    a pair of positional arguments, or the combination of keyword arguments.

    cycler(arg)
    cycler(label1=itr1[, label2=iter2[, ...]])
    cycler(label, itr)

    Form 1 simply copies a given `Cycler` object.

    Form 2 composes a `Cycler` as an inner product of the
    pairs of keyword arguments. In other words, all of the
    iterables are cycled simultaneously, as if through zip().

    Form 3 creates a `Cycler` from a label and an iterable.
    This is useful for when the label cannot be a keyword argument
    (e.g., an integer or a name that has a space in it).

    Parameters
    ----------
    arg : Cycler
        Copy constructor for Cycler (does a shallow copy of iterables).
    label : name
        The property key. In the 2-arg form of the function,
        the label can be any hashable object. In the keyword argument
        form of the function, it must be a valid python identifier.
    itr : iterable
        Finite length iterable of the property values.
        Can be a single-property `Cycler` that would
        be like a key change, but as a shallow copy.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property
    """
    # Positional and keyword forms are mutually exclusive.
    if args and kwargs:
        raise TypeError(
            "cycler() can only accept positional OR keyword arguments -- not both."
        )

    n_positional = len(args)
    if n_positional == 1:
        if not isinstance(args[0], Cycler):
            raise TypeError(
                "If only one positional argument given, it must "
                "be a Cycler instance."
            )
        return Cycler(args[0])
    if n_positional == 2:
        return _cycler(*args)
    if n_positional > 2:
        raise TypeError(
            "Only a single Cycler can be accepted as the lone "
            "positional argument. Use keyword arguments instead."
        )

    if kwargs:
        # Inner product of all keyword-specified cycles.
        return reduce(add, (_cycler(k, v) for k, v in kwargs.items()))

    raise TypeError("Must have at least a positional OR keyword arguments")
def _cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]:
    """
    Create a new `Cycler` object from a property name and iterable of values.

    Parameters
    ----------
    label : hashable
        The property key.
    itr : iterable
        Finite length iterable of the property values.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property
    """
    if isinstance(itr, Cycler):
        # A single-property Cycler acts like a key change: extract its
        # value stream under the new label.
        keys = itr.keys
        if len(keys) != 1:
            raise ValueError("Can not create Cycler from a multi-property Cycler")
        lab = keys.pop()
        # Doesn't need to be a new list because
        # _from_iter() will be creating that new list anyway.
        itr = (entry[lab] for entry in itr)
    return Cycler._from_iter(label, itr)
venv\Lib\site-packages\dateutil\easter.py
# -*- coding: utf-8 -*-
"""
This module offers a generic Easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different Easter
calculation methods:
1. Original calculation in Julian calendar, valid in
dates after 326 AD
2. Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3. Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
* ``EASTER_JULIAN = 1``
* ``EASTER_ORTHODOX = 2``
* ``EASTER_WESTERN = 3``
The default method is method 3.
More about the algorithm may be found at:
`GM Arts: Easter Algorithms `_
and
`The Calendar FAQ: Easter `_
"""
if not (1 <= method <= 3):
raise ValueError("invalid method")
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g + 15) % 30
j = (y + y//4 + i) % 7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e + y//100 - 16 - (y//100 - 16)//4
else:
# New method
c = y//100
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
j = (y + y//4 + i + 2 - c + c//4) % 7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i - j + e
d = 1 + (p + 27 + (p + 6)//40) % 31
m = 3 + (p + 26)//30
return datetime.date(int(y), int(m), int(d))
venv\Lib\site-packages\dateutil\relativedelta.py
# -*- coding: utf-8 -*-
import datetime
import calendar
import operator
from math import copysign
from six import integer_types
from warnings import warn
from ._common import weekday
# Weekday singletons (MO=0 ... SU=6) built from the shared weekday helper;
# also exposed as the `weekdays` tuple for integer indexing.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))

__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
"""
The relativedelta type is designed to be applied to an existing datetime and
can replace specific components of that datetime, or represents an interval
of time.
It is based on the specification of the excellent work done by M.-A. Lemburg
in his
`mx.DateTime `_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an arithmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding arithmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc) available in the
relativedelta module. These instances may receive a parameter N,
specifying the Nth weekday, which could be positive or negative
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+1. You can also use an integer, where 0=MO. This argument is always
relative e.g. if the calculated date is already Monday, using MO(1)
or MO(-1) won't change the day. To effectively make it absolute, use
it in combination with the day argument (e.g. day=1, MO(1) for first
Monday of the month).
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
There are relative and absolute forms of the keyword
arguments. The plural is relative, and the singular is
absolute. For each argument in the order below, the absolute form
is applied first (by setting each attribute to that value) and
then the relative form (by adding the value to the attribute).
The order of attributes considered when this relativedelta is
added to a datetime is:
1. Year
2. Month
3. Day
4. Hours
5. Minutes
6. Seconds
7. Microseconds
Finally, weekday is applied, using the rule described above.
For example
>>> from datetime import datetime
>>> from dateutil.relativedelta import relativedelta, MO
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
>>> dt + delta
datetime.datetime(2018, 4, 2, 14, 37)
First, the day is set to 1 (the first of the month), then 25 hours
are added, to get to the 2nd day and 14th hour, finally the
weekday is applied, but since the 2nd is already a Monday there is
no effect.
"""
def __init__(self, dt1=None, dt2=None,
             years=0, months=0, days=0, leapdays=0, weeks=0,
             hours=0, minutes=0, seconds=0, microseconds=0,
             year=None, month=None, day=None, weekday=None,
             yearday=None, nlyearday=None,
             hour=None, minute=None, second=None, microsecond=None):
    """Build a relativedelta either as the difference of two date/datetime
    objects (``dt1`` and ``dt2`` both given) or from explicit keyword
    arguments (plural names are relative offsets, singular names are
    absolute replacements - see the class docstring)."""
    if dt1 and dt2:
        # datetime is a subclass of date. So both must be date
        if not (isinstance(dt1, datetime.date) and
                isinstance(dt2, datetime.date)):
            raise TypeError("relativedelta only diffs datetime/date")

        # We allow two dates, or two datetimes, so we coerce them to be
        # of the same type
        if (isinstance(dt1, datetime.datetime) !=
                isinstance(dt2, datetime.datetime)):
            if not isinstance(dt1, datetime.datetime):
                dt1 = datetime.datetime.fromordinal(dt1.toordinal())
            elif not isinstance(dt2, datetime.datetime):
                dt2 = datetime.datetime.fromordinal(dt2.toordinal())

        # Start from a zeroed delta; the difference is accumulated below.
        self.years = 0
        self.months = 0
        self.days = 0
        self.leapdays = 0
        self.hours = 0
        self.minutes = 0
        self.seconds = 0
        self.microseconds = 0
        self.year = None
        self.month = None
        self.day = None
        self.weekday = None
        self.hour = None
        self.minute = None
        self.second = None
        self.microsecond = None
        self._has_time = 0

        # Get year / month delta between the two
        months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
        self._set_months(months)

        # Remove the year/month delta so the timedelta is just well-defined
        # time units (seconds, days and microseconds)
        dtm = self.__radd__(dt2)

        # If we've overshot our target, make an adjustment
        if dt1 < dt2:
            compare = operator.gt
            increment = 1
        else:
            compare = operator.lt
            increment = -1

        # Walk the month count back one step at a time until dt2 + delta
        # no longer overshoots dt1.
        while compare(dt1, dtm):
            months += increment
            self._set_months(months)
            dtm = self.__radd__(dt2)

        # Get the timedelta between the "months-adjusted" date and dt1
        delta = dt1 - dtm
        self.seconds = delta.seconds + delta.days * 86400
        self.microseconds = delta.microseconds
    else:
        # Check for non-integer values in integer-only quantities
        if any(x is not None and x != int(x) for x in (years, months)):
            raise ValueError("Non-integer years and months are "
                             "ambiguous and not currently supported.")

        # Relative information
        self.years = int(years)
        self.months = int(months)
        self.days = days + weeks * 7
        self.leapdays = leapdays
        self.hours = hours
        self.minutes = minutes
        self.seconds = seconds
        self.microseconds = microseconds

        # Absolute information
        self.year = year
        self.month = month
        self.day = day
        self.hour = hour
        self.minute = minute
        self.second = second
        self.microsecond = microsecond

        if any(x is not None and int(x) != x
               for x in (year, month, day, hour,
                         minute, second, microsecond)):
            # For now we'll deprecate floats - later it'll be an error.
            warn("Non-integer value passed as absolute information. " +
                 "This is not a well-defined condition and will raise " +
                 "errors in future versions.", DeprecationWarning)

        # An integer weekday selects the corresponding singleton (0=MO).
        if isinstance(weekday, integer_types):
            self.weekday = weekdays[weekday]
        else:
            self.weekday = weekday

        # yearday/nlyearday are converted to month/day (+leapdays).
        yday = 0
        if nlyearday:
            yday = nlyearday
        elif yearday:
            yday = yearday
            if yearday > 59:
                # Past 28 February the ordinary yearday count includes the
                # leap day, so compensate with leapdays = -1.
                self.leapdays = -1
        if yday:
            # Cumulative day count at the end of each month (non-leap year).
            ydayidx = [31, 59, 90, 120, 151, 181, 212,
                       243, 273, 304, 334, 366]
            for idx, ydays in enumerate(ydayidx):
                if yday <= ydays:
                    self.month = idx+1
                    if idx == 0:
                        self.day = yday
                    else:
                        self.day = yday-ydayidx[idx-1]
                    break
            else:
                raise ValueError("invalid year day (%d)" % yday)

    self._fix()
def _fix(self):
    """Carry overflow between adjacent units and refresh ``_has_time``.

    Microseconds spill into seconds, seconds into minutes, minutes into
    hours, hours into days, and months into years, each preserving the
    sign of the overflowing value.
    """
    # (attribute, carry-target, base, in-range limit) for each cascade step.
    cascade = (("microseconds", "seconds", 1000000, 999999),
               ("seconds", "minutes", 60, 59),
               ("minutes", "hours", 60, 59),
               ("hours", "days", 24, 23))
    for src, dst, base, limit in cascade:
        value = getattr(self, src)
        if abs(value) > limit:
            s = -1 if value < 0 else 1
            div, mod = divmod(value * s, base)
            setattr(self, src, mod * s)
            setattr(self, dst, getattr(self, dst) + div * s)
    if abs(self.months) > 11:
        s = -1 if self.months < 0 else 1
        div, mod = divmod(self.months * s, 12)
        self.months = mod * s
        self.years += div * s
    # A delta "has time" when any sub-day relative field or any absolute
    # time-of-day field is present.
    if (self.hours or self.minutes or self.seconds or self.microseconds
            or self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
        self._has_time = 1
    else:
        self._has_time = 0
@property
def weeks(self):
    """Whole weeks contained in :attr:`days`, truncated toward zero."""
    return int(self.days / 7.0)

@weeks.setter
def weeks(self, value):
    # Swap the current whole-week component for the new one, keeping
    # any leftover days (days + 7 * (value - current_weeks)).
    self.days += (value - self.weeks) * 7
def _set_months(self, months):
    """Assign *months*, spilling whole years into :attr:`years`.

    :attr:`years` is overwritten (not accumulated), so repeated calls
    always reflect only the latest *months* value.
    """
    self.months = months
    if abs(months) <= 11:
        self.years = 0
        return
    negative = months < 0
    whole_years, leftover = divmod(abs(months), 12)
    if negative:
        self.months = -leftover
        self.years = -whole_years
    else:
        self.months = leftover
        self.years = whole_years
def normalized(self):
    """
    Return a version of this object represented entirely using integer
    values for the relative attributes.

    >>> relativedelta(days=1.5, hours=2).normalized()
    relativedelta(days=+1, hours=+14)

    :return:
        Returns a :class:`dateutil.relativedelta.relativedelta` object.
    """
    # Truncate each unit and push its fractional remainder into the next
    # finer unit; each intermediate is rounded to suppress float noise.
    whole_days = int(self.days)
    frac_hours = round(self.hours + 24 * (self.days - whole_days), 11)
    whole_hours = int(frac_hours)
    frac_minutes = round(self.minutes + 60 * (frac_hours - whole_hours), 10)
    whole_minutes = int(frac_minutes)
    frac_seconds = round(self.seconds + 60 * (frac_minutes - whole_minutes), 8)
    whole_seconds = int(frac_seconds)
    whole_us = round(self.microseconds + 1e6 * (frac_seconds - whole_seconds))
    # The constructor calls _fix(), which carries overflow back up.
    return self.__class__(years=self.years, months=self.months,
                          days=whole_days, hours=whole_hours,
                          minutes=whole_minutes, seconds=whole_seconds,
                          microseconds=whole_us, leapdays=self.leapdays,
                          year=self.year, month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond)
def __add__(self, other):
    """Add this delta to another delta, a timedelta, or a date/datetime.

    * relativedelta + relativedelta: relative fields add; absolute
      fields take ``other``'s value when set, else ``self``'s.
    * relativedelta + timedelta: timedelta's days/seconds/microseconds
      fold into the corresponding relative fields.
    * relativedelta + date/datetime: full calendar arithmetic (month
      rollover, leap-day adjustment, weekday jump).
    """
    if isinstance(other, relativedelta):
        # leapdays keeps the first truthy value of the pair.
        return self.__class__(years=other.years + self.years,
                             months=other.months + self.months,
                             days=other.days + self.days,
                             hours=other.hours + self.hours,
                             minutes=other.minutes + self.minutes,
                             seconds=other.seconds + self.seconds,
                             microseconds=(other.microseconds +
                                           self.microseconds),
                             leapdays=other.leapdays or self.leapdays,
                             year=(other.year if other.year is not None
                                   else self.year),
                             month=(other.month if other.month is not None
                                    else self.month),
                             day=(other.day if other.day is not None
                                  else self.day),
                             weekday=(other.weekday if other.weekday is not None
                                      else self.weekday),
                             hour=(other.hour if other.hour is not None
                                   else self.hour),
                             minute=(other.minute if other.minute is not None
                                     else self.minute),
                             second=(other.second if other.second is not None
                                     else self.second),
                             microsecond=(other.microsecond if other.microsecond
                                          is not None else
                                          self.microsecond))
    if isinstance(other, datetime.timedelta):
        return self.__class__(years=self.years,
                              months=self.months,
                              days=self.days + other.days,
                              hours=self.hours,
                              minutes=self.minutes,
                              seconds=self.seconds + other.seconds,
                              microseconds=self.microseconds + other.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)
    if not isinstance(other, datetime.date):
        return NotImplemented
    elif self._has_time and not isinstance(other, datetime.datetime):
        # Promote a bare date to midnight so time fields can apply.
        other = datetime.datetime.fromordinal(other.toordinal())
    year = (self.year or other.year)+self.years
    month = self.month or other.month
    if self.months:
        assert 1 <= abs(self.months) <= 12
        month += self.months
        # Normalize a month that stepped outside 1..12 into the
        # neighboring year.
        if month > 12:
            year += 1
            month -= 12
        elif month < 1:
            year -= 1
            month += 12
    # Clamp the day to the target month's length (e.g. Jan 31 + 1 month
    # -> Feb 28/29).
    day = min(calendar.monthrange(year, month)[1],
              self.day or other.day)
    repl = {"year": year, "month": month, "day": day}
    for attr in ["hour", "minute", "second", "microsecond"]:
        value = getattr(self, attr)
        if value is not None:
            repl[attr] = value
    days = self.days
    # leapdays only applies once the result is past February of a leap
    # year.
    if self.leapdays and month > 2 and calendar.isleap(year):
        days += self.leapdays
    ret = (other.replace(**repl)
           + datetime.timedelta(days=days,
                                hours=self.hours,
                                minutes=self.minutes,
                                seconds=self.seconds,
                                microseconds=self.microseconds))
    if self.weekday:
        # Jump to the nth matching weekday: forward for n > 0,
        # backward for n < 0; n absent counts as 1.
        weekday, nth = self.weekday.weekday, self.weekday.n or 1
        jumpdays = (abs(nth) - 1) * 7
        if nth > 0:
            jumpdays += (7 - ret.weekday() + weekday) % 7
        else:
            jumpdays += (ret.weekday() - weekday) % 7
            jumpdays *= -1
        ret += datetime.timedelta(days=jumpdays)
    return ret
def __radd__(self, other):
    # Addition is symmetric here, so date + relativedelta reuses
    # __add__ unchanged.
    return self.__add__(other)
def __rsub__(self, other):
    # other - self  ==  (-self) + other
    return self.__neg__().__radd__(other)
def __sub__(self, other):
    """Subtract another relativedelta, field by field.

    Relative components subtract; absolute components keep ``self``'s
    value when set, falling back to ``other``'s; ``leapdays`` keeps the
    first truthy value.
    """
    if not isinstance(other, relativedelta):
        return NotImplemented  # In case the other object defines __rsub__
    combined = {
        attr: getattr(self, attr) - getattr(other, attr)
        for attr in ("years", "months", "days", "hours",
                     "minutes", "seconds", "microseconds")
    }
    combined["leapdays"] = self.leapdays or other.leapdays
    for attr in ("year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"):
        mine = getattr(self, attr)
        combined[attr] = mine if mine is not None else getattr(other, attr)
    return self.__class__(**combined)
def __abs__(self):
    """Return a copy with every relative component made non-negative.

    ``leapdays`` and the absolute fields are carried over unchanged.
    """
    magnitudes = {name: abs(getattr(self, name))
                  for name in ("years", "months", "days", "hours",
                               "minutes", "seconds", "microseconds")}
    return self.__class__(leapdays=self.leapdays, year=self.year,
                          month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond, **magnitudes)
def __neg__(self):
    """Return a copy with every relative component negated.

    ``leapdays`` and the absolute fields are carried over unchanged.
    """
    flipped = {name: -getattr(self, name)
               for name in ("years", "months", "days", "hours",
                            "minutes", "seconds", "microseconds")}
    return self.__class__(leapdays=self.leapdays, year=self.year,
                          month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond, **flipped)
def __bool__(self):
    """True when any relative field is non-zero or any absolute field
    is set; the all-defaults delta is falsy."""
    if (self.years or self.months or self.days or self.hours or
            self.minutes or self.seconds or self.microseconds or
            self.leapdays):
        return True
    return not (self.year is None and
                self.month is None and
                self.day is None and
                self.weekday is None and
                self.hour is None and
                self.minute is None and
                self.second is None and
                self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
    """Scale every relative component by *other*, truncating to int.

    Absolute fields and ``leapdays`` are carried over unchanged.
    """
    try:
        factor = float(other)
    except TypeError:
        return NotImplemented
    scaled = {name: int(getattr(self, name) * factor)
              for name in ("years", "months", "days", "hours",
                           "minutes", "seconds", "microseconds")}
    return self.__class__(leapdays=self.leapdays, year=self.year,
                          month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond, **scaled)
__rmul__ = __mul__
def __eq__(self, other):
    """Structural equality against another relativedelta.

    Weekdays compare specially: a missing ordinal (``n``) is treated
    as equivalent to ``n == 1``.
    """
    if not isinstance(other, relativedelta):
        return NotImplemented
    if self.weekday or other.weekday:
        if not self.weekday or not other.weekday:
            return False
        if self.weekday.weekday != other.weekday.weekday:
            return False
        n1, n2 = self.weekday.n, other.weekday.n
        if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
            return False
    fields = ("years", "months", "days", "hours", "minutes", "seconds",
              "microseconds", "leapdays", "year", "month", "day",
              "hour", "minute", "second", "microsecond")
    return all(getattr(self, name) == getattr(other, name)
               for name in fields)
def __hash__(self):
    """Hash over the same components __eq__ compares (weekday first,
    then all relative and absolute fields, in a fixed order)."""
    fields = ("years", "months", "days", "hours", "minutes", "seconds",
              "microseconds", "leapdays", "year", "month", "day",
              "hour", "minute", "second", "microsecond")
    return hash((self.weekday,) +
                tuple(getattr(self, name) for name in fields))
def __ne__(self, other):
    """Inverse of :meth:`__eq__`, propagating ``NotImplemented``.

    The previous ``not self.__eq__(other)`` truth-tested
    ``NotImplemented`` when the operand was not a relativedelta —
    deprecated since Python 3.9 and slated to become an error — and
    wrongly returned ``False`` instead of letting Python try the
    reflected comparison on the other operand.
    """
    result = self.__eq__(other)
    if result is NotImplemented:
        return result
    return not result
def __div__(self, other):
    """Divide the relative components by *other* (implemented as
    multiplication by the reciprocal; division by zero propagates)."""
    try:
        divisor = float(other)
    except TypeError:
        return NotImplemented
    return self.__mul__(1 / divisor)
__truediv__ = __div__
def __repr__(self):
    """Show only the informative fields: non-zero relative components
    (signed) and non-None absolute components (repr'd)."""
    pieces = []
    for attr in ["years", "months", "days", "leapdays",
                 "hours", "minutes", "seconds", "microseconds"]:
        value = getattr(self, attr)
        if value:
            pieces.append("{attr}={value:+g}".format(attr=attr, value=value))
    for attr in ["year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond"]:
        value = getattr(self, attr)
        if value is not None:
            pieces.append("{attr}={value}".format(attr=attr,
                                                  value=repr(value)))
    return "{classname}({attrs})".format(classname=self.__class__.__name__,
                                         attrs=", ".join(pieces))
def _sign(x):
    """Return the sign of *x* as an int, following ``copysign``
    semantics (-1 for negative values including -0.0, else +1)."""
    unit = copysign(1, x)
    return int(unit)
# vim:ts=4:sw=4:et
venv\Lib\site-packages\dateutil\rrule.py
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
including support for caching of results.
"""
import calendar
import datetime
import heapq
import itertools
import re
import sys
from functools import wraps
# For warning about deprecation of until and count
from warnings import warn
from six import advance_iterator, integer_types
from six.moves import _thread, range
from ._common import weekday as weekdaybase
try:
from math import gcd
except ImportError:
from fractions import gcd
__all__ = ["rrule", "rruleset", "rrulestr",
           "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
           "HOURLY", "MINUTELY", "SECONDLY",
           "MO", "TU", "WE", "TH", "FR", "SA", "SU"]

# Every mask is 7 days longer to handle cross-year weekly periods.
# M366MASK[i] is the month (1..12) of year-day i in a leap year.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
# MDAY masks map year-day index -> day-of-month (positive: 1..31).
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
# NMDAY masks map year-day index -> negative day-of-month (-31..-1),
# i.e. day counted from the end of the month.
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# Cumulative day counts at each month boundary (leap / non-leap).
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
# Derive the 365-day variants by deleting the leap-day entry, then drop
# the scratch month lists.
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)

# FREQNAMES is indexed by the frequency constants below.
FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']

(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = list(range(7))

# Imported on demand.
easter = None
parser = None
class weekday(weekdaybase):
    """
    This version of weekday does not allow n = 0.
    """
    def __init__(self, wkday, n=None):
        # An ordinal of 0 ("the zeroth Monday") is meaningless in a
        # recurrence rule, so reject it up front.
        if n == 0:
            raise ValueError("Can't create weekday with n==0")

        super(weekday, self).__init__(wkday, n)
# One shared instance per weekday (MO..SU, 0..6); also kept together in
# the ``weekdays`` tuple for index-based lookup.
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
def _invalidates_cache(f):
    """
    Decorator for rruleset methods which may invalidate the
    cached length.
    """
    @wraps(f)
    def _wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        # Drop any cached iteration state after the mutation.
        self._invalidate_cache()
        return result

    return _wrapper
class rrulebase(object):
    """Shared iteration/caching machinery for recurrence containers.

    Subclasses provide ``_iter()``; this base adds optional result
    caching plus positional (``__getitem__``) and datetime-oriented
    (``before``/``after``/``between``/``xafter``) queries.
    """

    def __init__(self, cache=False):
        # With cache=True, results produced by _iter() are memoized in
        # self._cache so repeated iteration does not recompute them.
        if cache:
            self._cache = []
            self._cache_lock = _thread.allocate_lock()
            self._invalidate_cache()
        else:
            self._cache = None
            self._cache_complete = False
            self._len = None

    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()

    def _invalidate_cache(self):
        if self._cache is not None:
            self._cache = []
            self._cache_complete = False
            self._cache_gen = self._iter()

            # _iter_cached can exit with the lock still held (see its
            # break paths); release it here so iteration can restart.
            if self._cache_lock.locked():
                self._cache_lock.release()

        self._len = None

    def _iter_cached(self):
        # Serve items from self._cache, extending it 10 items at a time
        # from the shared generator under the cache lock.
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                # Another consumer may have finished the generator while
                # we waited for the lock.
                # NOTE(review): both break paths below leave the lock
                # held; _invalidate_cache releases a held lock.
                if self._cache_complete:
                    break
                try:
                    for j in range(10):
                        cache.append(advance_iterator(gen))
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            yield cache[i]
            i += 1
        # Once complete, drain any remainder straight from the cache.
        # (assumes self._len was set when the iteration finished —
        # _iter() is expected to record it; not visible here.)
        while i < self._len:
            yield cache[i]
            i += 1

    def __getitem__(self, item):
        # Supports ints (negative via full materialization) and slices;
        # forward slices stream lazily through islice.
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxsize,
                                             item.step or 1))
        elif item >= 0:
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = advance_iterator(gen)
            except StopIteration:
                raise IndexError
            return res
        else:
            return list(iter(self))[item]

    def __contains__(self, item):
        if self._cache_complete:
            return item in self._cache
        else:
            # Occurrences are generated in ascending order, so we can
            # stop as soon as we pass the target.
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    return False
        return False

    # __len__() introduces a large performance penalty.
    def count(self):
        """ Returns the number of recurrences in this set. It will have to go
        through the whole recurrence, if this hasn't been done before. """
        if self._len is None:
            # Exhausting the iterator populates self._len as a side
            # effect.
            for x in self:
                pass
        return self._len

    def before(self, dt, inc=False):
        """ Returns the last recurrence before the given datetime instance. The
        inc keyword defines what happens if dt is an occurrence. With
        inc=True, if dt itself is an occurrence, it will be returned. """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last

    def after(self, dt, inc=False):
        """ Returns the first recurrence after the given datetime instance. The
        inc keyword defines what happens if dt is an occurrence. With
        inc=True, if dt itself is an occurrence, it will be returned.  """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None

    def xafter(self, dt, count=None, inc=False):
        """
        Generator which yields up to `count` recurrences after the given
        datetime instance, equivalent to `after`.

        :param dt:
            The datetime at which to start generating recurrences.

        :param count:
            The maximum number of recurrences to generate. If `None` (default),
            dates are generated until the recurrence rule is exhausted.

        :param inc:
            If `dt` is an instance of the rule and `inc` is `True`, it is
            included in the output.

        :yields: Yields a sequence of `datetime` objects.
        """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self

        # Select the comparison function
        if inc:
            comp = lambda dc, dtc: dc >= dtc
        else:
            comp = lambda dc, dtc: dc > dtc

        # Generate dates
        n = 0
        for d in gen:
            if comp(d, dt):
                if count is not None:
                    n += 1
                    if n > count:
                        break

                yield d

    def between(self, after, before, inc=False, count=1):
        """ Returns all the occurrences of the rrule between after and before.
        The inc keyword defines what happens if after and/or before are
        themselves occurrences. With inc=True, they will be included in the
        list, if they are found in the recurrence set. """
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        # ``started`` skips occurrences up to the lower bound; after
        # that every occurrence is collected until the upper bound.
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is got
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
If given, this determines how many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the keyword ``until`` in conjunction
with ``count`` is deprecated, to make sure ``dateutil`` is fully
compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
**must not** occur in the same call to ``rrule``.
:param until:
If given, this must be a datetime instance specifying the upper-bound
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the keyword ``until`` in conjunction
with ``count`` is deprecated, to make sure ``dateutil`` is fully
compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
**must not** occur in the same call to ``rrule``.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
if until and until.tzinfo:
dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0)
else:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if self._dtstart and self._until:
if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
# According to RFC5545 Section 3.3.10:
# https://tools.ietf.org/html/rfc5545#section-3.3.10
#
# > If the "DTSTART" property is specified as a date with UTC
# > time or a date with local time and time zone reference,
# > then the UNTIL rule part MUST be specified as a date with
# > UTC time.
raise ValueError(
'RRULE UNTIL values must be specified in UTC when DTSTART '
'is timezone-aware'
)
if count is not None and until:
warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
" and has been deprecated in dateutil. Future versions will "
"raise an error.", DeprecationWarning)
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = ()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = ()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = {dtstart.hour}
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = {dtstart.minute}
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC5545, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
if self._count is not None:
parts.append('COUNT=' + str(self._count))
if self._until:
parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC5545-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append('RRULE:' + ';'.join(parts))
return '\n'.join(output)
def replace(self, **kwargs):
"""Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified."""
new_kwargs = {"interval": self._interval,
"count": self._count,
"dtstart": self._dtstart,
"freq": self._freq,
"until": self._until,
"wkst": self._wkst,
"cache": False if self._cache is None else True }
new_kwargs.update(self._original_rule)
new_kwargs.update(kwargs)
return rrule(**new_kwargs)
    def _iter(self):
        """Generate the datetimes matched by this rule, in ascending order.

        This is the core recurrence engine.  It walks period by period
        (year/month/week/day depending on ``freq``), builds a candidate day
        set for each period, filters it through every BYXXX mask computed by
        :class:`_iterinfo`, combines surviving days with the time set, and
        yields the resulting datetimes until COUNT/UNTIL (or
        ``datetime.MAXYEAR``) terminates the iteration.  ``self._len`` is
        set to the total number of occurrences when the generator finishes.
        """
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()

        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond

        ii = _iterinfo(self)
        ii.rebuild(year, month)

        # Select the day-set builder matching the frequency; sub-daily
        # frequencies all iterate one day at a time.
        getdayset = {YEARLY: ii.ydayset,
                     MONTHLY: ii.mdayset,
                     WEEKLY: ii.wdayset,
                     DAILY: ii.ddayset,
                     HOURLY: ii.ddayset,
                     MINUTELY: ii.ddayset,
                     SECONDLY: ii.ddayset}[freq]

        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY: ii.htimeset,
                          MINUTELY: ii.mtimeset,
                          SECONDLY: ii.stimeset}[freq]
            # If the DTSTART time components conflict with the BY* filters,
            # start with an empty time set until the clock advances.
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                    (freq >= MINUTELY and
                     self._byminute and minute not in self._byminute) or
                    (freq >= SECONDLY and
                     self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)

        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)

            # Do the "hard" work ;-)
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                        (byweekno and not ii.wnomask[i]) or
                        (byweekday and ii.wdaymask[i] not in byweekday) or
                        (ii.nwdaymask and not ii.nwdaymask[i]) or
                        (byeaster and not ii.eastermask[i]) or
                        ((bymonthday or bynmonthday) and
                         ii.mdaymask[i] not in bymonthday and
                         ii.nmdaymask[i] not in bynmonthday) or
                        (byyearday and
                         ((i < ii.yearlen and i+1 not in byyearday and
                           -ii.yearlen+i not in byyearday) or
                          (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
                           -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
                    dayset[i] = None
                    filtered = True

            # Output results
            if bysetpos and timeset:
                # BYSETPOS selects the n-th occurrence within this period;
                # positions index the (day, time) cross product.
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        if count is not None:
                            count -= 1
                            if count < 0:
                                self._len = total
                                return
                        total += 1
                        yield res
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal + i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                if count is not None:
                                    count -= 1
                                    if count < 0:
                                        self._len = total
                                        return
                                total += 1
                                yield res

            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                # Advance to the next week start (wkst), whole weeks at a time.
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval

                if byhour:
                    ndays, hour = self.__mod_distance(value=hour,
                                                      byxxx=self._byhour,
                                                      base=24)
                else:
                    ndays, hour = divmod(hour+interval, 24)

                if ndays:
                    day += ndays
                    fixday = True

                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval

                # At most rep_rate/gcd(interval, rep_rate) steps are needed
                # before the minute pattern repeats; past that the rule is
                # provably empty.
                valid = False
                rep_rate = (24*60)
                for j in range(rep_rate // gcd(interval, rep_rate)):
                    if byminute:
                        nhours, minute = \
                            self.__mod_distance(value=minute,
                                                byxxx=self._byminute,
                                                base=60)
                    else:
                        nhours, minute = divmod(minute+interval, 60)

                    div, hour = divmod(hour+nhours, 24)
                    if div:
                        day += div
                        fixday = True
                        filtered = False

                    if not byhour or hour in byhour:
                        valid = True
                        break

                if not valid:
                    raise ValueError('Invalid combination of interval and ' +
                                     'byhour resulting in empty rule.')

                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399 - (hour * 3600 + minute * 60 + second))
                                // interval) * interval)

                rep_rate = (24 * 3600)
                valid = False
                for j in range(0, rep_rate // gcd(interval, rep_rate)):
                    if bysecond:
                        nminutes, second = \
                            self.__mod_distance(value=second,
                                                byxxx=self._bysecond,
                                                base=60)
                    else:
                        nminutes, second = divmod(second+interval, 60)

                    div, minute = divmod(minute+nminutes, 60)
                    if div:
                        hour += div
                        div, hour = divmod(hour, 24)
                        if div:
                            day += div
                            fixday = True

                    if ((not byhour or hour in byhour) and
                            (not byminute or minute in byminute) and
                            (not bysecond or second in bysecond)):
                        valid = True
                        break

                if not valid:
                    raise ValueError('Invalid combination of interval, ' +
                                     'byhour and byminute resulting in empty' +
                                     ' rule.')

                timeset = gettimeset(hour, minute, second)

            # Normalize a day value that overflowed the current month.
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common denominator
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
class _iterinfo(object):
    """Per-year/per-month precomputed lookup masks used by ``rrule._iter``.

    ``rebuild(year, month)`` refreshes the masks lazily (only when the year
    or month actually changes).  Each mask is indexed by day-of-year; the
    ``*dayset`` methods produce candidate day indices for one iteration
    period, and the ``*timeset`` methods produce sorted time-of-day lists
    for sub-daily frequencies.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]

    def __init__(self, rrule):
        # Start with every slot cleared so rebuild() recomputes everything.
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule

    def rebuild(self, year, month):
        """Recompute the masks for *year*/*month* if either has changed."""
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365 + calendar.isleap(year)
            self.nextyearlen = 365 + calendar.isleap(year + 1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                # ISO-like week-number mask: mark every day belonging to a
                # requested week number, including partial boundary weeks.
                self.wnomask = [0]*(self.yearlen+7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen +
                                            (lyearweekday-rr._wkst) % 7) % 7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        # Recompute the "n-th weekday of period" mask when the period moved.
        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday) % 7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            # Mark the days at the requested offsets from Easter Sunday.
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1

        self.lastyear = year
        self.lastmonth = month

    def ydayset(self, year, month, day):
        """Candidate days for a YEARLY period: every day of the year."""
        return list(range(self.yearlen)), 0, self.yearlen

    def mdayset(self, year, month, day):
        """Candidate days for a MONTHLY period: the days of *month*."""
        dset = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            dset[i] = i
        return dset, start, end

    def wdayset(self, year, month, day):
        """Candidate days for a WEEKLY period starting at *day*."""
        # We need to handle cross-year weeks here.
        dset = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            dset[i] = i
            i += 1
            # if (not (0 <= i < self.yearlen) or
            #     self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return dset, start, i

    def ddayset(self, year, month, day):
        """Candidate days for a DAILY (or sub-daily) period: just *day*."""
        dset = [None] * self.yearlen
        i = datetime.date(year, month, day).toordinal() - self.yearordinal
        dset[i] = i
        return dset, i, i + 1

    def htimeset(self, hour, minute, second):
        """Sorted times for one hour: the BYMINUTE x BYSECOND grid."""
        tset = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                tset.append(datetime.time(hour, minute, second,
                                          tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def mtimeset(self, hour, minute, second):
        """Sorted times for one minute: one entry per BYSECOND value."""
        tset = []
        rr = self.rrule
        for second in rr._bysecond:
            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def stimeset(self, hour, minute, second):
        """A single-time tuple for a SECONDLY step."""
        return (datetime.time(hour, minute, second,
                tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """ The rruleset type allows more complex recurrence setups, mixing
    multiple rules, dates, exclusion rules, and exclusion dates. The type
    constructor takes the following keyword arguments:

    :param cache: If True, caching of results will be enabled, improving
                  performance of multiple queries considerably. """

    class _genitem(object):
        # Wraps one generator as a heap-orderable item: ``dt`` always holds
        # the generator's most recently produced value, and advancing past
        # exhaustion removes the item from the heap it lives in.
        def __init__(self, genlist, gen):
            try:
                self.dt = advance_iterator(gen)
                genlist.append(self)
            except StopIteration:
                # An empty generator never joins the heap at all.
                pass
            self.genlist = genlist
            self.gen = gen

        def __next__(self):
            try:
                self.dt = advance_iterator(self.gen)
            except StopIteration:
                # Exhausted: remove ourselves from the heap, restoring the
                # heap invariant if we were not at the top.
                if self.genlist[0] is self:
                    heapq.heappop(self.genlist)
                else:
                    self.genlist.remove(self)
                    heapq.heapify(self.genlist)

        next = __next__

        # Heap ordering and (in)equality compare the current datetime value.
        def __lt__(self, other):
            return self.dt < other.dt

        def __gt__(self, other):
            return self.dt > other.dt

        def __eq__(self, other):
            return self.dt == other.dt

        def __ne__(self, other):
            return self.dt != other.dt

    def __init__(self, cache=False):
        super(rruleset, self).__init__(cache)
        # Inclusion/exclusion sources, appended via the methods below.
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []

    @_invalidates_cache
    def rrule(self, rrule):
        """ Include the given :py:class:`rrule` instance in the recurrence set
        generation. """
        self._rrule.append(rrule)

    @_invalidates_cache
    def rdate(self, rdate):
        """ Include the given :py:class:`datetime` instance in the recurrence
        set generation. """
        self._rdate.append(rdate)

    @_invalidates_cache
    def exrule(self, exrule):
        """ Include the given rrule instance in the recurrence set exclusion
        list. Dates which are part of the given recurrence rules will not
        be generated, even if some inclusive rrule or rdate matches them.
        """
        self._exrule.append(exrule)

    @_invalidates_cache
    def exdate(self, exdate):
        """ Include the given datetime instance in the recurrence set
        exclusion list. Dates included that way will not be generated,
        even if some inclusive rrule or rdate matches them. """
        self._exdate.append(exdate)

    def _iter(self):
        """Yield the merged, de-duplicated, ascending stream of all
        inclusion sources, skipping anything an exclusion source produces
        (a heap-based k-way merge of both sides)."""
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate))
        for gen in [iter(x) for x in self._rrule]:
            self._genitem(rlist, gen)
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate))
        for gen in [iter(x) for x in self._exrule]:
            self._genitem(exlist, gen)
        lastdt = None
        total = 0
        heapq.heapify(rlist)
        heapq.heapify(exlist)
        while rlist:
            ritem = rlist[0]
            # lastdt suppresses duplicates coming from multiple sources.
            if not lastdt or lastdt != ritem.dt:
                # Let the exclusion heap catch up to the candidate, then
                # emit the candidate unless an exclusion matches it exactly.
                while exlist and exlist[0] < ritem:
                    exitem = exlist[0]
                    advance_iterator(exitem)
                    if exlist and exlist[0] is exitem:
                        heapq.heapreplace(exlist, exitem)
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            advance_iterator(ritem)
            if rlist and rlist[0] is ritem:
                heapq.heapreplace(rlist, ritem)
        self._len = total
class _rrulestr(object):
    """ Parses a string representation of a recurrence rule or set of
    recurrence rules.

    :param s:
        Required, a string defining one or more recurrence rules.

    :param dtstart:
        If given, used as the default recurrence start if not specified in the
        rule string.

    :param cache:
        If set ``True`` caching of results will be enabled, improving
        performance of multiple queries considerably.

    :param unfold:
        If set ``True`` indicates that a rule string is split over more
        than one line and should be joined before processing.

    :param forceset:
        If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
        be returned.

    :param compatible:
        If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a naive
        :class:`datetime.datetime` object is returned.

    :param tzids:
        If given, a callable or mapping used to retrieve a
        :class:`datetime.tzinfo` from a string representation.
        Defaults to :func:`dateutil.tz.gettz`.

    :param tzinfos:
        Additional time zone names / aliases which may be present in a string
        representation.  See :func:`dateutil.parser.parse` for more
        information.

    :return:
        Returns a :class:`dateutil.rrule.rruleset` or
        :class:`dateutil.rrule.rrule`
    """

    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}

    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
                    "FR": 4, "SA": 5, "SU": 6}

    def _handle_int(self, rrkwargs, name, value, **kwargs):
        # Generic handler for single-integer properties (COUNT, INTERVAL).
        rrkwargs[name.lower()] = int(value)

    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        # Generic handler for comma-separated integer lists (most BYXXX).
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]

    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list

    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]

    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        # The parser module is imported lazily to avoid a circular import.
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")

    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]

    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        """
        Two ways to specify this: +1MO or MO(+1)
        """
        l = []
        for wday in value.split(','):
            if '(' in wday:
                # If it's of the form TH(+1), etc.
                splt = wday.split('(')
                w = splt[0]
                n = int(splt[1][:-1])
            elif len(wday):
                # If it's of the form +1MO
                for i in range(len(wday)):
                    if wday[i] not in '+-0123456789':
                        break
                n = wday[:i] or None
                w = wday[i:]
                if n:
                    n = int(n)
            else:
                raise ValueError("Invalid (empty) BYDAY specification.")

            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l

    _handle_BYDAY = _handle_BYWEEKDAY

    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        # Parse a single "RRULE:FREQ=...;..." (or bare "FREQ=...") line by
        # dispatching each NAME=VALUE pair to its _handle_<NAME> method.
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)

    def _parse_date_value(self, date_value, parms, rule_tzids,
                          ignoretz, tzids, tzinfos):
        # Parse the datetime list of a DTSTART/RDATE/EXDATE property,
        # honoring an optional TZID= parameter.
        global parser
        if not parser:
            from dateutil import parser
        datevals = []
        value_found = False
        TZID = None

        for parm in parms:
            if parm.startswith("TZID="):
                try:
                    tzkey = rule_tzids[parm.split('TZID=')[-1]]
                except KeyError:
                    continue
                if tzids is None:
                    from . import tz
                    tzlookup = tz.gettz
                elif callable(tzids):
                    tzlookup = tzids
                else:
                    tzlookup = getattr(tzids, 'get', None)
                    if tzlookup is None:
                        msg = ('tzids must be a callable, mapping, or None, '
                               'not %s' % tzids)
                        raise ValueError(msg)

                TZID = tzlookup(tzkey)
                continue

            # RFC 5545 3.8.2.4: The VALUE parameter is optional, but may be
            # found only once.
            if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
                raise ValueError("unsupported parm: " + parm)
            else:
                if value_found:
                    msg = ("Duplicate value parameter found in: " + parm)
                    raise ValueError(msg)
                value_found = True

        for datestr in date_value.split(','):
            date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
            if TZID is not None:
                if date.tzinfo is None:
                    date = date.replace(tzinfo=TZID)
                else:
                    raise ValueError('DTSTART/EXDATE specifies multiple timezone')
            datevals.append(date)

        return datevals

    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzids=None,
                   tzinfos=None):
        global parser
        if compatible:
            forceset = True
            unfold = True

        # Map upper-cased TZID names back to their original case before the
        # whole string is upper-cased below.  (Bug fix: the named group
        # "(?P<name>...)" had been stripped to the invalid "(?P...)", which
        # raises re.error and, once repaired, is needed so findall() returns
        # just the TZID names.)
        TZID_NAMES = dict(map(
            lambda x: (x.upper(), x),
            re.findall('TZID=(?P<name>[^:]+):', s)
        ))
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # RFC 5545 "unfolding": a line starting with a space continues
            # the previous line.
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
                                                  s.startswith('RRULE:'))):
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    exdatevals.extend(
                        self._parse_date_value(value, parms,
                                               TZID_NAMES, ignoretz,
                                               tzids, tzinfos)
                    )
                elif name == "DTSTART":
                    dtvals = self._parse_date_value(value, parms, TZID_NAMES,
                                                    ignoretz, tzids, tzinfos)
                    if len(dtvals) != 1:
                        raise ValueError("Multiple DTSTART values specified:" +
                                         value)
                    dtstart = dtvals[0]
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or rdatevals
                    or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                rset = rruleset(cache=cache)
                for value in rrulevals:
                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        rset.rdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                for value in exrulevals:
                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
                for value in exdatevals:
                    rset.exdate(value)
                if compatible and dtstart:
                    rset.rdate(dtstart)
                return rset
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)

    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
# Module-level parser singleton: call ``rrulestr(spec, **kwargs)`` to parse a
# recurrence string into an rrule or rruleset.
rrulestr = _rrulestr()

# vim:ts=4:sw=4:et
venv\Lib\site-packages\dateutil\tzwin.py
# tzwin has moved to dateutil.tz.win
from .tz.win import *
venv\Lib\site-packages\dateutil\utils.py
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.
.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals
from datetime import datetime, time
def today(tzinfo=None):
    """Return a :py:class:`datetime.datetime` for the current day at midnight.

    :param tzinfo:
        The time zone to attach (also used to determine the current day).

    :return:
        A :py:class:`datetime.datetime` object representing the current day
        at midnight.
    """
    now = datetime.now(tzinfo)
    midnight = time(0, tzinfo=tzinfo)
    return datetime.combine(now.date(), midnight)
def default_tzinfo(dt, tzinfo):
    """Attach *tzinfo* to *dt* only when *dt* is naive.

    This is useful when a datetime may carry either an implicit or an
    explicit time zone (e.g. the result of parsing a string): aware
    datetimes pass through untouched, while naive ones get *tzinfo*
    attached.

    :param dt:
        The datetime whose time zone may need filling in.

    :param tzinfo:
        The :py:class:`datetime.tzinfo` subclass instance to assign to
        *dt* if (and only if) it is naive.

    :return:
        An aware :py:class:`datetime.datetime` (or *dt* unchanged if it
        was already aware).
    """
    if dt.tzinfo is None:
        return dt.replace(tzinfo=tzinfo)
    return dt
def within_delta(dt1, dt2, delta):
    """Return True when *dt1* and *dt2* differ by at most ``abs(delta)``.

    Useful for comparing two datetimes that may have a negligible
    difference to be considered equal.  The sign of *delta* is ignored.
    """
    tolerance = abs(delta)
    return -tolerance <= (dt1 - dt2) <= tolerance
venv\Lib\site-packages\dateutil\_common.py
"""
Common code used in multiple modules.
"""
class weekday(object):
    """Lightweight weekday wrapper pairing a day index (0=MO .. 6=SU) with
    an optional ordinal ``n`` (e.g. +1 for "first", -1 for "last")."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Reuse self when the ordinal is unchanged; otherwise return a copy
        # carrying the new ordinal.
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            # Anything without weekday/n attributes is simply not equal.
            return False

    def __hash__(self):
        return hash((
            self.weekday,
            self.n,
        ))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return s if not self.n else "%s(%+d)" % (s, self.n)
venv\Lib\site-packages\dateutil\_version.py
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.9.0.post0'
__version_tuple__ = version_tuple = (2, 9, 0)
venv\Lib\site-packages\dateutil\__init__.py
# -*- coding: utf-8 -*-
import sys
try:
from ._version import version as __version__
except ImportError:
__version__ = 'unknown'
__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
'utils', 'zoneinfo']
def __getattr__(name):
    """Lazily import submodules on first attribute access (PEP 562), so
    that ``import dateutil`` stays cheap.

    :raises AttributeError:
        If *name* is not one of the lazily importable submodules listed in
        ``__all__``.
    """
    import importlib

    if name in __all__:
        return importlib.import_module("." + name, __name__)
    # Bug fix: message previously read "has not attribute"; corrected to
    # the standard "has no attribute" wording.
    raise AttributeError(
        "module {!r} has no attribute {!r}".format(__name__, name)
    )
def __dir__():
    """Module ``dir()`` hook (PEP 562): list the lazily importable
    submodules alongside the module's real globals."""
    # __dir__ should include all the lazy-importable modules as well,
    # while skipping names that already correspond to loaded modules.
    loaded = sys.modules
    return [name for name in globals() if name not in loaded] + __all__
venv\Lib\site-packages\fontTools\afmLib.py
"""Module for reading and writing AFM (Adobe Font Metrics) files.
Note that this has been designed to read in AFM files generated by Fontographer
and has not been tested on many other files. In particular, it does not
implement the whole Adobe AFM specification [#f1]_ but, it should read most
"common" AFM files.
Here is an example of using `afmLib` to read, modify and write an AFM file:
>>> from fontTools.afmLib import AFM
>>> f = AFM("Tests/afmLib/data/TestAFM.afm")
>>>
>>> # Accessing a pair gets you the kern value
>>> f[("V","A")]
-60
>>>
>>> # Accessing a glyph name gets you metrics
>>> f["A"]
(65, 668, (8, -25, 660, 666))
>>> # (charnum, width, bounding box)
>>>
>>> # Accessing an attribute gets you metadata
>>> f.FontName
'TestFont-Regular'
>>> f.FamilyName
'TestFont'
>>> f.Weight
'Regular'
>>> f.XHeight
500
>>> f.Ascender
750
>>>
>>> # Attributes and items can also be set
>>> f[("A","V")] = -150 # Tighten kerning
>>> f.FontName = "TestFont Squished"
>>>
>>> # And the font written out again (remove the # in front)
>>> #f.write("testfont-squished.afm")
.. rubric:: Footnotes
.. [#f1] `Adobe Technote 5004 `_,
Adobe Font Metrics File Format Specification.
"""
import re
# every single line starts with a "word"
identifierRE = re.compile(r"^([A-Za-z]+).*")
# regular expression to parse char lines
charRE = re.compile(
r"(-?\d+)" # charnum
r"\s*;\s*WX\s+" # ; WX
r"(-?\d+)" # width
r"\s*;\s*N\s+" # ; N
r"([.A-Za-z0-9_]+)" # charname
r"\s*;\s*B\s+" # ; B
r"(-?\d+)" # left
r"\s+"
r"(-?\d+)" # bottom
r"\s+"
r"(-?\d+)" # right
r"\s+"
r"(-?\d+)" # top
r"\s*;\s*" # ;
)
# regular expression to parse kerning lines
kernRE = re.compile(
r"([.A-Za-z0-9_]+)" # leftchar
r"\s+"
r"([.A-Za-z0-9_]+)" # rightchar
r"\s+"
r"(-?\d+)" # value
r"\s*"
)
# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
r"([.A-Za-z0-9_]+)" # char name
r"\s+"
r"(\d+)" # number of parts
r"\s*;\s*"
)
componentRE = re.compile(
r"PCC\s+" # PPC
r"([.A-Za-z0-9_]+)" # base char name
r"\s+"
r"(-?\d+)" # x offset
r"\s+"
r"(-?\d+)" # y offset
r"\s*;\s*"
)
preferredAttributeOrder = [
"FontName",
"FullName",
"FamilyName",
"Weight",
"ItalicAngle",
"IsFixedPitch",
"FontBBox",
"UnderlinePosition",
"UnderlineThickness",
"Version",
"Notice",
"EncodingScheme",
"CapHeight",
"XHeight",
"Ascender",
"Descender",
]
class error(Exception):
    """Exception raised for syntax errors while parsing an AFM file."""

    pass
class AFM(object):
_attrs = None
_keywords = [
"StartFontMetrics",
"EndFontMetrics",
"StartCharMetrics",
"EndCharMetrics",
"StartKernData",
"StartKernPairs",
"EndKernPairs",
"EndKernData",
"StartComposites",
"EndComposites",
]
def __init__(self, path=None):
"""AFM file reader.
Instantiating an object with a path name will cause the file to be opened,
read, and parsed. Alternatively the path can be left unspecified, and a
file can be parsed later with the :meth:`read` method."""
self._attrs = {}
self._chars = {}
self._kerning = {}
self._index = {}
self._comments = []
self._composites = {}
if path is not None:
self.read(path)
def read(self, path):
"""Opens, reads and parses a file."""
lines = readlines(path)
for line in lines:
if not line.strip():
continue
m = identifierRE.match(line)
if m is None:
raise error("syntax error in AFM file: " + repr(line))
pos = m.regs[1][1]
word = line[:pos]
rest = line[pos:].strip()
if word in self._keywords:
continue
if word == "C":
self.parsechar(rest)
elif word == "KPX":
self.parsekernpair(rest)
elif word == "CC":
self.parsecomposite(rest)
else:
self.parseattr(word, rest)
def parsechar(self, rest):
m = charRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
charname = things[2]
del things[2]
charnum, width, l, b, r, t = (int(thing) for thing in things)
self._chars[charname] = charnum, width, (l, b, r, t)
def parsekernpair(self, rest):
m = kernRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
things = []
for fr, to in m.regs[1:]:
things.append(rest[fr:to])
leftchar, rightchar, value = things
value = int(value)
self._kerning[(leftchar, rightchar)] = value
def parseattr(self, word, rest):
if word == "FontBBox":
l, b, r, t = [int(thing) for thing in rest.split()]
self._attrs[word] = l, b, r, t
elif word == "Comment":
self._comments.append(rest)
else:
try:
value = int(rest)
except (ValueError, OverflowError):
self._attrs[word] = rest
else:
self._attrs[word] = value
def parsecomposite(self, rest):
m = compositeRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
charname = m.group(1)
ncomponents = int(m.group(2))
rest = rest[m.regs[0][1] :]
components = []
while True:
m = componentRE.match(rest)
if m is None:
raise error("syntax error in AFM file: " + repr(rest))
basechar = m.group(1)
xoffset = int(m.group(2))
yoffset = int(m.group(3))
components.append((basechar, xoffset, yoffset))
rest = rest[m.regs[0][1] :]
if not rest:
break
assert len(components) == ncomponents
self._composites[charname] = components
def write(self, path, sep="\r"):
    """Writes out an AFM font to the given path.

    *sep* is the line separator used in the output file (classic-Mac
    "\\r" by default, matching the original behavior).
    """
    import time

    timestamp = time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))
    lines = [
        "StartFontMetrics 2.0",
        "Comment Generated by afmLib; at %s" % (timestamp,),
    ]
    # Comments first, assuming (possibly wrongly!) they should all
    # appear at the top of the file.
    lines.extend("Comment " + comment for comment in self._comments)
    # Attributes we know about go first, in a preferred order...
    attrs = self._attrs
    for name in preferredAttributeOrder:
        if name in attrs:
            value = attrs[name]
            if name == "FontBBox":
                value = "%s %s %s %s" % value
            lines.append(name + " " + str(value))
    # ...then any remaining attributes, alphabetically.
    for name, value in sorted(attrs.items()):
        if name not in preferredAttributeOrder:
            lines.append(name + " " + str(value))
    # Char metrics, sorted by char code.
    lines.append("StartCharMetrics " + repr(len(self._chars)))
    metrics = [
        (charnum, (charname, width, box))
        for charname, (charnum, width, box) in self._chars.items()
    ]

    def unencoded_last(item):
        """Sort key pushing unencoded chars (charnum == -1) to the end."""
        if item[0] == -1:
            item = (0xFFFF,) + item[1:]  # 0xffff is an arbitrary large number
        return item

    metrics.sort(key=unencoded_last)
    for charnum, (charname, width, (l, b, r, t)) in metrics:
        lines.append(
            "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
            % (charnum, width, charname, l, b, r, t)
        )
    lines.append("EndCharMetrics")
    # Kerning data, sorted by pair.
    lines.append("StartKernData")
    lines.append("StartKernPairs " + repr(len(self._kerning)))
    for (leftchar, rightchar), value in sorted(self._kerning.items()):
        lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
    lines.append("EndKernPairs")
    lines.append("EndKernData")
    # Composites, only when present.
    if self._composites:
        lines.append("StartComposites %s" % len(self._composites))
        for charname, components in sorted(self._composites.items()):
            entry = "CC %s %s ;" % (charname, len(components))
            for basechar, xoffset, yoffset in components:
                entry = entry + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
            lines.append(entry)
        lines.append("EndComposites")
    lines.append("EndFontMetrics")
    writelines(path, lines, sep)
def has_kernpair(self, pair):
    """Return ``True`` when *pair* (a tuple of two glyph names) has an
    entry in the kerning dictionary."""
    known = self._kerning
    return pair in known
def kernpairs(self):
    """Return a new list of every kern pair in the kerning dictionary."""
    return [pair for pair in self._kerning]
def has_char(self, char):
    """Return ``True`` when the glyph named *char* exists in the font."""
    metrics = self._chars
    return char in metrics
def chars(self):
    """Return a new list of every glyph name in the font."""
    return [name for name in self._chars]
def comments(self):
    """Return the list of Comment lines collected from the file.

    Note: this is the internal list itself, not a copy, so mutating it
    affects the object (matching the original behavior).
    """
    return self._comments
def addComment(self, comment):
    """Append *comment* to the file's list of Comment lines."""
    self._comments += [comment]
def addComposite(self, glyphName, components):
    """Register *glyphName* as a composite made of *components*.

    *components* is a list of ``(glyphname, xOffset, yOffset)`` triples;
    any previous registration for *glyphName* is replaced.
    """
    self._composites[glyphName] = components
def __getattr__(self, attr):
    # Called only for names not found the normal way: expose parsed AFM
    # attributes (e.g. afm.Version) as instance attributes.
    try:
        return self._attrs[attr]
    except KeyError:
        raise AttributeError(attr)
def __setattr__(self, attr, value):
    # Names starting with "_" are real instance attributes; every other
    # name is treated as an AFM keyword and stored in the attrs dict.
    if attr.startswith("_"):
        self.__dict__[attr] = value
    else:
        self._attrs[attr] = value
def __delattr__(self, attr):
    # Mirror __setattr__: underscore names live in the instance dict,
    # all other names are AFM keywords held in the attrs dict.
    target = self.__dict__ if attr.startswith("_") else self._attrs
    try:
        del target[attr]
    except KeyError:
        raise AttributeError(attr)
def __getitem__(self, key):
    # Tuple keys address kern pairs; any other key addresses char
    # metrics by glyph name.
    mapping = self._kerning if isinstance(key, tuple) else self._chars
    return mapping[key]
def __setitem__(self, key, value):
    # Tuple keys set a kern pair; any other key sets char metrics.
    mapping = self._kerning if isinstance(key, tuple) else self._chars
    mapping[key] = value
def __delitem__(self, key):
    # Tuple keys delete a kern pair; any other key deletes char metrics.
    mapping = self._kerning if isinstance(key, tuple) else self._chars
    del mapping[key]
def __repr__(self):
    """Return a debug representation, using the font's FullName when known."""
    # BUG FIX: the format strings here were empty ("" % x), which raises
    # "TypeError: not all arguments converted during string formatting"
    # at runtime — the "<...>" content was evidently stripped as if it
    # were an HTML tag. Restore the conventional angle-bracket repr.
    if hasattr(self, "FullName"):
        return "<AFM object for %s>" % self.FullName
    else:
        return "<AFM object at %x>" % id(self)
def readlines(path):
    """Read *path* as ASCII text and return its lines as a list,
    recognizing all common line-ending conventions."""
    with open(path, "r", encoding="ascii") as afm_file:
        contents = afm_file.read()
    return contents.splitlines()
def writelines(path, lines, sep="\r"):
    """Write *lines* to *path* as ASCII, terminating each line with *sep*
    (classic-Mac "\\r" by default)."""
    payload = "\n".join(lines) + "\n"
    # newline=sep makes the text layer translate each "\n" to *sep*.
    with open(path, "w", encoding="ascii", newline=sep) as afm_file:
        afm_file.write(payload)
if __name__ == "__main__":
    # Interactive smoke test of the AFM class.
    # NOTE(review): EasyDialogs is a classic Mac-only module that does
    # not ship with modern Python; this demo only runs where it is
    # installed.
    import EasyDialogs

    path = EasyDialogs.AskFileForOpen()
    if path:
        afm = AFM(path)
        char = "A"
        if afm.has_char(char):
            print(afm[char])  # print charnum, width and boundingbox
        pair = ("A", "V")
        if afm.has_kernpair(pair):
            print(afm[pair])  # print kerning value for pair
        print(afm.Version)  # various other afm entries have become attributes
        print(afm.Weight)
        # afm.comments() returns a list of all Comment lines found in the AFM
        print(afm.comments())
        # print afm.chars()
        # print afm.kernpairs()
        print(afm)
        # Write the parsed font back out next to the original.
        afm.write(path + ".muck")
venv\Lib\site-packages\fontTools\agl.py
# -*- coding: utf-8 -*-
# The tables below are taken from
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt
# and
# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt
"""
Interface to the Adobe Glyph List
This module exists to convert glyph names from the Adobe Glyph List
to their Unicode equivalents. Example usage:
>>> from fontTools.agl import toUnicode
>>> toUnicode("nahiragana")
'な'
It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from
Unicode codepoints to AGL names and vice versa:
>>> import fontTools
>>> fontTools.agl.UV2AGL[ord("?")]
'question'
>>> fontTools.agl.AGL2UV["wcircumflex"]
373
This is used by fontTools when it has to construct glyph names for a font which
doesn't include any (e.g. format 3.0 post tables).
"""
from fontTools.misc.textTools import tostr
import re
_aglText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List
# Table version: 2.0
# Date: September 20, 2002
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Format: two semicolon-delimited fields:
# (1) glyph name--upper/lowercase letters and digits
# (2) Unicode scalar value--four uppercase hexadecimal digits
#
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
# END
"""
_aglfnText = """\
# -----------------------------------------------------------
# Copyright 2002-2019 Adobe (http://www.adobe.com/).
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# Neither the name of Adobe nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------
# Name: Adobe Glyph List For New Fonts
# Table version: 1.7
# Date: November 6, 2008
# URL: https://github.com/adobe-type-tools/agl-aglfn
#
# Description:
#
# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph
# names that are recommended for new fonts, which are compatible with
# the AGL (Adobe Glyph List) Specification, and which should be used
# as described in Section 6 of that document. AGLFN comprises the set
# of glyph names from AGL that map via the AGL Specification rules to
# the semantically correct UV (Unicode Value). For example, "Asmall"
# is omitted because AGL maps this glyph name to the PUA (Private Use
# Area) value U+F761, rather than to the UV that maps from the glyph
# name "A." Also omitted is "ffi," because AGL maps this to the
# Alphabetic Presentation Forms value U+FB03, rather than decomposing
# it into the following sequence of three UVs: U+0066, U+0066, and
# U+0069. The name "arrowvertex" has been omitted because this glyph
# now has a real UV, and AGL is now incorrect in mapping it to the PUA
# value U+F8E6. If you do not find an appropriate name for your glyph
# in this list, then please refer to Section 6 of the AGL
# Specification.
#
# Format: three semicolon-delimited fields:
# (1) Standard UV or CUS UV--four uppercase hexadecimal digits
# (2) Glyph name--upper/lowercase letters and digits
# (3) Character names: Unicode character names for standard UVs, and
# descriptive names for CUS UVs--uppercase letters, hyphen, and
# space
#
# The records are sorted by glyph name in increasing ASCII order,
# entries with the same glyph name are sorted in decreasing priority
# order, the UVs and Unicode character names are provided for
# convenience, lines starting with "#" are comments, and blank lines
# should be ignored.
#
# Revision History:
#
# 1.7 [6 November 2008]
# - Reverted to the original 1.4 and earlier mappings for Delta,
# Omega, and mu.
# - Removed mappings for "afii" names. These should now be assigned
# "uni" names.
# - Removed mappings for "commaaccent" names. These should now be
# assigned "uni" names.
#
# 1.6 [30 January 2006]
# - Completed work intended in 1.5.
#
# 1.5 [23 November 2005]
# - Removed duplicated block at end of file.
# - Changed mappings:
# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA
# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA
# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU
# - Corrected statement above about why "ffi" is omitted.
#
# 1.4 [24 September 2003]
# - Changed version to 1.4, to avoid confusion with the AGL 1.3.
# - Fixed spelling errors in the header.
# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode
# value in some fonts.
#
# 1.1 [17 April 2003]
# - Renamed [Tt]cedilla back to [Tt]commaaccent.
#
# 1.0 [31 January 2003]
# - Original version.
# - Derived from the AGLv1.2 by:
# removing the PUA area codes;
# removing duplicate Unicode mappings; and
# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla"
#
0041;A;LATIN CAPITAL LETTER A
00C6;AE;LATIN CAPITAL LETTER AE
01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
0391;Alpha;GREEK CAPITAL LETTER ALPHA
0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
0042;B;LATIN CAPITAL LETTER B
0392;Beta;GREEK CAPITAL LETTER BETA
0043;C;LATIN CAPITAL LETTER C
0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
03A7;Chi;GREEK CAPITAL LETTER CHI
0044;D;LATIN CAPITAL LETTER D
010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
2206;Delta;INCREMENT
0045;E;LATIN CAPITAL LETTER E
00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
014A;Eng;LATIN CAPITAL LETTER ENG
0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
0395;Epsilon;GREEK CAPITAL LETTER EPSILON
0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
0397;Eta;GREEK CAPITAL LETTER ETA
0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
00D0;Eth;LATIN CAPITAL LETTER ETH
20AC;Euro;EURO SIGN
0046;F;LATIN CAPITAL LETTER F
0047;G;LATIN CAPITAL LETTER G
0393;Gamma;GREEK CAPITAL LETTER GAMMA
011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
0048;H;LATIN CAPITAL LETTER H
25CF;H18533;BLACK CIRCLE
25AA;H18543;BLACK SMALL SQUARE
25AB;H18551;WHITE SMALL SQUARE
25A1;H22073;WHITE SQUARE
0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
0049;I;LATIN CAPITAL LETTER I
0132;IJ;LATIN CAPITAL LIGATURE IJ
00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
2111;Ifraktur;BLACK-LETTER CAPITAL I
00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
0399;Iota;GREEK CAPITAL LETTER IOTA
03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
004A;J;LATIN CAPITAL LETTER J
0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
004B;K;LATIN CAPITAL LETTER K
039A;Kappa;GREEK CAPITAL LETTER KAPPA
004C;L;LATIN CAPITAL LETTER L
0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
039B;Lambda;GREEK CAPITAL LETTER LAMDA
013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
004D;M;LATIN CAPITAL LETTER M
039C;Mu;GREEK CAPITAL LETTER MU
004E;N;LATIN CAPITAL LETTER N
0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
039D;Nu;GREEK CAPITAL LETTER NU
004F;O;LATIN CAPITAL LETTER O
0152;OE;LATIN CAPITAL LIGATURE OE
00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
2126;Omega;OHM SIGN
038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
039F;Omicron;GREEK CAPITAL LETTER OMICRON
038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
0050;P;LATIN CAPITAL LETTER P
03A6;Phi;GREEK CAPITAL LETTER PHI
03A0;Pi;GREEK CAPITAL LETTER PI
03A8;Psi;GREEK CAPITAL LETTER PSI
0051;Q;LATIN CAPITAL LETTER Q
0052;R;LATIN CAPITAL LETTER R
0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
211C;Rfraktur;BLACK-LETTER CAPITAL R
03A1;Rho;GREEK CAPITAL LETTER RHO
0053;S;LATIN CAPITAL LETTER S
250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
03A3;Sigma;GREEK CAPITAL LETTER SIGMA
0054;T;LATIN CAPITAL LETTER T
03A4;Tau;GREEK CAPITAL LETTER TAU
0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
0398;Theta;GREEK CAPITAL LETTER THETA
00DE;Thorn;LATIN CAPITAL LETTER THORN
0055;U;LATIN CAPITAL LETTER U
00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
0056;V;LATIN CAPITAL LETTER V
0057;W;LATIN CAPITAL LETTER W
1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
0058;X;LATIN CAPITAL LETTER X
039E;Xi;GREEK CAPITAL LETTER XI
0059;Y;LATIN CAPITAL LETTER Y
00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
005A;Z;LATIN CAPITAL LETTER Z
0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
0396;Zeta;GREEK CAPITAL LETTER ZETA
0061;a;LATIN SMALL LETTER A
00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
0103;abreve;LATIN SMALL LETTER A WITH BREVE
00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
00B4;acute;ACUTE ACCENT
0301;acutecomb;COMBINING ACUTE ACCENT
00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
00E6;ae;LATIN SMALL LETTER AE
01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
2135;aleph;ALEF SYMBOL
03B1;alpha;GREEK SMALL LETTER ALPHA
03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
0101;amacron;LATIN SMALL LETTER A WITH MACRON
0026;ampersand;AMPERSAND
2220;angle;ANGLE
2329;angleleft;LEFT-POINTING ANGLE BRACKET
232A;angleright;RIGHT-POINTING ANGLE BRACKET
0387;anoteleia;GREEK ANO TELEIA
0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
2248;approxequal;ALMOST EQUAL TO
00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
2194;arrowboth;LEFT RIGHT ARROW
21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
21D1;arrowdblup;UPWARDS DOUBLE ARROW
2193;arrowdown;DOWNWARDS ARROW
2190;arrowleft;LEFTWARDS ARROW
2192;arrowright;RIGHTWARDS ARROW
2191;arrowup;UPWARDS ARROW
2195;arrowupdn;UP DOWN ARROW
21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
005E;asciicircum;CIRCUMFLEX ACCENT
007E;asciitilde;TILDE
002A;asterisk;ASTERISK
2217;asteriskmath;ASTERISK OPERATOR
0040;at;COMMERCIAL AT
00E3;atilde;LATIN SMALL LETTER A WITH TILDE
0062;b;LATIN SMALL LETTER B
005C;backslash;REVERSE SOLIDUS
007C;bar;VERTICAL LINE
03B2;beta;GREEK SMALL LETTER BETA
2588;block;FULL BLOCK
007B;braceleft;LEFT CURLY BRACKET
007D;braceright;RIGHT CURLY BRACKET
005B;bracketleft;LEFT SQUARE BRACKET
005D;bracketright;RIGHT SQUARE BRACKET
02D8;breve;BREVE
00A6;brokenbar;BROKEN BAR
2022;bullet;BULLET
0063;c;LATIN SMALL LETTER C
0107;cacute;LATIN SMALL LETTER C WITH ACUTE
02C7;caron;CARON
21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
010D;ccaron;LATIN SMALL LETTER C WITH CARON
00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
00B8;cedilla;CEDILLA
00A2;cent;CENT SIGN
03C7;chi;GREEK SMALL LETTER CHI
25CB;circle;WHITE CIRCLE
2297;circlemultiply;CIRCLED TIMES
2295;circleplus;CIRCLED PLUS
02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
2663;club;BLACK CLUB SUIT
003A;colon;COLON
20A1;colonmonetary;COLON SIGN
002C;comma;COMMA
2245;congruent;APPROXIMATELY EQUAL TO
00A9;copyright;COPYRIGHT SIGN
00A4;currency;CURRENCY SIGN
0064;d;LATIN SMALL LETTER D
2020;dagger;DAGGER
2021;daggerdbl;DOUBLE DAGGER
010F;dcaron;LATIN SMALL LETTER D WITH CARON
0111;dcroat;LATIN SMALL LETTER D WITH STROKE
00B0;degree;DEGREE SIGN
03B4;delta;GREEK SMALL LETTER DELTA
2666;diamond;BLACK DIAMOND SUIT
00A8;dieresis;DIAERESIS
0385;dieresistonos;GREEK DIALYTIKA TONOS
00F7;divide;DIVISION SIGN
2593;dkshade;DARK SHADE
2584;dnblock;LOWER HALF BLOCK
0024;dollar;DOLLAR SIGN
20AB;dong;DONG SIGN
02D9;dotaccent;DOT ABOVE
0323;dotbelowcomb;COMBINING DOT BELOW
0131;dotlessi;LATIN SMALL LETTER DOTLESS I
22C5;dotmath;DOT OPERATOR
0065;e;LATIN SMALL LETTER E
00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
0115;ebreve;LATIN SMALL LETTER E WITH BREVE
011B;ecaron;LATIN SMALL LETTER E WITH CARON
00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
0038;eight;DIGIT EIGHT
2208;element;ELEMENT OF
2026;ellipsis;HORIZONTAL ELLIPSIS
0113;emacron;LATIN SMALL LETTER E WITH MACRON
2014;emdash;EM DASH
2205;emptyset;EMPTY SET
2013;endash;EN DASH
014B;eng;LATIN SMALL LETTER ENG
0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
03B5;epsilon;GREEK SMALL LETTER EPSILON
03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
003D;equal;EQUALS SIGN
2261;equivalence;IDENTICAL TO
212E;estimated;ESTIMATED SYMBOL
03B7;eta;GREEK SMALL LETTER ETA
03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
00F0;eth;LATIN SMALL LETTER ETH
0021;exclam;EXCLAMATION MARK
203C;exclamdbl;DOUBLE EXCLAMATION MARK
00A1;exclamdown;INVERTED EXCLAMATION MARK
2203;existential;THERE EXISTS
0066;f;LATIN SMALL LETTER F
2640;female;FEMALE SIGN
2012;figuredash;FIGURE DASH
25A0;filledbox;BLACK SQUARE
25AC;filledrect;BLACK RECTANGLE
0035;five;DIGIT FIVE
215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
0192;florin;LATIN SMALL LETTER F WITH HOOK
0034;four;DIGIT FOUR
2044;fraction;FRACTION SLASH
20A3;franc;FRENCH FRANC SIGN
0067;g;LATIN SMALL LETTER G
03B3;gamma;GREEK SMALL LETTER GAMMA
011F;gbreve;LATIN SMALL LETTER G WITH BREVE
01E7;gcaron;LATIN SMALL LETTER G WITH CARON
011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
00DF;germandbls;LATIN SMALL LETTER SHARP S
2207;gradient;NABLA
0060;grave;GRAVE ACCENT
0300;gravecomb;COMBINING GRAVE ACCENT
003E;greater;GREATER-THAN SIGN
2265;greaterequal;GREATER-THAN OR EQUAL TO
00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0068;h;LATIN SMALL LETTER H
0127;hbar;LATIN SMALL LETTER H WITH STROKE
0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
2665;heart;BLACK HEART SUIT
0309;hookabovecomb;COMBINING HOOK ABOVE
2302;house;HOUSE
02DD;hungarumlaut;DOUBLE ACUTE ACCENT
002D;hyphen;HYPHEN-MINUS
0069;i;LATIN SMALL LETTER I
00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
012D;ibreve;LATIN SMALL LETTER I WITH BREVE
00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
0133;ij;LATIN SMALL LIGATURE IJ
012B;imacron;LATIN SMALL LETTER I WITH MACRON
221E;infinity;INFINITY
222B;integral;INTEGRAL
2321;integralbt;BOTTOM HALF INTEGRAL
2320;integraltp;TOP HALF INTEGRAL
2229;intersection;INTERSECTION
25D8;invbullet;INVERSE BULLET
25D9;invcircle;INVERSE WHITE CIRCLE
263B;invsmileface;BLACK SMILING FACE
012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
03B9;iota;GREEK SMALL LETTER IOTA
03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
0129;itilde;LATIN SMALL LETTER I WITH TILDE
006A;j;LATIN SMALL LETTER J
0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
006B;k;LATIN SMALL LETTER K
03BA;kappa;GREEK SMALL LETTER KAPPA
0138;kgreenlandic;LATIN SMALL LETTER KRA
006C;l;LATIN SMALL LETTER L
013A;lacute;LATIN SMALL LETTER L WITH ACUTE
03BB;lambda;GREEK SMALL LETTER LAMDA
013E;lcaron;LATIN SMALL LETTER L WITH CARON
0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
003C;less;LESS-THAN SIGN
2264;lessequal;LESS-THAN OR EQUAL TO
258C;lfblock;LEFT HALF BLOCK
20A4;lira;LIRA SIGN
2227;logicaland;LOGICAL AND
00AC;logicalnot;NOT SIGN
2228;logicalor;LOGICAL OR
017F;longs;LATIN SMALL LETTER LONG S
25CA;lozenge;LOZENGE
0142;lslash;LATIN SMALL LETTER L WITH STROKE
2591;ltshade;LIGHT SHADE
006D;m;LATIN SMALL LETTER M
00AF;macron;MACRON
2642;male;MALE SIGN
2212;minus;MINUS SIGN
2032;minute;PRIME
00B5;mu;MICRO SIGN
00D7;multiply;MULTIPLICATION SIGN
266A;musicalnote;EIGHTH NOTE
266B;musicalnotedbl;BEAMED EIGHTH NOTES
006E;n;LATIN SMALL LETTER N
0144;nacute;LATIN SMALL LETTER N WITH ACUTE
0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
0148;ncaron;LATIN SMALL LETTER N WITH CARON
0039;nine;DIGIT NINE
2209;notelement;NOT AN ELEMENT OF
2260;notequal;NOT EQUAL TO
2284;notsubset;NOT A SUBSET OF
00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
03BD;nu;GREEK SMALL LETTER NU
0023;numbersign;NUMBER SIGN
006F;o;LATIN SMALL LETTER O
00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
014F;obreve;LATIN SMALL LETTER O WITH BREVE
00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
0153;oe;LATIN SMALL LIGATURE OE
02DB;ogonek;OGONEK
00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
01A1;ohorn;LATIN SMALL LETTER O WITH HORN
0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
014D;omacron;LATIN SMALL LETTER O WITH MACRON
03C9;omega;GREEK SMALL LETTER OMEGA
03D6;omega1;GREEK PI SYMBOL
03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
03BF;omicron;GREEK SMALL LETTER OMICRON
03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
0031;one;DIGIT ONE
2024;onedotenleader;ONE DOT LEADER
215B;oneeighth;VULGAR FRACTION ONE EIGHTH
00BD;onehalf;VULGAR FRACTION ONE HALF
00BC;onequarter;VULGAR FRACTION ONE QUARTER
2153;onethird;VULGAR FRACTION ONE THIRD
25E6;openbullet;WHITE BULLET
00AA;ordfeminine;FEMININE ORDINAL INDICATOR
00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
221F;orthogonal;RIGHT ANGLE
00F8;oslash;LATIN SMALL LETTER O WITH STROKE
01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
00F5;otilde;LATIN SMALL LETTER O WITH TILDE
0070;p;LATIN SMALL LETTER P
00B6;paragraph;PILCROW SIGN
0028;parenleft;LEFT PARENTHESIS
0029;parenright;RIGHT PARENTHESIS
2202;partialdiff;PARTIAL DIFFERENTIAL
0025;percent;PERCENT SIGN
002E;period;FULL STOP
00B7;periodcentered;MIDDLE DOT
22A5;perpendicular;UP TACK
2030;perthousand;PER MILLE SIGN
20A7;peseta;PESETA SIGN
03C6;phi;GREEK SMALL LETTER PHI
03D5;phi1;GREEK PHI SYMBOL
03C0;pi;GREEK SMALL LETTER PI
002B;plus;PLUS SIGN
00B1;plusminus;PLUS-MINUS SIGN
211E;prescription;PRESCRIPTION TAKE
220F;product;N-ARY PRODUCT
2282;propersubset;SUBSET OF
2283;propersuperset;SUPERSET OF
221D;proportional;PROPORTIONAL TO
03C8;psi;GREEK SMALL LETTER PSI
0071;q;LATIN SMALL LETTER Q
003F;question;QUESTION MARK
00BF;questiondown;INVERTED QUESTION MARK
0022;quotedbl;QUOTATION MARK
201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
2018;quoteleft;LEFT SINGLE QUOTATION MARK
201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
2019;quoteright;RIGHT SINGLE QUOTATION MARK
201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
0027;quotesingle;APOSTROPHE
0072;r;LATIN SMALL LETTER R
0155;racute;LATIN SMALL LETTER R WITH ACUTE
221A;radical;SQUARE ROOT
0159;rcaron;LATIN SMALL LETTER R WITH CARON
2286;reflexsubset;SUBSET OF OR EQUAL TO
2287;reflexsuperset;SUPERSET OF OR EQUAL TO
00AE;registered;REGISTERED SIGN
2310;revlogicalnot;REVERSED NOT SIGN
03C1;rho;GREEK SMALL LETTER RHO
02DA;ring;RING ABOVE
2590;rtblock;RIGHT HALF BLOCK
0073;s;LATIN SMALL LETTER S
015B;sacute;LATIN SMALL LETTER S WITH ACUTE
0161;scaron;LATIN SMALL LETTER S WITH CARON
015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
2033;second;DOUBLE PRIME
00A7;section;SECTION SIGN
003B;semicolon;SEMICOLON
0037;seven;DIGIT SEVEN
215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
2592;shade;MEDIUM SHADE
03C3;sigma;GREEK SMALL LETTER SIGMA
03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
223C;similar;TILDE OPERATOR
0036;six;DIGIT SIX
002F;slash;SOLIDUS
263A;smileface;WHITE SMILING FACE
0020;space;SPACE
2660;spade;BLACK SPADE SUIT
00A3;sterling;POUND SIGN
220B;suchthat;CONTAINS AS MEMBER
2211;summation;N-ARY SUMMATION
263C;sun;WHITE SUN WITH RAYS
0074;t;LATIN SMALL LETTER T
03C4;tau;GREEK SMALL LETTER TAU
0167;tbar;LATIN SMALL LETTER T WITH STROKE
0165;tcaron;LATIN SMALL LETTER T WITH CARON
2234;therefore;THEREFORE
03B8;theta;GREEK SMALL LETTER THETA
03D1;theta1;GREEK THETA SYMBOL
00FE;thorn;LATIN SMALL LETTER THORN
0033;three;DIGIT THREE
215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
00BE;threequarters;VULGAR FRACTION THREE QUARTERS
02DC;tilde;SMALL TILDE
0303;tildecomb;COMBINING TILDE
0384;tonos;GREEK TONOS
2122;trademark;TRADE MARK SIGN
25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
25C4;triaglf;BLACK LEFT-POINTING POINTER
25BA;triagrt;BLACK RIGHT-POINTING POINTER
25B2;triagup;BLACK UP-POINTING TRIANGLE
0032;two;DIGIT TWO
2025;twodotenleader;TWO DOT LEADER
2154;twothirds;VULGAR FRACTION TWO THIRDS
0075;u;LATIN SMALL LETTER U
00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
016D;ubreve;LATIN SMALL LETTER U WITH BREVE
00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
01B0;uhorn;LATIN SMALL LETTER U WITH HORN
0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
016B;umacron;LATIN SMALL LETTER U WITH MACRON
005F;underscore;LOW LINE
2017;underscoredbl;DOUBLE LOW LINE
222A;union;UNION
2200;universal;FOR ALL
0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
2580;upblock;UPPER HALF BLOCK
03C5;upsilon;GREEK SMALL LETTER UPSILON
03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
0169;utilde;LATIN SMALL LETTER U WITH TILDE
0076;v;LATIN SMALL LETTER V
0077;w;LATIN SMALL LETTER W
1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
2118;weierstrass;SCRIPT CAPITAL P
1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
0078;x;LATIN SMALL LETTER X
03BE;xi;GREEK SMALL LETTER XI
0079;y;LATIN SMALL LETTER Y
00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
00A5;yen;YEN SIGN
1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
007A;z;LATIN SMALL LETTER Z
017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
017E;zcaron;LATIN SMALL LETTER Z WITH CARON
017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
0030;zero;DIGIT ZERO
03B6;zeta;GREEK SMALL LETTER ZETA
# END
"""
class AGLError(Exception):
    """Raised when the embedded AGL/AGLFN data cannot be parsed."""
# Name/code-point mapping tables, populated by _builddicts() at import time.
LEGACY_AGL2UV = {}  # glyph name -> list of code points (from the glyphlist data)
AGL2UV = {}  # glyph name -> single code point (from the aglfn data)
UV2AGL = {}  # code point -> glyph name (inverse of AGL2UV)
def _builddicts():
    """Parse the embedded AGL and AGLFN text and fill the module-level dicts.

    Populates ``LEGACY_AGL2UV`` from ``_aglText`` (glyphlist.txt format) and
    ``AGL2UV``/``UV2AGL`` from ``_aglfnText`` (aglfn.txt format).
    """
    import re

    # glyphlist.txt lines look like "glyphName;XXXX[ XXXX ...]".
    agl_pattern = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: (?:[0-9A-F]{4}))*)$")
    for raw in _aglText.splitlines():
        if not raw or raw.startswith("#"):
            continue
        matched = agl_pattern.match(raw)
        if matched is None:
            raise AGLError("syntax error in glyphlist.txt: %s" % repr(raw[:20]))
        codes = matched.group(2)
        # Space-separated groups of four hex digits: total length is 5*k - 1.
        assert len(codes) % 5 == 4
        values = [int(part, 16) for part in codes.split()]
        LEGACY_AGL2UV[tostr(matched.group(1))] = values

    # aglfn.txt lines look like "XXXX;glyphName;description".
    aglfn_pattern = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$")
    for raw in _aglfnText.splitlines():
        if not raw or raw.startswith("#"):
            continue
        matched = aglfn_pattern.match(raw)
        if matched is None:
            raise AGLError("syntax error in aglfn.txt: %s" % repr(raw[:20]))
        hexcode = matched.group(1)
        assert len(hexcode) == 4
        value = int(hexcode, 16)
        name = tostr(matched.group(2))
        AGL2UV[name] = value
        UV2AGL[value] = name
_builddicts()  # populate LEGACY_AGL2UV / AGL2UV / UV2AGL at import time
def toUnicode(glyph, isZapfDingbats=False):
    """Convert a glyph name to a Unicode string, e.g. ``'longs_t.oldstyle'`` --> ``u'ſt'``.

    When ``isZapfDingbats`` is true, component names from the ITC Zapf
    Dingbats Glyph List are recognized as well, as required by the AGL
    specification:
    https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    """
    # Step 1: drop everything from the first period (FULL STOP) onwards.
    base = glyph.split(".", 1)[0]
    # Step 2: split on underscores (LOW LINE); step 3: map each component
    # and concatenate the resulting strings.
    return "".join(
        _glyphComponentToUnicode(part, isZapfDingbats) for part in base.split("_")
    )
def _glyphComponentToUnicode(component, isZapfDingbats):
    """Map one underscore-delimited glyph-name component to a character string.

    Implements the per-component lookup order of the AGL specification:
    https://github.com/adobe-type-tools/agl-specification#2-the-mapping
    """
    # For Zapf Dingbats fonts, the ITC Zapf Dingbats Glyph List takes
    # precedence over everything else.
    if isZapfDingbats:
        mapped = _zapfDingbatsToUnicode(component)
        if mapped:
            return mapped
    # Next, the Adobe Glyph List proper (may map to several code points).
    codepoints = LEGACY_AGL2UV.get(component)
    if codepoints:
        return "".join(map(chr, codepoints))
    # "uni" followed by groups of four uppercase hex digits — BMP scalar
    # values only (the four-digit groups and the D800..DFFF exclusion
    # restrict "uni" names to the Basic Multilingual Plane).
    mapped = _uniToUnicode(component)
    if mapped:
        return mapped
    # "u" followed by four to six uppercase hex digits — a single Unicode
    # scalar value anywhere in 0000..D7FF or E000..10FFFF.
    mapped = _uToUnicode(component)
    if mapped:
        return mapped
    # Anything else maps to the empty string.
    return ""
# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt
# Index i holds the character for Zapf Dingbats glyph name "a<i>"; a space
# marks an unassigned slot (see _zapfDingbatsToUnicode).
_AGL_ZAPF_DINGBATS = (
    " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀"
    "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇"
    "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔"
    "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰"
)
def _zapfDingbatsToUnicode(glyph):
    """Return the dingbat character for glyph names like "a12", else None.

    Helper for toUnicode().
    """
    # Dingbat glyph names are "a" followed by a decimal index.
    if len(glyph) < 2 or not glyph.startswith("a"):
        return None
    try:
        index = int(glyph[1:])
    except ValueError:
        return None
    if not (0 <= index < len(_AGL_ZAPF_DINGBATS)):
        return None
    char = _AGL_ZAPF_DINGBATS[index]
    # A space in the table marks an unassigned slot.
    return None if char == " " else char
_re_uni = re.compile("^uni([0-9A-F]+)$")  # "uni" + uppercase hex digits
def _uniToUnicode(component):
    """Helper for toUnicode() to handle "uniABCD" components.

    Returns the decoded string, or None if the component is not a valid
    "uni" name (wrong prefix, partial hex group, or surrogate values).
    """
    match = _re_uni.match(component)
    if not match:
        return None
    hexdigits = match.group(1)
    # Only whole groups of exactly four hex digits are valid.
    if len(hexdigits) % 4:
        return None
    values = [int(hexdigits[pos : pos + 4], 16) for pos in range(0, len(hexdigits), 4)]
    # The AGL specification explicitly excludes surrogate code points.
    if any(0xD800 <= value <= 0xDFFF for value in values):
        return None
    return "".join(chr(value) for value in values)
_re_u = re.compile("^u([0-9A-F]{4,6})$")  # "u" + four to six uppercase hex digits
def _uToUnicode(component):
    """Helper for toUnicode() to handle "u1ABCD" components.

    Returns a one-character string for a valid Unicode scalar value,
    or None otherwise.
    """
    match = _re_u.match(component)
    if not match:
        return None
    try:
        value = int(match.group(1), 16)
    except ValueError:
        return None
    # Accept any Unicode scalar value, i.e. 0000..D7FF and E000..10FFFF
    # (surrogates and values beyond U+10FFFF are rejected).
    if 0xD800 <= value <= 0xDFFF or value > 0x10FFFF:
        return None
    return chr(value)
venv\Lib\site-packages\fontTools\fontBuilder.py
__all__ = ["FontBuilder"]  # the module's sole public name
"""
This module is *experimental*, meaning it still may evolve and change.
The `FontBuilder` class is a convenient helper to construct working TTF or
OTF fonts from scratch.
Note that the various setup methods cannot be called in arbitrary order,
due to various interdependencies between OpenType tables. Here is an order
that works:
fb = FontBuilder(...)
fb.setupGlyphOrder(...)
fb.setupCharacterMap(...)
fb.setupGlyf(...) --or-- fb.setupCFF(...)
fb.setupHorizontalMetrics(...)
fb.setupHorizontalHeader()
fb.setupNameTable(...)
fb.setupOS2()
fb.addOpenTypeFeatures(...)
fb.setupPost()
fb.save(...)
Here is how to build a minimal TTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.qCurveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=True)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = TTGlyphPen(None)
drawTestGlyph(pen)
glyph = pen.glyph()
glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph}
fb.setupGlyf(glyphs)
metrics = {}
glyphTable = fb.font["glyf"]
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.ttf")
```
And here's how to build a minimal OTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.t2CharStringPen import T2CharStringPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.curveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=False)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
charStrings = {
".notdef": charString,
"space": charString,
"A": charString,
"a": charString,
".null": charString,
}
fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {})
lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, lsb[gn])
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.otf")
```
"""
from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .ttLib.tables._g_l_y_f import flagCubic
from .ttLib.tables.O_S_2f_2 import Panose
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
# Default field values for a freshly created 'head' table.
_headDefaults = dict(
    tableVersion=1.0,
    fontRevision=1.0,
    checkSumAdjustment=0,
    magicNumber=0x5F0F3CF5,
    flags=0x0003,
    unitsPerEm=1000,
    created=0,
    modified=0,
    xMin=0,
    yMin=0,
    xMax=0,
    yMax=0,
    macStyle=0,
    lowestRecPPEM=3,
    fontDirectionHint=2,
    indexToLocFormat=0,
    glyphDataFormat=0,
)
# Default 'maxp' fields for glyf-based (TTF) fonts (table version 1.0).
_maxpDefaultsTTF = dict(
    tableVersion=0x00010000,
    numGlyphs=0,
    maxPoints=0,
    maxContours=0,
    maxCompositePoints=0,
    maxCompositeContours=0,
    maxZones=2,
    maxTwilightPoints=0,
    maxStorage=0,
    maxFunctionDefs=0,
    maxInstructionDefs=0,
    maxStackElements=0,
    maxSizeOfInstructions=0,
    maxComponentElements=0,
    maxComponentDepth=0,
)
# Default 'maxp' fields for CFF-based (OTF) fonts (table version 0.5).
_maxpDefaultsOTF = dict(
    tableVersion=0x00005000,
    numGlyphs=0,
)
# Default 'post' table fields; formatType 3.0 stores no glyph names.
_postDefaults = dict(
    formatType=3.0,
    italicAngle=0,
    underlinePosition=0,
    underlineThickness=0,
    isFixedPitch=0,
    minMemType42=0,
    maxMemType42=0,
    minMemType1=0,
    maxMemType1=0,
)
# Default 'hhea' (horizontal header) fields.
_hheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceWidthMax=0,
    minLeftSideBearing=0,
    minRightSideBearing=0,
    xMaxExtent=0,
    caretSlopeRise=1,
    caretSlopeRun=0,
    caretOffset=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    metricDataFormat=0,
    numberOfHMetrics=0,
)
# Default 'vhea' (vertical header) fields.
_vheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceHeightMax=0,
    minTopSideBearing=0,
    minBottomSideBearing=0,
    yMaxExtent=0,
    caretSlopeRise=0,
    caretSlopeRun=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    reserved4=0,
    metricDataFormat=0,
    numberOfVMetrics=0,
)
# Descriptive key -> OpenType 'name' table nameID, as accepted by setupNameTable().
_nameIDs = dict(
    copyright=0,
    familyName=1,
    styleName=2,
    uniqueFontIdentifier=3,
    fullName=4,
    version=5,
    psName=6,
    trademark=7,
    manufacturer=8,
    designer=9,
    description=10,
    vendorURL=11,
    designerURL=12,
    licenseDescription=13,
    licenseInfoURL=14,
    # reserved = 15,
    typographicFamily=16,
    typographicSubfamily=17,
    compatibleFullName=18,
    sampleText=19,
    postScriptCIDFindfontName=20,
    wwsFamilyName=21,
    wwsSubfamilyName=22,
    lightBackgroundPalette=23,
    darkBackgroundPalette=24,
    variationsPostScriptNamePrefix=25,
)
# to insert in setupNameTable doc string:
# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1])))
# Default PANOSE classification (Panose's own field defaults).
_panoseDefaults = Panose()
# Default field values for a freshly created 'OS/2' table.
_OS2Defaults = dict(
    version=3,
    xAvgCharWidth=0,
    usWeightClass=400,
    usWidthClass=5,
    fsType=0x0004,  # default: Preview & Print embedding
    ySubscriptXSize=0,
    ySubscriptYSize=0,
    ySubscriptXOffset=0,
    ySubscriptYOffset=0,
    ySuperscriptXSize=0,
    ySuperscriptYSize=0,
    ySuperscriptXOffset=0,
    ySuperscriptYOffset=0,
    yStrikeoutSize=0,
    yStrikeoutPosition=0,
    sFamilyClass=0,
    panose=_panoseDefaults,
    ulUnicodeRange1=0,
    ulUnicodeRange2=0,
    ulUnicodeRange3=0,
    ulUnicodeRange4=0,
    achVendID="????",
    fsSelection=0,
    usFirstCharIndex=0,
    usLastCharIndex=0,
    sTypoAscender=0,
    sTypoDescender=0,
    sTypoLineGap=0,
    usWinAscent=0,
    usWinDescent=0,
    ulCodePageRange1=0,
    ulCodePageRange2=0,
    sxHeight=0,
    sCapHeight=0,
    usDefaultChar=0,  # .notdef
    usBreakChar=32,  # space
    usMaxContext=0,
    usLowerOpticalPointSize=0,
    usUpperOpticalPointSize=0,
)
class FontBuilder(object):
def __init__(self, unitsPerEm=None, font=None, isTTF=True, glyphDataFormat=0):
    """Initialize a FontBuilder instance.

    Without a `font` argument, a new empty `TTFont` is created, in which
    case `unitsPerEm` is required; `isTTF` selects between a glyf-based TTF
    (True) and a CFF-based OTF (False).

    `glyphDataFormat` corresponds to the `head` table field that defines the
    format of the TrueType `glyf` table (default=0). Historically glyphs may
    only contain quadratic splines and static components; setting it to 1
    enables the proposed cubic Bezier curves and variable
    composites/components, see
    https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md
    With the format left at 0, adding such glyphs raises a ValueError, to
    avoid accidentally producing fonts incompatible with existing TrueType
    implementations.

    When an existing `TTFont` instance is passed as `font`, `unitsPerEm`
    must _not_ be given, and `isTTF`/`glyphDataFormat` are ignored.
    """
    if font is None:
        self.font = TTFont(recalcTimestamp=False)
        self.isTTF = isTTF
        stamp = timestampNow()
        assert unitsPerEm is not None
        self.setupHead(
            unitsPerEm=unitsPerEm,
            created=stamp,
            modified=stamp,
            glyphDataFormat=glyphDataFormat,
        )
        self.setupMaxp()
    else:
        assert unitsPerEm is None
        self.font = font
        # Detect the outline flavour from the tables actually present.
        self.isTTF = "glyf" in font
def save(self, file):
    """Save the font. The 'file' argument can be either a pathname or a
    writable file object.

    Delegates entirely to `TTFont.save`.
    """
    self.font.save(file)
def _initTableWithValues(self, tableTag, defaults, values):
    """Create table `tableTag`, fill it from `defaults` then `values`, return it."""
    table = self.font[tableTag] = newTable(tableTag)
    # Apply defaults first so that explicitly passed values win.
    for source in (defaults, values):
        for attr, attrValue in source.items():
            setattr(table, attr, attrValue)
    return table
def _updateTableWithValues(self, tableTag, values):
    """Assign each of `values` as attributes on the existing `tableTag` table."""
    table = self.font[tableTag]
    for k, v in values.items():
        setattr(table, k, v)
def setupHead(self, **values):
    """Create a new `head` table and initialize it with default values
    (see `_headDefaults`), which can be overridden by keyword arguments.
    """
    self._initTableWithValues("head", _headDefaults, values)
def updateHead(self, **values):
    """Update the existing `head` table with the fields and values passed
    as keyword arguments.
    """
    self._updateTableWithValues("head", values)
def setupGlyphOrder(self, glyphOrder):
    """Set the glyph order for the font.

    `glyphOrder` is a list of glyph names, passed straight through to
    `TTFont.setGlyphOrder`.
    """
    self.font.setGlyphOrder(glyphOrder)
def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False):
    """Build the `cmap` table for the font.

    `cmapping` must be a dict mapping unicode code points (integers) to
    glyph names.

    The optional `uvs` argument lists Unicode Variation Sequences as
    (unicodeValue, variationSelector, glyphName) tuples, where the first
    two elements are integer code points and `glyphName` may be None to
    mark the default variation (text processors then use the regular cmap
    to find the glyph name). Each sequence should be an officially
    supported one, but this is not policed.
    """
    subTables = []
    highestUnicode = max(cmapping) if cmapping else 0
    if highestUnicode > 0xFFFF:
        # Code points beyond the BMP need a format 12 (3,10) subtable;
        # the format 4 subtable then only covers the BMP subset.
        cmapping_3_1 = {k: v for k, v in cmapping.items() if k < 0x10000}
        subTables.append(buildCmapSubTable(cmapping, 12, 3, 10))
    else:
        cmapping_3_1 = cmapping
    subtableFormat = 4
    subTable_3_1 = buildCmapSubTable(cmapping_3_1, subtableFormat, 3, 1)
    try:
        subTable_3_1.compile(self.font)
    except struct.error:
        # format 4 overflowed, fall back to format 12
        if not allowFallback:
            raise ValueError(
                "cmap format 4 subtable overflowed; sort glyph order by unicode to fix."
            )
        subtableFormat = 12
        subTable_3_1 = buildCmapSubTable(cmapping_3_1, subtableFormat, 3, 1)
    subTables.append(subTable_3_1)
    subTables.append(buildCmapSubTable(cmapping_3_1, subtableFormat, 0, 3))
    if uvs is not None:
        uvsDict = {}
        for unicodeValue, variationSelector, glyphName in uvs:
            if cmapping.get(unicodeValue) == glyphName:
                # this is a default variation
                glyphName = None
            uvsDict.setdefault(variationSelector, []).append(
                (unicodeValue, glyphName)
            )
        # Format 14 (0,5) subtable carries the variation sequences.
        uvsSubTable = buildCmapSubTable({}, 14, 0, 5)
        uvsSubTable.uvsDict = uvsDict
        subTables.append(uvsSubTable)
    cmapTable = self.font["cmap"] = newTable("cmap")
    cmapTable.tableVersion = 0
    cmapTable.tables = subTables
def setupNameTable(self, nameStrings, windows=True, mac=True):
    """Build the `name` table from `nameStrings`, a dict mapping nameIDs
    (or the descriptive aliases below) to name record values. A value is
    either a string, or a dict mapping language codes to strings, which
    produces localized name table entries.

    By default both Windows (platformID=3) and Macintosh (platformID=1)
    records are written; pass `windows=False` or `mac=False` to skip one.

    Descriptive aliases for nameIDs:
        copyright (nameID 0)
        familyName (nameID 1)
        styleName (nameID 2)
        uniqueFontIdentifier (nameID 3)
        fullName (nameID 4)
        version (nameID 5)
        psName (nameID 6)
        trademark (nameID 7)
        manufacturer (nameID 8)
        designer (nameID 9)
        description (nameID 10)
        vendorURL (nameID 11)
        designerURL (nameID 12)
        licenseDescription (nameID 13)
        licenseInfoURL (nameID 14)
        typographicFamily (nameID 16)
        typographicSubfamily (nameID 17)
        compatibleFullName (nameID 18)
        sampleText (nameID 19)
        postScriptCIDFindfontName (nameID 20)
        wwsFamilyName (nameID 21)
        wwsSubfamilyName (nameID 22)
        lightBackgroundPalette (nameID 23)
        darkBackgroundPalette (nameID 24)
        variationsPostScriptNamePrefix (nameID 25)
    """
    table = self.font["name"] = newTable("name")
    table.names = []
    for key, value in nameStrings.items():
        # Accept either a numeric nameID or one of the aliases above.
        nameID = key if isinstance(key, int) else _nameIDs[key]
        if isinstance(value, str):
            value = {"en": value}
        table.addMultilingualName(
            value, ttFont=self.font, nameID=nameID, windows=windows, mac=mac
        )
def setupOS2(self, **values):
    """Create a new `OS/2` table and initialize it with default values,
    which can be overridden by keyword arguments.

    If `xAvgCharWidth` is not supplied it is recalculated from `hmtx`
    (which must already be set up). If none of the `ulUnicodeRange1..4`
    fields are supplied, they are recalculated from `cmap` (which must
    already be set up).
    """
    self._initTableWithValues("OS/2", _OS2Defaults, values)
    if "xAvgCharWidth" not in values:
        assert (
            "hmtx" in self.font
        ), "the 'hmtx' table must be setup before the 'OS/2' table"
        self.font["OS/2"].recalcAvgCharWidth(self.font)
    # Fix: the original checked 'ulUnicodeRange3' twice and never checked
    # 'ulUnicodeRange4', so a caller passing only ulUnicodeRange4 would get
    # their value clobbered by the recalculation.
    if not any(f"ulUnicodeRange{i}" in values for i in range(1, 5)):
        assert (
            "cmap" in self.font
        ), "the 'cmap' table must be setup before the 'OS/2' table"
        self.font["OS/2"].recalcUnicodeRanges(self.font)
def setupCFF(self, psName, fontInfo, charStringsDict, privateDict):
    """Build a `CFF ` table and add it to the font.

    `psName` is the PostScript font name; `fontInfo` is a dict of CFF
    TopDict entries; `charStringsDict` maps glyph names to charstring
    objects; `privateDict` is a dict of CFF PrivateDict entries.
    Sets the font's sfntVersion to "OTTO"; only valid for CFF-flavored
    (non-TrueType) fonts.
    """
    from .cffLib import (
        CFFFontSet,
        TopDictIndex,
        TopDict,
        CharStrings,
        GlobalSubrsIndex,
        PrivateDict,
    )

    assert not self.isTTF
    # CFF-flavored OpenType fonts use the 'OTTO' sfnt version tag.
    self.font.sfntVersion = "OTTO"
    fontSet = CFFFontSet()
    fontSet.major = 1
    fontSet.minor = 0
    fontSet.otFont = self.font
    fontSet.fontNames = [psName]
    fontSet.topDictIndex = TopDictIndex()
    globalSubrs = GlobalSubrsIndex()
    fontSet.GlobalSubrs = globalSubrs
    # Copy the caller-supplied entries onto a fresh PrivateDict.
    private = PrivateDict()
    for key, value in privateDict.items():
        setattr(private, key, value)
    # Plain (non-CID) CFF: no FDSelect / FDArray.
    fdSelect = None
    fdArray = None
    topDict = TopDict()
    topDict.charset = self.font.getGlyphOrder()
    topDict.Private = private
    topDict.GlobalSubrs = fontSet.GlobalSubrs
    for key, value in fontInfo.items():
        setattr(topDict, key, value)
    if "FontMatrix" not in fontInfo:
        # Default FontMatrix maps font units to a 1.0 em square.
        scale = 1 / self.font["head"].unitsPerEm
        topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
    charStrings = CharStrings(
        None, topDict.charset, globalSubrs, private, fdSelect, fdArray
    )
    for glyphName, charString in charStringsDict.items():
        # Each charstring needs back-references for subroutine resolution.
        charString.private = private
        charString.globalSubrs = globalSubrs
        charStrings[glyphName] = charString
    topDict.CharStrings = charStrings
    fontSet.topDictIndex.append(topDict)
    self.font["CFF "] = newTable("CFF ")
    self.font["CFF "].cff = fontSet
def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None):
    """Build a `CFF2` table and add it to the font.

    `charStringsDict` maps glyph names to CFF2 charstring objects.
    `fdArrayList`, when given, is a list of PrivateDict-entry dicts, one
    per FontDict; it defaults to a single empty PrivateDict. When
    `regions` is given, a VariationStore is added via `setupCFF2Regions`.
    Sets the font's sfntVersion to "OTTO"; only valid for CFF-flavored
    (non-TrueType) fonts.
    """
    from .cffLib import (
        CFFFontSet,
        TopDictIndex,
        TopDict,
        CharStrings,
        GlobalSubrsIndex,
        PrivateDict,
        FDArrayIndex,
        FontDict,
    )

    assert not self.isTTF
    self.font.sfntVersion = "OTTO"
    fontSet = CFFFontSet()
    fontSet.major = 2
    fontSet.minor = 0
    # CFF2 has no charset of its own; glyph order comes from the sfnt font.
    cff2GetGlyphOrder = self.font.getGlyphOrder
    fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None)
    globalSubrs = GlobalSubrsIndex()
    fontSet.GlobalSubrs = globalSubrs
    if fdArrayList is None:
        fdArrayList = [{}]
    fdSelect = None
    fdArray = FDArrayIndex()
    fdArray.strings = None
    fdArray.GlobalSubrs = globalSubrs
    for privateDict in fdArrayList:
        fontDict = FontDict()
        fontDict.setCFF2(True)
        private = PrivateDict()
        for key, value in privateDict.items():
            setattr(private, key, value)
        fontDict.Private = private
        fdArray.append(fontDict)
    topDict = TopDict()
    topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
    topDict.FDArray = fdArray
    # FontMatrix maps font units to a 1.0 em square.
    scale = 1 / self.font["head"].unitsPerEm
    topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]
    # All charstrings share the first FontDict's PrivateDict here.
    private = fdArray[0].Private
    charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray)
    for glyphName, charString in charStringsDict.items():
        charString.private = private
        charString.globalSubrs = globalSubrs
        charStrings[glyphName] = charString
    topDict.CharStrings = charStrings
    fontSet.topDictIndex.append(topDict)
    self.font["CFF2"] = newTable("CFF2")
    self.font["CFF2"].cff = fontSet
    if regions:
        self.setupCFF2Regions(regions)
def setupCFF2Regions(self, regions):
    """Add a VariationStore to an already-built `CFF2` table.

    `regions` is a list of region definitions as accepted by
    `fontTools.varLib.builder.buildVarRegionList`. Both the `fvar` and
    `CFF2` tables must be set up before calling this.
    """
    from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore
    from .cffLib import VarStoreData

    # Fix: assertion messages previously read "must to be set up first".
    assert "fvar" in self.font, "fvar must be set up first"
    assert "CFF2" in self.font, "CFF2 must be set up first"
    axisTags = [a.axisTag for a in self.font["fvar"].axes]
    varRegionList = buildVarRegionList(regions, axisTags)
    varData = buildVarData(list(range(len(regions))), None, optimize=False)
    varStore = buildVarStore(varRegionList, [varData])
    vstore = VarStoreData(otVarStore=varStore)
    topDict = self.font["CFF2"].cff.topDictIndex[0]
    topDict.VarStore = vstore
    # Every FontDict's PrivateDict shares the same store.
    for fontDict in topDict.FDArray:
        fontDict.Private.vstore = vstore
def setupGlyf(self, glyphs, calcGlyphBounds=True, validateGlyphFormat=True):
    """Build the `glyf` (and empty `loca`) table from `glyphs`, a dict
    mapping glyph names to `fontTools.ttLib.tables._g_l_y_f.Glyph`
    objects, e.g. as produced by `fontTools.pens.ttGlyphPen.TTGlyphPen`.

    When `calcGlyphBounds` is True (the default), glyph bounding boxes
    are recalculated; pass False only if they are already set. When
    `validateGlyphFormat` is True, a ValueError is raised for glyphs with
    cubic outlines while head.glyphDataFormat is 0; pass False to skip
    the check if all glyphs are known to be compatible.
    """
    assert self.isTTF
    if validateGlyphFormat and self.font["head"].glyphDataFormat == 0:
        for name, glyph in glyphs.items():
            hasCubics = glyph.numberOfContours > 0 and any(
                flag & flagCubic for flag in glyph.flags
            )
            if hasCubics:
                raise ValueError(
                    f"Glyph {name!r} has cubic Bezier outlines, but glyphDataFormat=0; "
                    "either convert to quadratics with cu2qu or set glyphDataFormat=1."
                )
    self.font["loca"] = newTable("loca")
    glyfTable = self.font["glyf"] = newTable("glyf")
    glyfTable.glyphs = glyphs
    if hasattr(self.font, "glyphOrder"):
        glyfTable.glyphOrder = self.font.glyphOrder
    if calcGlyphBounds:
        self.calcGlyphBounds()
def setupFvar(self, axes, instances):
    """Add an `fvar` (font variations) table to the font.

    `axes` is a list of axes, each either a
    py:class:`.designspaceLib.AxisDescriptor` object or a
    ``(tag, minValue, defaultValue, maxValue, name)`` tuple, where
    ``name`` is a string or a dict mapping language codes to strings
    for localized name table entries.

    `instances` is a list of instances, each either a
    py:class:`.designspaceLib.InstanceDescriptor` object or a dict with
    keys ``location`` (mapping of axis tags to float values),
    ``stylename`` and (optionally) ``postscriptfontname``; ``stylename``
    may likewise be a string or a language-code dict.
    """
    # All of the work happens in the module-level helper.
    addFvar(self.font, axes, instances)
def setupAvar(self, axes, mappings=None):
    """Add an `avar` (axis variations) table to the font.

    `axes` is a list of py:class:`.designspaceLib.AxisDescriptor`
    objects. Requires `fvar` to be set up first.
    """
    from .varLib import _add_avar

    if "fvar" not in self.font:
        raise KeyError("'fvar' table is missing; can't add 'avar'.")
    axisTags = [axis.axisTag for axis in self.font["fvar"].axes]
    # _add_avar expects a mapping; only its values are used.
    _add_avar(self.font, OrderedDict(enumerate(axes)), mappings, axisTags)
def setupGvar(self, variations):
    """Add a `gvar` (glyph variations) table holding `variations`."""
    table = newTable("gvar")
    table.version = 1
    table.reserved = 0
    table.variations = variations
    self.font["gvar"] = table
def setupGVAR(self, variations):
    """Like `setupGvar`, but writes the table under the 'GVAR' tag."""
    table = newTable("GVAR")
    table.version = 1
    table.reserved = 0
    table.variations = variations
    self.font["GVAR"] = table
def calcGlyphBounds(self):
    """Recalculate the bounding box of every glyph in the `glyf` table.

    Usually invoked by `setupGlyf` rather than by client code.
    """
    glyf = self.font["glyf"]
    for glyph in glyf.glyphs.values():
        glyph.recalcBounds(glyf)
def setupHorizontalMetrics(self, metrics):
    """Create the `hmtx` table from `metrics`, a dict mapping glyph
    names to `(width, leftSideBearing)` tuples.
    """
    # Shared implementation with setupVerticalMetrics.
    self.setupMetrics("hmtx", metrics)
def setupVerticalMetrics(self, metrics):
    """Create the `vmtx` table, for vertical metrics, from `metrics`,
    a dict mapping glyph names to `(height, topSideBearing)` tuples.
    (The original docstring mistakenly said "horizontal metrics".)
    """
    # Shared implementation with setupHorizontalMetrics.
    self.setupMetrics("vmtx", metrics)
def setupMetrics(self, tableTag, metrics):
    """See `setupHorizontalMetrics()` and `setupVerticalMetrics()`."""
    assert tableTag in ("hmtx", "vmtx")
    table = self.font[tableTag] = newTable(tableTag)
    # Metric values must be integers; round-half-even via round().
    table.metrics = {
        glyphName: (int(round(advance)), int(round(bearing)))
        for glyphName, (advance, bearing) in metrics.items()
    }
def setupHorizontalHeader(self, **values):
    """Create a new `hhea` table, filled with default values that can be
    overridden by keyword arguments.
    """
    self._initTableWithValues("hhea", _hheaDefaults, values)
def setupVerticalHeader(self, **values):
    """Create a new `vhea` table, filled with default values that can be
    overridden by keyword arguments.
    """
    self._initTableWithValues("vhea", _vheaDefaults, values)
def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None):
    """Create a new `VORG` table. The `verticalOrigins` argument must be
    a dict, mapping glyph names to vertical origin values.

    The `defaultVerticalOrigin` argument should be the most common
    vertical origin value. If omitted, this value will be derived from
    the actual values in the `verticalOrigins` argument.
    """
    from collections import Counter

    if defaultVerticalOrigin is None:
        # Use the most frequent origin value. Counter.most_common breaks
        # ties by first occurrence, matching the original hand-rolled
        # counting loop (stable sort over insertion order).
        defaultVerticalOrigin = Counter(verticalOrigins.values()).most_common(1)[0][0]
    self._initTableWithValues(
        "VORG",
        {},
        dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin),
    )
    vorgTable = self.font["VORG"]
    vorgTable.majorVersion = 1
    vorgTable.minorVersion = 0
    # Per-glyph records; glyphs not listed fall back to the default.
    for glyphName, origin in verticalOrigins.items():
        vorgTable[glyphName] = origin
def setupPost(self, keepGlyphNames=True, **values):
    """Create a new `post` table, filled with default values that can be
    overridden by keyword arguments.

    Glyph names are retained (format 2.0) for TrueType and CFF2 fonts
    when `keepGlyphNames` is True; otherwise format 3.0 (no names) is
    written.
    """
    # CFF2 fonts carry no charset of their own, so names live in `post`.
    wantGlyphNames = keepGlyphNames and (self.isTTF or "CFF2" in self.font)
    postTable = self._initTableWithValues("post", _postDefaults, values)
    if wantGlyphNames:
        postTable.formatType = 2.0
        postTable.extraNames = []
        postTable.mapping = {}
    else:
        postTable.formatType = 3.0
def setupMaxp(self):
    """Create a new `maxp` table. This is called implicitly by
    FontBuilder itself and is usually not called by client code.
    """
    # TrueType and CFF flavors use different maxp versions/defaults.
    defaults = _maxpDefaultsTTF if self.isTTF else _maxpDefaultsOTF
    self._initTableWithValues("maxp", defaults, {})
def setupDummyDSIG(self):
    """Add an empty `DSIG` table to the font to make some MS applications
    happy. This does not properly sign the font.
    """
    self._initTableWithValues(
        "DSIG",
        {},
        dict(
            ulVersion=1,
            usFlag=0,
            usNumSigs=0,
            signatureRecords=[],
        ),
    )
def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False):
    """Compile OpenType layout features from a string of Feature File
    syntax and add the resulting tables to the font.

    `filename` is used in error messages and as the base for resolving
    "include" statements. The optional `tables` list restricts which OTL
    tables are built (see `fontTools.feaLib`). When `debug` is true,
    source debugging information is stored in the `Debg` table.
    """
    from .feaLib.builder import addOpenTypeFeaturesFromString

    addOpenTypeFeaturesFromString(
        self.font,
        features,
        filename=filename,
        tables=tables,
        debug=debug,
    )
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
    """Add conditional substitutions to a Variable Font.

    See `fontTools.varLib.featureVars.addFeatureVariations`. Requires
    `fvar` to be set up first.
    """
    from .varLib import featureVars

    if "fvar" not in self.font:
        raise KeyError("'fvar' table is missing; can't add FeatureVariations.")
    featureVars.addFeatureVariations(
        self.font,
        conditionalSubstitutions,
        featureTag=featureTag,
    )
def setupCOLR(
    self,
    colorLayers,
    version=None,
    varStore=None,
    varIndexMap=None,
    clipBoxes=None,
    allowLayerReuse=True,
):
    """Build a new `COLR` table from a color layers dictionary.

    Cf. `fontTools.colorLib.builder.buildCOLR`.
    """
    from fontTools.colorLib.builder import buildCOLR

    # buildCOLR needs a glyph name -> glyph ID mapping for sorting.
    reverseMap = self.font.getReverseGlyphMap()
    self.font["COLR"] = buildCOLR(
        colorLayers,
        version=version,
        glyphMap=reverseMap,
        varStore=varStore,
        varIndexMap=varIndexMap,
        clipBoxes=clipBoxes,
        allowLayerReuse=allowLayerReuse,
    )
def setupCPAL(
    self,
    palettes,
    paletteTypes=None,
    paletteLabels=None,
    paletteEntryLabels=None,
):
    """Build a new `CPAL` table from a list of palettes.

    Passing `paletteTypes`, `paletteLabels` or `paletteEntryLabels`
    produces a CPAL v1 table. Cf. `fontTools.colorLib.builder.buildCPAL`.
    """
    from fontTools.colorLib.builder import buildCPAL

    # The name table (if present) receives the palette label records.
    self.font["CPAL"] = buildCPAL(
        palettes,
        paletteTypes=paletteTypes,
        paletteLabels=paletteLabels,
        paletteEntryLabels=paletteEntryLabels,
        nameTable=self.font.get("name"),
    )
def setupStat(self, axes, locations=None, elidedFallbackName=2):
    """Build a new `STAT` table.

    See `fontTools.otlLib.builder.buildStatTable` for details about
    the arguments. Requires the `name` table to be set up first.
    """
    from .otlLib.builder import buildStatTable

    # Fix: the assertion message previously read "name must to be set up first".
    assert "name" in self.font, "name must be set up first"
    buildStatTable(
        self.font,
        axes,
        locations,
        elidedFallbackName,
        # Only write Mac name records if the name table already has some.
        macNames=any(nr.platformID == 1 for nr in self.font["name"].names),
    )
def buildCmapSubTable(cmapping, format, platformID, platEncID):
    """Create a cmap subtable of the given `format` for the given
    platform/encoding IDs, populated from the `cmapping` dict.
    """
    subtable = cmap_classes[format](format)
    subtable.platformID = platformID
    subtable.platEncID = platEncID
    subtable.language = 0
    subtable.cmap = cmapping
    return subtable
def addFvar(font, axes, instances):
    """Build an `fvar` table on `font` from `axes` and `instances`.

    `axes` is a list of py:class:`.designspaceLib.AxisDescriptor` objects
    or ``(tag, minValue, defaultValue, maxValue, name)`` tuples;
    `instances` is a list of InstanceDescriptor objects or dicts with
    ``location``, ``stylename`` and optional ``postscriptfontname`` keys.
    Requires the `name` table to exist already.
    """
    from .ttLib.tables._f_v_a_r import Axis, NamedInstance

    assert axes

    fvar = newTable("fvar")
    nameTable = font["name"]
    # if there are not currently any mac names don't add them here, that's inconsistent
    # https://github.com/fonttools/fonttools/issues/683
    macNames = any(nr.platformID == 1 for nr in getattr(nameTable, "names", ()))

    # we have all the best ways to express mac names
    platforms = ((3, 1, 0x409),)
    if macNames:
        platforms = ((1, 0, 0),) + platforms

    for axis_def in axes:
        axis = Axis()
        if isinstance(axis_def, tuple):
            (
                axis.axisTag,
                axis.minValue,
                axis.defaultValue,
                axis.maxValue,
                name,
            ) = axis_def
        else:
            (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = (
                axis_def.tag,
                axis_def.minimum,
                axis_def.default,
                axis_def.maximum,
                axis_def.name,
            )
            # Fix: only AxisDescriptor objects carry a `hidden` attribute; in
            # the original this check also ran for tuple-style axes, raising
            # AttributeError.
            if axis_def.hidden:
                axis.flags = 0x0001  # HIDDEN_AXIS
        if isinstance(name, str):
            name = dict(en=name)
        axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font, mac=macNames)
        fvar.axes.append(axis)

    for instance in instances:
        if isinstance(instance, dict):
            coordinates = instance["location"]
            name = instance["stylename"]
            psname = instance.get("postscriptfontname")
        else:
            coordinates = instance.location
            name = instance.localisedStyleName or instance.styleName
            psname = instance.postScriptFontName
        if isinstance(name, str):
            name = dict(en=name)
        inst = NamedInstance()
        inst.subfamilyNameID = nameTable.addMultilingualName(
            name, ttFont=font, mac=macNames
        )
        if psname is not None:
            inst.postscriptNameID = nameTable.addName(psname, platforms=platforms)
        inst.coordinates = coordinates
        fvar.instances.append(inst)

    font["fvar"] = fvar
venv\Lib\site-packages\fontTools\help.py
import pkgutil
import sys
import fontTools
import importlib
import os
from pathlib import Path
def main():
    """Show this help"""
    descriptions = {}
    # Walk all fontTools submodules and collect the first docstring line
    # of each module's main() entry point.
    for pkg in sorted(
        mod.name
        for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.")
    ):
        try:
            imports = __import__(pkg, globals(), locals(), ["main"])
        except ImportError:
            # Some submodules require optional dependencies; skip them.
            continue
        # getattr chain replaces the original try/except AttributeError;
        # modules without a main() (or without a docstring) yield None.
        description = getattr(getattr(imports, "main", None), "__doc__", None)
        # Cython modules seem to return "main()" as the docstring
        if description and description != "main()":
            pkg = pkg.replace("fontTools.", "").replace(".__main__", "")
            # show the docstring's first line only
            descriptions[pkg] = description.splitlines()[0]
    for pkg, description in descriptions.items():
        print("fonttools %-25s %s" % (pkg, description), file=sys.stderr)
if __name__ == "__main__":
    # Print the version banner, then one help line per fontTools module.
    print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr)
    main()
venv\Lib\site-packages\fontTools\tfmLib.py
"""Module for reading TFM (TeX Font Metrics) files.
The TFM format is described in the TFtoPL WEB source code, whose typeset form
can be found on `CTAN <https://ctan.org/pkg/tftopl>`_.
>>> from fontTools.tfmLib import TFM
>>> tfm = TFM("Tests/tfmLib/data/cmr10.tfm")
>>>
>>> # Accessing an attribute gets you metadata.
>>> tfm.checksum
1274110073
>>> tfm.designsize
10.0
>>> tfm.codingscheme
'TeX text'
>>> tfm.family
'CMR'
>>> tfm.seven_bit_safe_flag
False
>>> tfm.face
234
>>> tfm.extraheader
{}
>>> tfm.fontdimens
{'SLANT': 0.0, 'SPACE': 0.33333396911621094, 'STRETCH': 0.16666698455810547, 'SHRINK': 0.11111164093017578, 'XHEIGHT': 0.4305553436279297, 'QUAD': 1.0000028610229492, 'EXTRASPACE': 0.11111164093017578}
>>> # Accessing a character gets you its metrics.
>>> # “width” is always available, other metrics are available only when
>>> # applicable. All values are relative to “designsize”.
>>> tfm.chars[ord("g")]
{'width': 0.5000019073486328, 'height': 0.4305553436279297, 'depth': 0.1944446563720703, 'italic': 0.013888359069824219}
>>> # Kerning and ligature can be accessed as well.
>>> tfm.kerning[ord("c")]
{104: -0.02777862548828125, 107: -0.02777862548828125}
>>> tfm.ligatures[ord("f")]
{105: ('LIG', 12), 102: ('LIG', 11), 108: ('LIG', 13)}
"""
from types import SimpleNamespace
from fontTools.misc.sstruct import calcsize, unpack, unpack2
# sstruct format for the twelve 16-bit size fields at the start of a TFM file.
SIZES_FORMAT = """
>
lf: h # length of the entire file, in words
lh: h # length of the header data, in words
bc: h # smallest character code in the font
ec: h # largest character code in the font
nw: h # number of words in the width table
nh: h # number of words in the height table
nd: h # number of words in the depth table
ni: h # number of words in the italic correction table
nl: h # number of words in the ligature/kern table
nk: h # number of words in the kern table
ne: h # number of words in the extensible character table
np: h # number of font parameter words
"""
SIZES_SIZE = calcsize(SIZES_FORMAT)
# TFM "real" numbers are 32-bit fixed point: 12 integer / 20 fraction bits.
FIXED_FORMAT = "12.20F"
# Header formats 1-4 nest: each extends the previous with more fields.
HEADER_FORMAT1 = f"""
>
checksum: L
designsize: {FIXED_FORMAT}
"""
HEADER_FORMAT2 = f"""
{HEADER_FORMAT1}
codingscheme: 40p
"""
HEADER_FORMAT3 = f"""
{HEADER_FORMAT2}
family: 20p
"""
HEADER_FORMAT4 = f"""
{HEADER_FORMAT3}
seven_bit_safe_flag: ?
ignored: x
ignored: x
face: B
"""
HEADER_SIZE1 = calcsize(HEADER_FORMAT1)
HEADER_SIZE2 = calcsize(HEADER_FORMAT2)
HEADER_SIZE3 = calcsize(HEADER_FORMAT3)
HEADER_SIZE4 = calcsize(HEADER_FORMAT4)
# One four-byte step of the ligature/kern program.
LIG_KERN_COMMAND = """
>
skip_byte: B
next_char: B
op_byte: B
remainder: B
"""
# Names of the first seven fontdimen parameters (present in all font types).
BASE_PARAMS = [
    "SLANT",
    "SPACE",
    "STRETCH",
    "SHRINK",
    "XHEIGHT",
    "QUAD",
    "EXTRASPACE",
]
# Additional parameter names for TeX math symbol fonts.
MATHSY_PARAMS = [
    "NUM1",
    "NUM2",
    "NUM3",
    "DENOM1",
    "DENOM2",
    "SUP1",
    "SUP2",
    "SUP3",
    "SUB1",
    "SUB2",
    "SUPDROP",
    "SUBDROP",
    "DELIM1",
    "DELIM2",
    "AXISHEIGHT",
]
# Additional parameter names for TeX math extension fonts.
MATHEX_PARAMS = [
    "DEFAULTRULETHICKNESS",
    "BIGOPSPACING1",
    "BIGOPSPACING2",
    "BIGOPSPACING3",
    "BIGOPSPACING4",
    "BIGOPSPACING5",
]
# Font type, as derived from the coding scheme string.
VANILLA = 0
MATHSY = 1
MATHEX = 2
UNREACHABLE = 0
PASSTHROUGH = 1
ACCESSABLE = 2
# Character tag values (what the `remainder` field refers to).
NO_TAG = 0
LIG_TAG = 1
LIST_TAG = 2
EXT_TAG = 3
# Flag values used in ligature/kern program steps.
STOP_FLAG = 128
KERN_FLAG = 128
class TFMException(Exception):
    """Raised when a TFM file is malformed or cannot be parsed."""

    def __init__(self, message):
        super().__init__(message)
class TFM:
    """Parser for a single TFM (TeX Font Metrics) file.

    Header metadata is exposed as attributes (`checksum`, `designsize`,
    `codingscheme`, `family`, `face`, ...); per-character metrics live in
    `chars`, kerning in `kerning` and ligature programs in `ligatures`.
    All metric values are fractions of `designsize`.
    """

    def __init__(self, file):
        self._read(file)

    def __repr__(self):
        # NOTE(review): the original repr f-string literal was garbled in this
        # copy of the source (empty f"" remained); reconstructed to a
        # conventional form — confirm against upstream.
        return f"<{type(self).__name__} family={self.family!r} designsize={self.designsize}>"

    def _read(self, file):
        """Parse `file` (a path or binary file object) and populate self."""
        if hasattr(file, "read"):
            data = file.read()
        else:
            with open(file, "rb") as fp:
                data = fp.read()

        self._data = data

        if len(data) < SIZES_SIZE:
            raise TFMException("Too short input file")

        sizes = SimpleNamespace()
        unpack2(SIZES_FORMAT, data, sizes)

        # Do some file structure sanity checks.
        # TeX and TFtoPL do additional functional checks and might even correct
        # “errors” in the input file, but we instead try to output the file as
        # it is as long as it is parsable, even if the data make no sense.
        if sizes.lf < 0:
            raise TFMException("The file claims to have negative or zero length!")
        if len(data) < sizes.lf * 4:
            raise TFMException("The file has fewer bytes than it claims!")

        for name, length in vars(sizes).items():
            if length < 0:
                # Fix: this message was missing its f-string prefix, so the
                # literal text "{name}" was reported instead of the field name.
                raise TFMException(f"The subfile size: '{name}' is negative!")

        if sizes.lh < 2:
            raise TFMException(f"The header length is only {sizes.lh}!")

        if sizes.bc > sizes.ec + 1 or sizes.ec > 255:
            raise TFMException(
                f"The character code range {sizes.bc}..{sizes.ec} is illegal!"
            )

        if sizes.nw == 0 or sizes.nh == 0 or sizes.nd == 0 or sizes.ni == 0:
            raise TFMException("Incomplete subfiles for character dimensions!")

        if sizes.ne > 256:
            # Fix: referenced an undefined name `ne` (NameError when this
            # branch was hit); use sizes.ne.
            raise TFMException(f"There are {sizes.ne} extensible recipes!")

        if sizes.lf != (
            6
            + sizes.lh
            + (sizes.ec - sizes.bc + 1)
            + sizes.nw
            + sizes.nh
            + sizes.nd
            + sizes.ni
            + sizes.nl
            + sizes.nk
            + sizes.ne
            + sizes.np
        ):
            raise TFMException("Subfile sizes don’t add up to the stated total")

        # Subfile offsets, used in the helper function below. These all are
        # 32-bit word offsets not 8-bit byte offsets.
        char_base = 6 + sizes.lh - sizes.bc
        width_base = char_base + sizes.ec + 1
        height_base = width_base + sizes.nw
        depth_base = height_base + sizes.nh
        italic_base = depth_base + sizes.nd
        lig_kern_base = italic_base + sizes.ni
        kern_base = lig_kern_base + sizes.nl
        exten_base = kern_base + sizes.nk
        param_base = exten_base + sizes.ne

        # Helper functions for accessing individual data. If this looks
        # nonidiomatic Python, I blame the effect of reading the literate WEB
        # documentation of TFtoPL.
        def char_info(c):
            return 4 * (char_base + c)

        def width_index(c):
            return data[char_info(c)]

        def noneexistent(c):
            return c < sizes.bc or c > sizes.ec or width_index(c) == 0

        def height_index(c):
            return data[char_info(c) + 1] // 16

        def depth_index(c):
            return data[char_info(c) + 1] % 16

        def italic_index(c):
            return data[char_info(c) + 2] // 4

        def tag(c):
            return data[char_info(c) + 2] % 4

        def remainder(c):
            return data[char_info(c) + 3]

        def width(c):
            r = 4 * (width_base + width_index(c))
            return read_fixed(r, "v")["v"]

        def height(c):
            r = 4 * (height_base + height_index(c))
            return read_fixed(r, "v")["v"]

        def depth(c):
            r = 4 * (depth_base + depth_index(c))
            return read_fixed(r, "v")["v"]

        def italic(c):
            r = 4 * (italic_base + italic_index(c))
            return read_fixed(r, "v")["v"]

        def exten(c):
            return 4 * (exten_base + remainder(c))

        def lig_step(i):
            return 4 * (lig_kern_base + i)

        def lig_kern_command(i):
            command = SimpleNamespace()
            unpack2(LIG_KERN_COMMAND, data[i:], command)
            return command

        def kern(i):
            r = 4 * (kern_base + i)
            return read_fixed(r, "v")["v"]

        def param(i):
            return 4 * (param_base + i)

        def read_fixed(index, key, obj=None):
            ret = unpack2(f">;{key}:{FIXED_FORMAT}", data[index:], obj)
            return ret[0]

        # Set all attributes to empty values regardless of the header size.
        unpack(HEADER_FORMAT4, [0] * HEADER_SIZE4, self)
        offset = 24
        length = sizes.lh * 4
        self.extraheader = {}
        if length >= HEADER_SIZE4:
            rest = unpack2(HEADER_FORMAT4, data[offset:], self)[1]
            if self.face < 18:
                # Decode the face byte into the conventional three-letter
                # code: weight, slope, expansion (e.g. "MRR").
                s = self.face % 2
                b = self.face // 2
                self.face = "MBL"[b % 3] + "RI"[s] + "RCE"[b // 3]
            # Any header words past the known fields go into extraheader.
            for i in range(sizes.lh - HEADER_SIZE4 // 4):
                rest = unpack2(f">;HEADER{i + 18}:l", rest, self.extraheader)[1]
        elif length >= HEADER_SIZE3:
            unpack2(HEADER_FORMAT3, data[offset:], self)
        elif length >= HEADER_SIZE2:
            unpack2(HEADER_FORMAT2, data[offset:], self)
        elif length >= HEADER_SIZE1:
            unpack2(HEADER_FORMAT1, data[offset:], self)

        # The coding scheme tells us which fontdimen parameter set applies.
        self.fonttype = VANILLA
        scheme = self.codingscheme.upper()
        if scheme.startswith("TEX MATH SY"):
            self.fonttype = MATHSY
        elif scheme.startswith("TEX MATH EX"):
            self.fonttype = MATHEX

        self.fontdimens = {}
        for i in range(sizes.np):
            name = f"PARAMETER{i+1}"
            if i <= 6:
                name = BASE_PARAMS[i]
            elif self.fonttype == MATHSY and i <= 21:
                name = MATHSY_PARAMS[i - 7]
            elif self.fonttype == MATHEX and i <= 12:
                name = MATHEX_PARAMS[i - 7]
            read_fixed(param(i), name, self.fontdimens)

        # Map from character code to its ligature/kern program start index.
        lig_kern_map = {}
        self.right_boundary_char = None
        self.left_boundary_char = None
        if sizes.nl > 0:
            # A skip_byte of 255 in the first/last step marks boundary chars.
            cmd = lig_kern_command(lig_step(0))
            if cmd.skip_byte == 255:
                self.right_boundary_char = cmd.next_char
            cmd = lig_kern_command(lig_step((sizes.nl - 1)))
            if cmd.skip_byte == 255:
                self.left_boundary_char = 256
                r = 256 * cmd.op_byte + cmd.remainder
                lig_kern_map[self.left_boundary_char] = r

        self.chars = {}
        for c in range(sizes.bc, sizes.ec + 1):
            # A width index of 0 means the character does not exist.
            if width_index(c) > 0:
                self.chars[c] = info = {}
                info["width"] = width(c)
                if height_index(c) > 0:
                    info["height"] = height(c)
                if depth_index(c) > 0:
                    info["depth"] = depth(c)
                if italic_index(c) > 0:
                    info["italic"] = italic(c)
                char_tag = tag(c)
                if char_tag == NO_TAG:
                    pass
                elif char_tag == LIG_TAG:
                    lig_kern_map[c] = remainder(c)
                elif char_tag == LIST_TAG:
                    info["nextlarger"] = remainder(c)
                elif char_tag == EXT_TAG:
                    # Extensible recipe: top/mid/bot/rep component glyphs.
                    info["varchar"] = varchar = {}
                    for i in range(4):
                        part = data[exten(c) + i]
                        if i == 3 or part > 0:
                            name = "rep"
                            if i == 0:
                                name = "top"
                            elif i == 1:
                                name = "mid"
                            elif i == 2:
                                name = "bot"
                            if noneexistent(part):
                                varchar[name] = c
                            else:
                                varchar[name] = part

        self.ligatures = {}
        self.kerning = {}
        for c, i in sorted(lig_kern_map.items()):
            cmd = lig_kern_command(lig_step(i))
            # skip_byte > STOP_FLAG: the first step is an indirection to
            # the real program start.
            if cmd.skip_byte > STOP_FLAG:
                i = 256 * cmd.op_byte + cmd.remainder
            while i < sizes.nl:
                cmd = lig_kern_command(lig_step(i))
                if cmd.skip_byte > STOP_FLAG:
                    pass
                else:
                    if cmd.op_byte >= KERN_FLAG:
                        r = 256 * (cmd.op_byte - KERN_FLAG) + cmd.remainder
                        self.kerning.setdefault(c, {})[cmd.next_char] = kern(r)
                    else:
                        r = cmd.op_byte
                        if r == 4 or (r > 7 and r != 11):
                            # Ligature step with nonstandard code, we output
                            # the code verbatim.
                            lig = r
                        else:
                            # Build the TFtoPL-style op name, e.g. "/LIG>".
                            lig = ""
                            if r % 4 > 1:
                                lig += "/"
                            lig += "LIG"
                            if r % 2 != 0:
                                lig += "/"
                            while r > 3:
                                lig += ">"
                                r -= 4
                        self.ligatures.setdefault(c, {})[cmd.next_char] = (
                            lig,
                            cmd.remainder,
                        )
                if cmd.skip_byte >= STOP_FLAG:
                    break
                i += cmd.skip_byte + 1
if __name__ == "__main__":
    import sys

    # Quick manual inspection: dump every public field of the given TFM file.
    tfm = TFM(sys.argv[1])
    fields = [
        "checksum",
        "designsize",
        "codingscheme",
        "fonttype",
        "family",
        "seven_bit_safe_flag",
        "face",
        "extraheader",
        "fontdimens",
        "right_boundary_char",
        "left_boundary_char",
        "kerning",
        "ligatures",
        "chars",
    ]
    print("\n".join(f"tfm.{field}={getattr(tfm, field)}" for field in fields))
    print(tfm)
venv\Lib\site-packages\fontTools\ttx.py
"""\
usage: ttx [options] inputfile1 [... inputfileN]
TTX -- From OpenType To XML And Back
If an input file is a TrueType or OpenType font file, it will be
decompiled to a TTX file (an XML-based text format).
If an input file is a TTX file, it will be compiled to whatever
format the data is in, a TrueType or OpenType/CFF font file.
A special input value of - means read from the standard input.
Output files are created so they are unique: an existing file is
never overwritten.
General options
===============
-h Help print this message.
--version show version and exit.
-d <outputfolder> Specify a directory where the output files are
to be created.
-o <outputfile> Specify a file to write the output to. A special
value of - would use the standard output.
-f Overwrite existing output file(s), ie. don't append
numbers.
-v Verbose: more messages will be written to stdout
about what is being done.
-q Quiet: No messages will be written to stdout about
what is being done.
-a allow virtual glyphs ID's on compile or decompile.
Dump options
============
-l List table info: instead of dumping to a TTX file, list
some minimal info about each table.
-t <table>
Specify a table to dump. Multiple -t options
are allowed. When no -t option is specified, all tables
will be dumped.
-x <table>
Specify a table to exclude from the dump. Multiple
-x options are allowed. -t and -x are mutually exclusive.
-s Split tables: save the TTX data into separate TTX files per
table and write one small TTX file that contains references
to the individual table dumps. This file can be used as
input to ttx, as long as the table files are in the
same directory.
-g Split glyf table: Save the glyf data into separate TTX files
per glyph and write a small TTX for the glyf table which
contains references to the individual TTGlyph elements.
NOTE: specifying -g implies -s (no need for -s together
with -g)
-i Do NOT disassemble TT instructions: when this option is
given, all TrueType programs (glyph programs, the font
program and the pre-program) will be written to the TTX
file as hex data instead of assembly. This saves some time
and makes the TTX file smaller.
-z <format> Specify a bitmap data export option for EBDT:
{'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
{'raw', 'extfile'} Each option does one of the following:
-z raw
export the bitmap data as a hex dump
-z row
export each row as hex data
-z bitwise
export each row as binary in an ASCII art style
-z extfile
export the data as external files with XML references
If no export format is specified 'raw' format is used.
-e Don't ignore decompilation errors, but show a full traceback
and abort.
-y <number> Select font number for TrueType Collection (.ttc/.otc),
starting from 0.
--unicodedata <UnicodeData.txt>
Use custom database file to write character names in the
comments of the cmap TTX output.
--newline <value>
Control how line endings are written in the XML file. It
can be 'LF', 'CR', or 'CRLF'. If not specified, the
default platform-specific line endings are used.
Compile options
===============
-m <ttffile> Merge with TrueType-input-file: specify a TrueType or
OpenType font file to be merged with the TTX file. This
option is only valid when at most one TTX file is specified.
-b Don't recalc glyph bounding boxes: use the values in the
TTX file as-is.
--recalc-timestamp
Set font 'modified' timestamp to current time.
By default, the modification time of the TTX file will be
used.
--no-recalc-timestamp
Keep the original font 'modified' timestamp.
--flavor <type>
Specify flavor of output font file. May be 'woff' or 'woff2'.
Note that WOFF2 requires the Brotli Python extension,
available at https://github.com/google/brotli
--with-zopfli
Use Zopfli instead of Zlib to compress WOFF. The Python
extension is available at https://pypi.python.org/pypi/zopfli
--optimize-font-speed
Enable optimizations that prioritize speed over file size.
This mainly affects how the glyf table and gvar / VARC tables are
compiled. The produced fonts will be larger, but rendering
performance will be improved with HarfBuzz and other text
layout engines.
"""
from fontTools.ttLib import OPTIMIZE_FONT_SPEED, TTFont, TTLibError
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.unicode import setUnicodeData
from fontTools.misc.textTools import Tag, tostr
from fontTools.misc.timeTools import timestampSinceEpoch
from fontTools.misc.loggingTools import Timer
from fontTools.misc.cliTools import makeOutputFileName
import os
import sys
import getopt
import re
import logging
log = logging.getLogger("fontTools.ttx")
opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
class Options(object):
    """Parsed command-line options for the ttx tool.

    Class attributes hold the documented defaults; ``__init__`` overrides
    them per-instance from getopt-style ``(option, value)`` pairs and
    validates mutually exclusive combinations.
    """

    # Defaults; each may be overridden per-instance in __init__.
    listTables = False
    outputDir = None
    outputFile = None
    overWrite = False
    verbose = False
    quiet = False
    splitTables = False
    splitGlyphs = False
    disassembleInstructions = True
    mergeFile = None
    recalcBBoxes = True
    ignoreDecompileErrors = True
    bitmapGlyphDataFormat = "raw"
    unicodedata = None
    newlinestr = "\n"
    recalcTimestamp = None
    flavor = None
    useZopfli = False
    optimizeFontSpeed = False

    def __init__(self, rawOptions, numFiles):
        """Parse *rawOptions* (getopt pairs), given *numFiles* input files.

        Raises
        ------
        getopt.GetoptError
            For invalid option values or conflicting option combinations.
        """
        self.onlyTables = []
        self.skipTables = []
        self.fontNumber = -1
        for option, value in rawOptions:
            # general options
            if option == "-h":
                print(__doc__)
                sys.exit(0)
            elif option == "--version":
                from fontTools import version

                print(version)
                sys.exit(0)
            elif option == "-d":
                if not os.path.isdir(value):
                    raise getopt.GetoptError(
                        "The -d option value must be an existing directory"
                    )
                self.outputDir = value
            elif option == "-o":
                self.outputFile = value
            elif option == "-f":
                self.overWrite = True
            elif option == "-v":
                self.verbose = True
            elif option == "-q":
                self.quiet = True
            # dump options
            elif option == "-l":
                self.listTables = True
            elif option == "-t":
                # pad with space if table tag length is less than 4
                value = value.ljust(4)
                self.onlyTables.append(value)
            elif option == "-x":
                # pad with space if table tag length is less than 4
                value = value.ljust(4)
                self.skipTables.append(value)
            elif option == "-s":
                self.splitTables = True
            elif option == "-g":
                # -g implies (and forces) splitTables
                self.splitGlyphs = True
                self.splitTables = True
            elif option == "-i":
                self.disassembleInstructions = False
            elif option == "-z":
                validOptions = ("raw", "row", "bitwise", "extfile")
                if value not in validOptions:
                    # BUGFIX: report the offending *value*; previously the
                    # message interpolated `option` and always printed "-z".
                    raise getopt.GetoptError(
                        "-z does not allow %s as a format. Use %s"
                        % (value, validOptions)
                    )
                self.bitmapGlyphDataFormat = value
            elif option == "-y":
                self.fontNumber = int(value)
            # compile options
            elif option == "-m":
                self.mergeFile = value
            elif option == "-b":
                self.recalcBBoxes = False
            elif option == "-e":
                self.ignoreDecompileErrors = False
            elif option == "--unicodedata":
                self.unicodedata = value
            elif option == "--newline":
                validOptions = ("LF", "CR", "CRLF")
                if value == "LF":
                    self.newlinestr = "\n"
                elif value == "CR":
                    self.newlinestr = "\r"
                elif value == "CRLF":
                    self.newlinestr = "\r\n"
                else:
                    raise getopt.GetoptError(
                        "Invalid choice for --newline: %r (choose from %s)"
                        % (value, ", ".join(map(repr, validOptions)))
                    )
            elif option == "--recalc-timestamp":
                self.recalcTimestamp = True
            elif option == "--no-recalc-timestamp":
                self.recalcTimestamp = False
            elif option == "--flavor":
                self.flavor = value
            elif option == "--with-zopfli":
                self.useZopfli = True
            elif option == "--optimize-font-speed":
                self.optimizeFontSpeed = True
        # Cross-option validation.
        if self.verbose and self.quiet:
            raise getopt.GetoptError("-q and -v options are mutually exclusive")
        if self.verbose:
            self.logLevel = logging.DEBUG
        elif self.quiet:
            self.logLevel = logging.WARNING
        else:
            self.logLevel = logging.INFO
        if self.mergeFile and self.flavor:
            raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
        if self.onlyTables and self.skipTables:
            raise getopt.GetoptError("-t and -x options are mutually exclusive")
        if self.mergeFile and numFiles > 1:
            raise getopt.GetoptError(
                "Must specify exactly one TTX source file when using -m"
            )
        if self.flavor != "woff" and self.useZopfli:
            raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
def ttList(input, output, options):
    """Print tag/checksum/length/offset info for every table in *input*."""
    font = TTFont(input, fontNumber=options.fontNumber, lazy=True)
    reader = font.reader
    print('Listing table info for "%s":' % input)
    fmt = " %4s %10s %8s %8s"
    print(fmt % ("tag ", " checksum", " length", " offset"))
    print(fmt % ("----", "----------", "--------", "--------"))
    for tag in sorted(reader.keys()):
        entry = reader.tables[tag]
        if font.flavor == "woff2":
            # WOFF2 doesn't store table checksums, so they must be calculated
            from fontTools.ttLib.sfnt import calcChecksum

            checkSum = calcChecksum(entry.loadData(reader.transformBuffer))
        else:
            checkSum = int(entry.checkSum)
        if checkSum < 0:
            # Display as an unsigned 32-bit value.
            checkSum += 0x100000000
        print(fmt % (tag, "0x%08X" % checkSum, entry.length, entry.offset))
    print()
    font.close()
@Timer(log, "Done dumping TTX in %(time).3f seconds")
def ttDump(input, output, options):
    """Dump font *input* to TTX file *output* ("-" means stdin/stdout)."""
    if input == "-":
        input, input_name = sys.stdin.buffer, sys.stdin.name
    else:
        input_name = input
    if output == "-":
        output, output_name = sys.stdout, sys.stdout.name
    else:
        output_name = output
    log.info('Dumping "%s" to "%s"...', input_name, output_name)
    if options.unicodedata:
        # Install the custom character-name database before dumping.
        setUnicodeData(options.unicodedata)
    font = TTFont(
        input,
        0,
        ignoreDecompileErrors=options.ignoreDecompileErrors,
        fontNumber=options.fontNumber,
    )
    font.saveXML(
        output,
        tables=options.onlyTables,
        skipTables=options.skipTables,
        splitTables=options.splitTables,
        splitGlyphs=options.splitGlyphs,
        disassembleInstructions=options.disassembleInstructions,
        bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
        newlinestr=options.newlinestr,
    )
    font.close()
@Timer(log, "Done compiling TTX in %(time).3f seconds")
def ttCompile(input, output, options):
    """Compile TTX file *input* into binary font *output* ("-" = stdin/stdout).

    Honors the merge file, flavor, bbox-recalc and timestamp settings
    carried by *options*.
    """
    input_name = input
    if input == "-":
        input, input_name = sys.stdin, sys.stdin.name
    output_name = output
    if output == "-":
        output, output_name = sys.stdout.buffer, sys.stdout.name
    # BUGFIX: log the display name (output_name) rather than `output`, which
    # may have been rebound to sys.stdout.buffer above; also use lazy
    # %-style logging args, consistent with ttDump.
    log.info('Compiling "%s" to "%s"...', input_name, output_name)
    if options.useZopfli:
        from fontTools.ttLib import sfnt

        sfnt.USE_ZOPFLI = True
    ttf = TTFont(
        options.mergeFile,
        flavor=options.flavor,
        recalcBBoxes=options.recalcBBoxes,
        recalcTimestamp=options.recalcTimestamp,
    )
    if options.optimizeFontSpeed:
        ttf.cfg[OPTIMIZE_FONT_SPEED] = options.optimizeFontSpeed
    ttf.importXML(input)
    if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
        # use TTX file modification time for head "modified" timestamp
        mtime = os.path.getmtime(input)
        ttf["head"].modified = timestampSinceEpoch(mtime)
    ttf.save(output)
def guessFileType(fileName):
if fileName == "-":
header = sys.stdin.buffer.peek(256)
ext = ""
else:
base, ext = os.path.splitext(fileName)
try:
with open(fileName, "rb") as f:
header = f.read(256)
except IOError:
return None
if header.startswith(b"\xef\xbb\xbf
venv\Lib\site-packages\fontTools\unicode.py
def _makeunicodes(f):
lines = iter(f.readlines())
unicodes = {}
for line in lines:
if not line:
continue
num, name = line.split(";")[:2]
if name[0] == "<":
continue # "", etc.
num = int(num, 16)
unicodes[num] = name
return unicodes
class _UnicodeCustom(object):
    """Character-name lookup backed by a user-supplied UnicodeData file."""

    def __init__(self, f):
        # Accept either a filename or an already-open file-like object.
        if isinstance(f, str):
            with open(f) as fileobj:
                self.codes = _makeunicodes(fileobj)
        else:
            self.codes = _makeunicodes(f)

    def __getitem__(self, charCode):
        # Unknown code points map to the "????" placeholder.
        try:
            return self.codes[charCode]
        except KeyError:
            return "????"
class _UnicodeBuiltin(object):
def __getitem__(self, charCode):
try:
# use unicodedata backport to python2, if available:
# https://github.com/mikekap/unicodedata2
import unicodedata2 as unicodedata
except ImportError:
import unicodedata
try:
return unicodedata.name(chr(charCode))
except ValueError:
return "????"
# Module-level lookup object; defaults to the built-in unicodedata names.
Unicode = _UnicodeBuiltin()
def setUnicodeData(f):
    # Replace the global lookup with one backed by a custom UnicodeData
    # file (path or file-like object); ttDump calls this for --unicodedata.
    global Unicode
    Unicode = _UnicodeCustom(f)
venv\Lib\site-packages\fontTools\__init__.py
import logging
from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
# Library version, exposed both as `version` and the conventional `__version__`.
version = __version__ = "4.59.0"
__all__ = ["version", "log", "configLogger"]
venv\Lib\site-packages\fontTools\__main__.py
import sys
def main(args=None):
    """Dispatch ``fonttools <subcommand>`` to the ``fontTools.<subcommand>`` module.

    NOTE(review): *args* is accepted but effectively unused beyond its
    default; the dispatch below reads and mutates ``sys.argv`` directly —
    confirm before relying on passing *args* explicitly.
    """
    if args is None:
        args = sys.argv[1:]
    # TODO Handle library-wide options. Eg.:
    # --unicodedata
    # --verbose / other logging stuff
    # TODO Allow a way to run arbitrary modules? Useful for setting
    # library-wide options and calling another library. Eg.:
    #
    # $ fonttools --unicodedata=... fontmake ...
    #
    # This allows for a git-like command where thirdparty commands
    # can be added. Should we just try importing the fonttools
    # module first and try without if it fails?
    if len(sys.argv) < 2:
        sys.argv.append("help")
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        sys.argv[1] = "help"
    mod = "fontTools." + sys.argv[1]
    # Rewrite argv so the sub-module sees "<prog> <subcommand>" as its own
    # program name, then drop our argv[0] so its arguments line up.
    sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
    del sys.argv[0]
    import runpy

    # Execute the subcommand module as if run with `python -m`.
    runpy.run_module(mod, run_name="__main__")
if __name__ == "__main__":
sys.exit(main())
venv\Lib\site-packages\kiwisolver\exceptions.py
# --------------------------------------------------------------------------------------
# Copyright (c) 2023-2024, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
"""Kiwi exceptions.
Imported by the kiwisolver C extension.
"""
class BadRequiredStrength(Exception):
    """Kiwi exception raised from the kiwisolver C extension."""

    pass
class DuplicateConstraint(Exception):
__slots__ = ("constraint",)
def __init__(self, constraint):
self.constraint = constraint
class DuplicateEditVariable(Exception):
__slots__ = ("edit_variable",)
def __init__(self, edit_variable):
self.edit_variable = edit_variable
class UnknownConstraint(Exception):
__slots__ = ("constraint",)
def __init__(self, constraint):
self.constraint = constraint
class UnknownEditVariable(Exception):
__slots__ = ("edit_variable",)
def __init__(self, edit_variable):
self.edit_variable = edit_variable
class UnsatisfiableConstraint(Exception):
__slots__ = ("constraint",)
def __init__(self, constraint):
self.constraint = constraint
venv\Lib\site-packages\kiwisolver\__init__.py
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2024, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
from ._cext import (
Constraint,
Expression,
Solver,
Term,
Variable,
__kiwi_version__,
__version__,
strength,
)
from .exceptions import (
BadRequiredStrength,
DuplicateConstraint,
DuplicateEditVariable,
UnknownConstraint,
UnknownEditVariable,
UnsatisfiableConstraint,
)
# Public kiwisolver API: names re-exported from the ._cext extension module
# and the .exceptions module imported above.
__all__ = [
    "BadRequiredStrength",
    "Constraint",
    "DuplicateConstraint",
    "DuplicateEditVariable",
    "Expression",
    "Solver",
    "Term",
    "UnknownConstraint",
    "UnknownEditVariable",
    "UnsatisfiableConstraint",
    "Variable",
    "__kiwi_version__",
    "__version__",
    "strength",
]
venv\Lib\site-packages\matplotlib\animation.py
import abc
import base64
import contextlib
from io import BytesIO, TextIOWrapper
import itertools
import logging
from pathlib import Path
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
import uuid
import warnings
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib._animation_data import (
DISPLAY_TEMPLATE, INCLUDED_FRAMES, JS_INCLUDE, STYLE_INCLUDE)
from matplotlib import _api, cbook
import matplotlib.colors as mcolors
_log = logging.getLogger(__name__)
# Process creation flag for subprocess to prevent it raising a terminal
# window. See for example https://stackoverflow.com/q/24130623/
subprocess_creation_flags = (
subprocess.CREATE_NO_WINDOW if sys.platform == 'win32' else 0)
def adjusted_figsize(w, h, dpi, n):
    """
    Return a figure size (inches) whose pixel dimensions are multiples of *n*.

    Parameters
    ----------
    w, h : float
        Requested size in inches.
    dpi : float
        The dpi.
    n : int
        The target pixel multiple.

    Returns
    -------
    wnew, hnew : float
        The adjusted figure size in inches.
    """
    # this maybe simplified if / when we adopt consistent rounding for
    # pixel size across the whole library
    def _snap(value):
        # Nudge by one ULP when float round-off after the division below
        # leaves int(value * dpi) not divisible by n.
        if int(value * dpi) % n != 0:
            up = np.nextafter(value, np.inf)
            down = np.nextafter(value, -np.inf)
            if int(up * dpi) % n == 0:
                value = up
            elif int(down * dpi) % n == 0:
                value = down
        return value

    return (_snap(int(w * dpi / n) * n / dpi),
            _snap(int(h * dpi / n) * n / dpi))
class MovieWriterRegistry:
    """Registry of movie-writer classes keyed by a human readable name."""

    def __init__(self):
        self._registered = {}

    def register(self, name):
        """
        Return a class decorator that records the class under *name*.

        Example use::

            @registry.register(name)
            class Foo:
                pass
        """
        def decorator(cls):
            self._registered[name] = cls
            return cls
        return decorator

    def is_available(self, name):
        """
        Return whether the writer registered under *name* reports available.

        Parameters
        ----------
        name : str

        Returns
        -------
        bool
        """
        try:
            cls = self._registered[name]
        except KeyError:
            return False
        return cls.isAvailable()

    def __iter__(self):
        """Yield the names of all currently-available writers."""
        return (name for name in self._registered if self.is_available(name))

    def list(self):
        """Get a list of available MovieWriters."""
        return [*self]

    def __getitem__(self, name):
        """Return the writer class for *name*; raise RuntimeError if unavailable."""
        if self.is_available(name):
            return self._registered[name]
        raise RuntimeError(f"Requested MovieWriter ({name}) not available")
# Global registry instance; the writer classes below register themselves
# into it via @writers.register(...).
writers = MovieWriterRegistry()
class AbstractMovieWriter(abc.ABC):
    """
    Abstract base class for movie writers.

    Frames are captured one at a time with `~AbstractMovieWriter.grab_frame`;
    `setup` starts the process and `finish` completes it.  The `saving`
    context manager runs both for you::

        with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
            # Iterate over frames
            moviewriter.grab_frame(**savefig_kwargs)

    An instance of a concrete subclass of this class can be given as the
    ``writer`` argument of `Animation.save()`.
    """

    def __init__(self, fps=5, metadata=None, codec=None, bitrate=None):
        self.fps = fps
        self.metadata = {} if metadata is None else metadata
        self.codec = mpl._val_or_rc(codec, 'animation.codec')
        self.bitrate = mpl._val_or_rc(bitrate, 'animation.bitrate')

    @abc.abstractmethod
    def setup(self, fig, outfile, dpi=None):
        """
        Prepare for writing the movie file.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure object that contains the information for frames.
        outfile : str
            The filename of the resulting movie file.
        dpi : float, default: ``fig.dpi``
            The DPI (or resolution) for the file; together with the figure
            size this controls the pixel size of the movie.
        """
        # Validate the output directory up front (raises if it is missing).
        Path(outfile).parent.resolve(strict=True)
        self.outfile = outfile
        self.fig = fig
        self.dpi = fig.dpi if dpi is None else dpi

    @property
    def frame_size(self):
        """A tuple ``(width, height)`` in pixels of a movie frame."""
        width_in, height_in = self.fig.get_size_inches()
        return int(width_in * self.dpi), int(height_in * self.dpi)

    def _supports_transparency(self):
        """
        Whether this writer supports transparency.

        Writers may consult output file type and codec to determine this at runtime.
        """
        return False

    @abc.abstractmethod
    def grab_frame(self, **savefig_kwargs):
        """
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in *savefig_kwargs* are passed on to the
        `~.Figure.savefig` call that saves the figure.  Several keywords that
        `~.Figure.savefig` supports may not be passed here because the
        MovieWriter controls them:

        - *dpi*, *bbox_inches*: every frame of the animation must have
          exactly the same pixel size.
        - *format*: chosen by the MovieWriter.
        """

    @abc.abstractmethod
    def finish(self):
        """Finish any processing for writing the movie."""

    @contextlib.contextmanager
    def saving(self, fig, outfile, dpi, *args, **kwargs):
        """
        Context manager to facilitate writing the movie file.

        ``*args, **kw`` are any parameters that should be passed to `setup`.
        """
        if mpl.rcParams['savefig.bbox'] == 'tight':
            _log.info("Disabling savefig.bbox = 'tight', as it may cause "
                      "frame size to vary, which is inappropriate for "
                      "animation.")
        # setup() must run before yielding control back to the caller
        # (this ordering is what contextlib.contextmanager expects).
        self.setup(fig, outfile, dpi, *args, **kwargs)
        with mpl.rc_context({'savefig.bbox': None}):
            try:
                yield self
            finally:
                self.finish()
class MovieWriter(AbstractMovieWriter):
    """
    Base class for writing movies.

    This is a base class for MovieWriter subclasses that write a movie frame
    data to a pipe. You cannot instantiate this class directly.
    See examples for how to use its subclasses.

    Attributes
    ----------
    frame_format : str
        The format used in writing frame data, defaults to 'rgba'.
    fig : `~matplotlib.figure.Figure`
        The figure to capture data from.
        This must be provided by the subclasses.
    """

    # Builtin writer subclasses additionally define the _exec_key and _args_key
    # attributes, which indicate the rcParams entries where the path to the
    # executable and additional command-line arguments to the executable are
    # stored. Third-party writers cannot meaningfully set these as they cannot
    # extend rcParams with new keys.

    # Pipe-based writers only support RGBA, but file-based ones support more
    # formats.
    supported_formats = ["rgba"]

    def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
                 metadata=None):
        """
        Parameters
        ----------
        fps : int, default: 5
            Movie frame rate (per second).
        codec : str or None, default: :rc:`animation.codec`
            The codec to use.
        bitrate : int, default: :rc:`animation.bitrate`
            The bitrate of the movie, in kilobits per second. Higher values
            means higher quality movies, but increase the file size. A value
            of -1 lets the underlying movie encoder select the bitrate.
        extra_args : list of str or None, optional
            Extra command-line arguments passed to the underlying movie encoder. These
            arguments are passed last to the encoder, just before the filename. The
            default, None, means to use :rc:`animation.[name-of-encoder]_args` for the
            builtin writers.
        metadata : dict[str, str], default: {}
            A dictionary of keys and values for metadata to include in the
            output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        """
        if type(self) is MovieWriter:
            # TODO MovieWriter is still an abstract class and needs to be
            # extended with a mixin. This should be clearer in naming
            # and description. For now, just give a reasonable error
            # message to users.
            raise TypeError(
                'MovieWriter cannot be instantiated directly. Please use one '
                'of its subclasses.')
        super().__init__(fps=fps, metadata=metadata, codec=codec,
                         bitrate=bitrate)
        self.frame_format = self.supported_formats[0]
        self.extra_args = extra_args

    def _adjust_frame_size(self):
        # h264 needs even pixel dimensions; minimally resize the figure when
        # that codec is selected.
        if self.codec == 'h264':
            wo, ho = self.fig.get_size_inches()
            w, h = adjusted_figsize(wo, ho, self.dpi, 2)
            if (wo, ho) != (w, h):
                self.fig.set_size_inches(w, h, forward=True)
                _log.info('figure size in inches has been adjusted '
                          'from %s x %s to %s x %s', wo, ho, w, h)
        else:
            w, h = self.fig.get_size_inches()
        _log.debug('frame size in pixels is %s x %s', *self.frame_size)
        return w, h

    def setup(self, fig, outfile, dpi=None):
        # docstring inherited
        super().setup(fig, outfile, dpi=dpi)
        self._w, self._h = self._adjust_frame_size()
        # Run here so that grab_frame() can write the data to a pipe. This
        # eliminates the need for temp files.
        self._run()

    def _run(self):
        # Uses subprocess to call the program for assembling frames into a
        # movie file. *args* returns the sequence of command line arguments
        # from a few configuration options.
        command = self._args()
        _log.info('MovieWriter._run: running command: %s',
                  cbook._pformat_subprocess(command))
        PIPE = subprocess.PIPE
        self._proc = subprocess.Popen(
            command, stdin=PIPE, stdout=PIPE, stderr=PIPE,
            creationflags=subprocess_creation_flags)

    def finish(self):
        """Finish any processing for writing the movie."""
        out, err = self._proc.communicate()
        # Use the encoding/errors that universal_newlines would use.
        out = TextIOWrapper(BytesIO(out)).read()
        err = TextIOWrapper(BytesIO(err)).read()
        if out:
            _log.log(
                logging.WARNING if self._proc.returncode else logging.DEBUG,
                "MovieWriter stdout:\n%s", out)
        if err:
            _log.log(
                logging.WARNING if self._proc.returncode else logging.DEBUG,
                "MovieWriter stderr:\n%s", err)
        if self._proc.returncode:
            raise subprocess.CalledProcessError(
                self._proc.returncode, self._proc.args, out, err)

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        _validate_grabframe_kwargs(savefig_kwargs)
        _log.debug('MovieWriter.grab_frame: Grabbing frame.')
        # Readjust the figure size in case it has been changed by the user.
        # All frames must have the same size to save the movie correctly.
        self.fig.set_size_inches(self._w, self._h)
        # Save the figure data to the sink, using the frame format and dpi.
        self.fig.savefig(self._proc.stdin, format=self.frame_format,
                         dpi=self.dpi, **savefig_kwargs)

    def _args(self):
        """Assemble list of encoder-specific command-line arguments."""
        # BUGFIX: this previously *returned* a NotImplementedError instance,
        # which _run() would then hand to subprocess as the command; raise it
        # instead so subclasses that forget to override fail loudly.
        raise NotImplementedError("args needs to be implemented by subclass.")

    @classmethod
    def bin_path(cls):
        """
        Return the binary path to the commandline tool used by a specific
        subclass. This is a class method so that the tool can be looked for
        before making a particular MovieWriter subclass available.
        """
        return str(mpl.rcParams[cls._exec_key])

    @classmethod
    def isAvailable(cls):
        """Return whether a MovieWriter subclass is actually available."""
        return shutil.which(cls.bin_path()) is not None
class FileMovieWriter(MovieWriter):
    """
    `MovieWriter` for writing to individual files and stitching at the end.

    This must be sub-classed to be useful.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Unlike the pipe-based base class (fixed to 'rgba'), the per-frame
        # file format is configurable via rcParams.
        self.frame_format = mpl.rcParams['animation.frame_format']

    def setup(self, fig, outfile, dpi=None, frame_prefix=None):
        """
        Setup for writing the movie file.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure to grab the rendered frames from.
        outfile : str
            The filename of the resulting movie file.
        dpi : float, default: ``fig.dpi``
            The dpi of the output file. This, with the figure size,
            controls the size in pixels of the resulting movie file.
        frame_prefix : str, optional
            The filename prefix to use for temporary files. If *None* (the
            default), files are written to a temporary directory which is
            deleted by `finish`; if not *None*, no temporary files are
            deleted.
        """
        # Check that path is valid
        Path(outfile).parent.resolve(strict=True)
        self.fig = fig
        self.outfile = outfile
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi
        self._adjust_frame_size()
        if frame_prefix is None:
            # Auto-managed temp dir: cleaned up in finish() and __del__.
            self._tmpdir = TemporaryDirectory()
            self.temp_prefix = str(Path(self._tmpdir.name, 'tmp'))
        else:
            # Caller-managed prefix: frame files are kept.
            self._tmpdir = None
            self.temp_prefix = frame_prefix
        self._frame_counter = 0  # used for generating sequential file names
        self._temp_paths = list()
        # Template: <prefix><7-digit frame index>.<frame format extension>.
        self.fname_format_str = '%s%%07d.%s'

    def __del__(self):
        # Best-effort cleanup of the temporary directory; guarded because
        # __del__ may run before setup() ever created _tmpdir.
        if hasattr(self, '_tmpdir') and self._tmpdir:
            self._tmpdir.cleanup()

    @property
    def frame_format(self):
        """
        Format (png, jpeg, etc.) to use for saving the frames, which can be
        decided by the individual subclasses.
        """
        return self._frame_format

    @frame_format.setter
    def frame_format(self, frame_format):
        # Unsupported formats fall back (with a warning) to the subclass's
        # first supported format rather than raising.
        if frame_format in self.supported_formats:
            self._frame_format = frame_format
        else:
            _api.warn_external(
                f"Ignoring file format {frame_format!r} which is not "
                f"supported by {type(self).__name__}; using "
                f"{self.supported_formats[0]} instead.")
            self._frame_format = self.supported_formats[0]

    def _base_temp_name(self):
        # Generates a template name (without number) given the frame format
        # for extension and the prefix.
        return self.fname_format_str % (self.temp_prefix, self.frame_format)

    def grab_frame(self, **savefig_kwargs):
        # docstring inherited
        # Creates a filename for saving using basename and counter.
        _validate_grabframe_kwargs(savefig_kwargs)
        path = Path(self._base_temp_name() % self._frame_counter)
        self._temp_paths.append(path)  # Record the filename for later use.
        self._frame_counter += 1  # Ensures each created name is unique.
        _log.debug('FileMovieWriter.grab_frame: Grabbing frame %d to path=%s',
                   self._frame_counter, path)
        with open(path, 'wb') as sink:  # Save figure to the sink.
            self.fig.savefig(sink, format=self.frame_format, dpi=self.dpi,
                             **savefig_kwargs)

    def finish(self):
        # Call run here now that all frame grabbing is done. All temp files
        # are available to be assembled.
        try:
            self._run()
            super().finish()
        finally:
            # Always clean the auto-managed temp dir, even if encoding failed.
            if self._tmpdir:
                _log.debug(
                    'MovieWriter: clearing temporary path=%s', self._tmpdir
                )
                self._tmpdir.cleanup()
@writers.register('pillow')
class PillowWriter(AbstractMovieWriter):
    """Movie writer that collects frames in memory and saves them via Pillow."""

    def _supports_transparency(self):
        return True

    @classmethod
    def isAvailable(cls):
        # No external binary is required.
        return True

    def setup(self, fig, outfile, dpi=None):
        super().setup(fig, outfile, dpi=dpi)
        self._frames = []

    def grab_frame(self, **savefig_kwargs):
        _validate_grabframe_kwargs(savefig_kwargs)
        buf = BytesIO()
        self.fig.savefig(
            buf, **{**savefig_kwargs, "format": "rgba", "dpi": self.dpi})
        frame = Image.frombuffer(
            "RGBA", self.frame_size, buf.getbuffer(), "raw", "RGBA", 0, 1)
        if frame.getextrema()[3][0] < 255:
            # This frame has transparency, so we'll just add it as is.
            self._frames.append(frame)
        else:
            # Without transparency, we switch to RGB mode, which converts to P mode a
            # little better if needed (specifically, this helps with GIF output.)
            self._frames.append(frame.convert("RGB"))

    def finish(self):
        self._frames[0].save(
            self.outfile, save_all=True, append_images=self._frames[1:],
            duration=int(1000 / self.fps), loop=0)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase:
    """
    Mixin class for FFMpeg output.

    This is a base class for the concrete `FFMpegWriter` and `FFMpegFileWriter`
    classes.
    """
    _exec_key = 'animation.ffmpeg_path'
    _args_key = 'animation.ffmpeg_args'

    def _supports_transparency(self):
        """Whether the chosen container or codec can carry an alpha channel."""
        suffix = Path(self.outfile).suffix
        if suffix in {'.apng', '.avif', '.gif', '.webm', '.webp'}:
            return True
        # This list was found by going through `ffmpeg -codecs` for video encoders,
        # running them with _support_transparency() forced to True, and checking that
        # the "Pixel format" in Kdenlive included alpha. Note this is not a guarantee
        # that transparency will work; you may also need to pass `-pix_fmt`, but we
        # trust the user has done so if they are asking for these formats.
        return self.codec in {
            'apng', 'avrp', 'bmp', 'cfhd', 'dpx', 'ffv1', 'ffvhuff', 'gif', 'huffyuv',
            'jpeg2000', 'ljpeg', 'png', 'prores', 'prores_aw', 'prores_ks', 'qtrle',
            'rawvideo', 'targa', 'tiff', 'utvideo', 'v408', }

    @property
    def output_args(self):
        """The output-side ffmpeg arguments: codec, filters, bitrate, metadata."""
        args = []
        suffix = Path(self.outfile).suffix
        if suffix in {'.apng', '.avif', '.gif', '.webm', '.webp'}:
            # For these containers the codec is implied by the file suffix.
            self.codec = suffix[1:]
        else:
            args.extend(['-vcodec', self.codec])
        extra_args = (self.extra_args if self.extra_args is not None
                      else mpl.rcParams[self._args_key])
        # For h264, the default format is yuv444p, which is not compatible
        # with quicktime (and others). Specifying yuv420p fixes playback on
        # iOS, as well as HTML5 video in firefox and safari (on both Windows and
        # macOS). Also fixes internet explorer. This is as of 2015/10/29.
        if self.codec == 'h264' and '-pix_fmt' not in extra_args:
            args.extend(['-pix_fmt', 'yuv420p'])
        # For GIF, we're telling FFmpeg to split the video stream, to generate
        # a palette, and then use it for encoding.
        elif self.codec == 'gif' and '-filter_complex' not in extra_args:
            args.extend(['-filter_complex',
                         'split [a][b];[a] palettegen [p];[b][p] paletteuse'])
        # For AVIF, we're telling FFmpeg to split the video stream, extract the alpha,
        # in order to place it in a secondary stream, as needed by AVIF-in-FFmpeg.
        elif self.codec == 'avif' and '-filter_complex' not in extra_args:
            args.extend(['-filter_complex',
                         'split [rgb][rgba]; [rgba] alphaextract [alpha]',
                         '-map', '[rgb]', '-map', '[alpha]'])
        if self.bitrate > 0:
            args.extend(['-b', '%dk' % self.bitrate])  # %dk: bitrate in kbps.
        for k, v in self.metadata.items():
            args.extend(['-metadata', f'{k}={v}'])
        args.extend(extra_args)
        # '-y' makes ffmpeg overwrite the output file without prompting.
        return args + ['-y', self.outfile]
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(FFMpegBase, MovieWriter):
    """
    Pipe-based ffmpeg writer.

    Frames are streamed directly to ffmpeg via a pipe and written in a single
    pass.  This effectively works as a slideshow input to ffmpeg with the fps
    passed as ``-framerate``; see also `their notes on frame rates`_.

    .. _their notes on frame rates: https://trac.ffmpeg.org/wiki/Slideshow#Framerates
    """
    def _args(self):
        """Build the ffmpeg command line for reading raw frames from a pipe."""
        cmd = [
            self.bin_path(),
            '-f', 'rawvideo',
            '-vcodec', 'rawvideo',
            '-s', '%dx%d' % self.frame_size,
            '-pix_fmt', self.frame_format,
            '-framerate', str(self.fps),
        ]
        # Quiet ffmpeg unless debugging: subprocess.PIPE has a limited buffer
        # and verbose output over many frames can overrun it.
        if _log.getEffectiveLevel() > logging.DEBUG:
            cmd += ['-loglevel', 'error']
        return cmd + ['-i', 'pipe:'] + self.output_args
# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
    """
    File-based ffmpeg writer.

    Frames are written to temporary files on disk and stitched together at
    the end.  This effectively works as a slideshow input to ffmpeg with the
    fps passed as ``-framerate``; see also `their notes on frame rates`_.

    .. _their notes on frame rates: https://trac.ffmpeg.org/wiki/Slideshow#Framerates
    """
    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']

    def _args(self):
        """Build the ffmpeg command line that stitches the temp frame files."""
        opts = []
        if self.frame_format in {'raw', 'rgba'}:
            # Raw dumps carry no header, so the geometry and pixel format
            # must be spelled out explicitly.
            opts += [
                '-f', 'image2', '-vcodec', 'rawvideo',
                '-video_size', '%dx%d' % self.frame_size,
                '-pixel_format', 'rgba',
            ]
        opts += ['-framerate', str(self.fps), '-i', self._base_temp_name()]
        if not self._tmpdir:
            opts += ['-frames:v', str(self._frame_counter)]
        # Quiet ffmpeg unless debugging: subprocess.PIPE has a limited buffer
        # and verbose output over many frames can overrun it.
        if _log.getEffectiveLevel() > logging.DEBUG:
            opts += ['-loglevel', 'error']
        return [self.bin_path(), *opts, *self.output_args]
# Base class for animated GIFs with ImageMagick
class ImageMagickBase:
    """
    Mixin class for ImageMagick output.

    Base class for the concrete `ImageMagickWriter` and
    `ImageMagickFileWriter` classes, which define an ``input_names``
    attribute (or property) giving the input names passed to ImageMagick.
    """

    _exec_key = 'animation.convert_path'
    _args_key = 'animation.convert_args'

    def _supports_transparency(self):
        # Only these container formats retain an alpha channel.
        return Path(self.outfile).suffix in {
            '.apng', '.avif', '.gif', '.webm', '.webp'}

    def _args(self):
        # ImageMagick does not recognize "raw".
        fmt = self.frame_format if self.frame_format != "raw" else "rgba"
        extra = (mpl.rcParams[self._args_key] if self.extra_args is None
                 else self.extra_args)
        return [
            self.bin_path(),
            "-size", "%ix%i" % self.frame_size,
            "-depth", "8",
            "-delay", str(100 / self.fps),
            "-loop", "0",
            f"{fmt}:{self.input_names}",
            *extra,
            self.outfile,
        ]

    @classmethod
    def bin_path(cls):
        path = super().bin_path()
        if path == 'convert':
            # Legacy rcParam default: resolve the actual magick executable.
            path = mpl._get_executable_info('magick').executable
        return path

    @classmethod
    def isAvailable(cls):
        try:
            return super().isAvailable()
        except mpl.ExecutableNotFoundError as err:
            # May be raised by get_executable_info.
            _log.debug('ImageMagick unavailable due to: %s', err)
            return False
# Combine ImageMagick options with pipe-based writing
@writers.register('imagemagick')
class ImageMagickWriter(ImageMagickBase, MovieWriter):
    """
    Pipe-based animated gif writer.

    Each frame is streamed straight to the ImageMagick process over a pipe,
    producing the output in a single pass.
    """

    input_names = "-"  # read frame data from stdin
# Combine ImageMagick options with temp file-based writing
@writers.register('imagemagick_file')
class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
    """
    File-based animated gif writer.

    Frames are first written to temporary image files on disk, then
    stitched into the final output at the end.
    """

    supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba']

    @property
    def input_names(self):
        # Glob pattern matching every temp frame written by grab_frame().
        return f'{self.temp_prefix}*.{self.frame_format}'
# Taken directly from jakevdp's JSAnimation package at
# http://github.com/jakevdp/JSAnimation
def _included_frames(frame_count, frame_format, frame_dir):
    """Return the JS snippet referencing on-disk frame files."""
    substitutions = {"Nframes": frame_count,
                     "frame_dir": frame_dir,
                     "frame_format": frame_format}
    return INCLUDED_FRAMES.format(**substitutions)
def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
if frame_format == 'svg':
# Fix MIME type for svg
frame_format = 'svg+xml'
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
return "\n" + "".join(
template.format(i, frame_format, frame_data.replace('\n', '\\\n'))
for i, frame_data in enumerate(frame_list))
@writers.register('html')
class HTMLWriter(FileMovieWriter):
    """Writer for JavaScript-based HTML movies."""

    supported_formats = ['png', 'jpeg', 'tiff', 'svg']

    @classmethod
    def isAvailable(cls):
        # Writing HTML requires no external binary, so always available.
        return True

    def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
                 metadata=None, embed_frames=False, default_mode='loop',
                 embed_limit=None):
        # *extra_args* is accepted for signature compatibility with other
        # writers but has no meaning for HTML output.
        if extra_args:
            _log.warning("HTMLWriter ignores 'extra_args'")
        extra_args = ()  # Don't lookup nonexistent rcParam[args_key].
        self.embed_frames = embed_frames
        self.default_mode = default_mode.lower()
        _api.check_in_list(['loop', 'once', 'reflect'],
                           default_mode=self.default_mode)
        # Save embed limit, which is given in MB
        self._bytes_limit = mpl._val_or_rc(embed_limit, 'animation.embed_limit')
        # Convert from MB to bytes
        self._bytes_limit *= 1024 * 1024
        super().__init__(fps, codec, bitrate, extra_args, metadata)

    def setup(self, fig, outfile, dpi=None, frame_dir=None):
        outfile = Path(outfile)
        _api.check_in_list(['.html', '.htm'], outfile_extension=outfile.suffix)
        # State for embedded mode: collected base64 frames and size tracking.
        self._saved_frames = []
        self._total_bytes = 0
        self._hit_limit = False
        if not self.embed_frames:
            # Frames go to <outfile-stem>_frames/ next to the output file
            # unless an explicit directory is given.
            if frame_dir is None:
                frame_dir = outfile.with_name(outfile.stem + '_frames')
            frame_dir.mkdir(parents=True, exist_ok=True)
            frame_prefix = frame_dir / 'frame'
        else:
            frame_prefix = None
        super().setup(fig, outfile, dpi, frame_prefix)
        # Frame files must survive finish(): the HTML references them.
        self._clear_temp = False

    def grab_frame(self, **savefig_kwargs):
        _validate_grabframe_kwargs(savefig_kwargs)
        if self.embed_frames:
            # Just stop processing if we hit the limit
            if self._hit_limit:
                return
            f = BytesIO()
            self.fig.savefig(f, format=self.frame_format,
                             dpi=self.dpi, **savefig_kwargs)
            imgdata64 = base64.encodebytes(f.getvalue()).decode('ascii')
            self._total_bytes += len(imgdata64)
            if self._total_bytes >= self._bytes_limit:
                _log.warning(
                    "Animation size has reached %s bytes, exceeding the limit "
                    "of %s. If you're sure you want a larger animation "
                    "embedded, set the animation.embed_limit rc parameter to "
                    "a larger value (in MB). This and further frames will be "
                    "dropped.", self._total_bytes, self._bytes_limit)
                self._hit_limit = True
            else:
                self._saved_frames.append(imgdata64)
        else:
            # File-based mode: delegate to FileMovieWriter.
            return super().grab_frame(**savefig_kwargs)

    def finish(self):
        # save the frames to an html file
        if self.embed_frames:
            fill_frames = _embedded_frames(self._saved_frames,
                                           self.frame_format)
            frame_count = len(self._saved_frames)
        else:
            # temp names is filled by FileMovieWriter
            frame_count = len(self._temp_paths)
            fill_frames = _included_frames(
                frame_count, self.frame_format,
                self._temp_paths[0].parent.relative_to(self.outfile.parent))

        # Pre-select the radio button matching the requested playback mode.
        mode_dict = dict(once_checked='',
                         loop_checked='',
                         reflect_checked='')
        mode_dict[self.default_mode + '_checked'] = 'checked'

        interval = 1000 // self.fps

        with open(self.outfile, 'w') as of:
            of.write(JS_INCLUDE + STYLE_INCLUDE)
            of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
                                             Nframes=frame_count,
                                             fill_frames=fill_frames,
                                             interval=interval,
                                             **mode_dict))

        # Duplicate the temporary file clean up logic from
        # FileMovieWriter.finish. We cannot call the inherited version of
        # finish because it assumes that there is a subprocess that we either
        # need to call to merge many frames together or that there is a
        # subprocess call that we need to clean up.
        if self._tmpdir:
            _log.debug('MovieWriter: clearing temporary path=%s', self._tmpdir)
            self._tmpdir.cleanup()
class Animation:
"""
A base class for Animations.
This class is not usable as is, and should be subclassed to provide needed
behavior.
.. note::
You must store the created Animation in a variable that lives as long
as the animation should run. Otherwise, the Animation object will be
garbage-collected and the animation stops.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure object used to get needed events, such as draw or resize.
event_source : object, optional
A class that can run a callback when desired events
are generated, as well as be stopped and started.
Examples include timers (see `TimedAnimation`) and file
system notifications.
blit : bool, default: False
Whether blitting is used to optimize drawing. If the backend does not
support blitting, then this parameter has no effect.
See Also
--------
FuncAnimation, ArtistAnimation
"""
def __init__(self, fig, event_source=None, blit=False):
    # No frame has been drawn yet; __del__ warns if this never flips True.
    self._draw_was_started = False

    self._fig = fig
    # Disables blitting for backends that don't support it. This
    # allows users to request it if available, but still have a
    # fallback that works if it is not.
    self._blit = blit and fig.canvas.supports_blit

    # These are the basics of the animation. The frame sequence represents
    # information for each frame of the animation and depends on how the
    # drawing is handled by the subclasses. The event source fires events
    # that cause the frame sequence to be iterated.
    self.frame_seq = self.new_frame_seq()
    self.event_source = event_source

    # Instead of starting the event source now, we connect to the figure's
    # draw_event, so that we only start once the figure has been drawn.
    self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)

    # Connect to the figure's close_event so that we don't continue to
    # fire events and try to draw to a deleted figure.
    self._close_id = self._fig.canvas.mpl_connect('close_event',
                                                  self._stop)
    if self._blit:
        self._setup_blit()
def __del__(self):
    """Warn if the Animation is collected without ever having been drawn."""
    # Default to True so a half-constructed instance does not warn.
    if getattr(self, '_draw_was_started', True):
        return
    warnings.warn(
        'Animation was deleted without rendering anything. This is '
        'most likely not intended. To prevent deletion, assign the '
        'Animation to a variable, e.g. `anim`, that exists until you '
        'output the Animation using `plt.show()` or '
        '`anim.save()`.'
    )
def _start(self, *args):
    """
    Starts interactive animation. Adds the draw frame command to the GUI
    handler, calls show to start the event loop.
    """
    # Do not start the event source if saving() it.
    if self._fig.canvas.is_saving():
        return
    # First disconnect our draw event handler
    self._fig.canvas.mpl_disconnect(self._first_draw_id)

    # Now do any initial draw
    self._init_draw()

    # Add our callback for stepping the animation and
    # actually start the event_source.
    self.event_source.add_callback(self._step)
    self.event_source.start()
def _stop(self, *args):
    """Disconnect all our canvas events and release the event source."""
    # On stop we disconnect all of our events.
    if self._blit:
        self._fig.canvas.mpl_disconnect(self._resize_id)
    self._fig.canvas.mpl_disconnect(self._close_id)
    self.event_source.remove_callback(self._step)
    self.event_source = None
def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
         bitrate=None, extra_args=None, metadata=None, extra_anim=None,
         savefig_kwargs=None, *, progress_callback=None):
    """
    Save the animation as a movie file by drawing every frame.

    Parameters
    ----------
    filename : str
        The output filename, e.g., :file:`mymovie.mp4`.
    writer : `MovieWriter` or str, default: :rc:`animation.writer`
        A `MovieWriter` instance to use or a key that identifies a
        class to use, such as 'ffmpeg'.
    fps : int, optional
        Movie frame rate (per second). If not set, the frame rate from the
        animation's frame interval.
    dpi : float, default: :rc:`savefig.dpi`
        Controls the dots per inch for the movie frames. Together with
        the figure's size in inches, this controls the size of the movie.
    codec : str, default: :rc:`animation.codec`.
        The video codec to use. Not all codecs are supported by a given
        `MovieWriter`.
    bitrate : int, default: :rc:`animation.bitrate`
        The bitrate of the movie, in kilobits per second. Higher values
        means higher quality movies, but increase the file size. A value
        of -1 lets the underlying movie encoder select the bitrate.
    extra_args : list of str or None, optional
        Extra command-line arguments passed to the underlying movie encoder. These
        arguments are passed last to the encoder, just before the output filename.
        The default, None, means to use :rc:`animation.[name-of-encoder]_args` for
        the builtin writers.
    metadata : dict[str, str], default: {}
        Dictionary of keys and values for metadata to include in
        the output file. Some keys that may be of use include:
        title, artist, genre, subject, copyright, srcform, comment.
    extra_anim : list, default: []
        Additional `Animation` objects that should be included
        in the saved movie file. These need to be from the same
        `.Figure` instance. Also, animation frames will
        just be simply combined, so there should be a 1:1 correspondence
        between the frames from the different animations.
    savefig_kwargs : dict, default: {}
        Keyword arguments passed to each `~.Figure.savefig` call used to
        save the individual frames.
    progress_callback : function, optional
        A callback function that will be called for every frame to notify
        the saving progress. It must have the signature ::

            def func(current_frame: int, total_frames: int) -> Any

        where *current_frame* is the current frame number and *total_frames* is the
        total number of frames to be saved. *total_frames* is set to None, if the
        total number of frames cannot be determined. Return values may exist but are
        ignored.

        Example code to write the progress to stdout::

            progress_callback = lambda i, n: print(f'Saving frame {i}/{n}')

    Notes
    -----
    *fps*, *codec*, *bitrate*, *extra_args* and *metadata* are used to
    construct a `.MovieWriter` instance and can only be passed if
    *writer* is a string. If they are passed as non-*None* and *writer*
    is a `.MovieWriter`, a `RuntimeError` will be raised.
    """
    # Only extra animations sharing our figure can be combined frame-wise.
    all_anim = [self]
    if extra_anim is not None:
        all_anim.extend(anim for anim in extra_anim
                        if anim._fig is self._fig)

    # Disable "Animation was deleted without rendering" warning.
    for anim in all_anim:
        anim._draw_was_started = True

    if writer is None:
        writer = mpl.rcParams['animation.writer']
    elif (not isinstance(writer, str) and
          any(arg is not None
              for arg in (fps, codec, bitrate, extra_args, metadata))):
        raise RuntimeError('Passing in values for arguments '
                           'fps, codec, bitrate, extra_args, or metadata '
                           'is not supported when writer is an existing '
                           'MovieWriter instance. These should instead be '
                           'passed as arguments when creating the '
                           'MovieWriter instance.')

    if savefig_kwargs is None:
        savefig_kwargs = {}
    else:
        # we are going to mutate this below
        savefig_kwargs = dict(savefig_kwargs)

    if fps is None and hasattr(self, '_interval'):
        # Convert interval in ms to frames per second
        fps = 1000. / self._interval

    # Reuse the savefig DPI for ours if none is given.
    dpi = mpl._val_or_rc(dpi, 'savefig.dpi')
    if dpi == 'figure':
        dpi = self._fig.dpi

    # Only forward explicitly-passed options so the writer's own defaults
    # (rcParams lookups) still apply for the rest.
    writer_kwargs = {}
    if codec is not None:
        writer_kwargs['codec'] = codec
    if bitrate is not None:
        writer_kwargs['bitrate'] = bitrate
    if extra_args is not None:
        writer_kwargs['extra_args'] = extra_args
    if metadata is not None:
        writer_kwargs['metadata'] = metadata

    # If we have the name of a writer, instantiate an instance of the
    # registered class.
    if isinstance(writer, str):
        try:
            writer_cls = writers[writer]
        except RuntimeError:  # Raised if not available.
            writer_cls = PillowWriter  # Always available.
            _log.warning("MovieWriter %s unavailable; using Pillow "
                         "instead.", writer)
        writer = writer_cls(fps, **writer_kwargs)
    _log.info('Animation.save using %s', type(writer))

    if 'bbox_inches' in savefig_kwargs:
        _log.warning("Warning: discarding the 'bbox_inches' argument in "
                     "'savefig_kwargs' as it may cause frame size "
                     "to vary, which is inappropriate for animation.")
        savefig_kwargs.pop('bbox_inches')

    # Create a new sequence of frames for saved data. This is different
    # from new_frame_seq() to give the ability to save 'live' generated
    # frame information to be saved later.
    # TODO: Right now, after closing the figure, saving a movie won't work
    # since GUI widgets are gone. Either need to remove extra code to
    # allow for this non-existent use case or find a way to make it work.

    def _pre_composite_to_white(color):
        # Alpha-blend *color* onto an opaque white background (returns RGB).
        r, g, b, a = mcolors.to_rgba(color)
        return a * np.array([r, g, b]) + 1 - a

    # canvas._is_saving = True makes the draw_event animation-starting
    # callback a no-op; canvas.manager = None prevents resizing the GUI
    # widget (both are likewise done in savefig()).
    with (writer.saving(self._fig, filename, dpi),
          cbook._setattr_cm(self._fig.canvas, _is_saving=True, manager=None)):
        if not writer._supports_transparency():
            facecolor = savefig_kwargs.get('facecolor',
                                           mpl.rcParams['savefig.facecolor'])
            if facecolor == 'auto':
                facecolor = self._fig.get_facecolor()
            savefig_kwargs['facecolor'] = _pre_composite_to_white(facecolor)
            savefig_kwargs['transparent'] = False  # just to be safe!

        for anim in all_anim:
            anim._init_draw()  # Clear the initial frame
        frame_number = 0
        # TODO: Currently only FuncAnimation has a save_count
        # attribute. Can we generalize this to all Animations?
        save_count_list = [getattr(a, '_save_count', None)
                           for a in all_anim]
        if None in save_count_list:
            total_frames = None
        else:
            total_frames = sum(save_count_list)
        for data in zip(*[a.new_saved_frame_seq() for a in all_anim]):
            for anim, d in zip(all_anim, data):
                # TODO: See if turning off blit is really necessary
                anim._draw_next_frame(d, blit=False)
            if progress_callback is not None:
                progress_callback(frame_number, total_frames)
                frame_number += 1
            writer.grab_frame(**savefig_kwargs)
def _step(self, *args):
    """
    Advance the animation by one frame.

    Returns True while frames remain (so the event source keeps calling
    us) and False once the frame sequence is exhausted.
    """
    try:
        self._draw_next_frame(next(self.frame_seq), self._blit)
    except StopIteration:
        return False
    return True
def new_frame_seq(self):
    """Return a new sequence of frame information."""
    # Default implementation is just an iterator over self._framedata,
    # which subclasses are expected to provide.
    return iter(self._framedata)
def new_saved_frame_seq(self):
    """Return a new sequence of saved/cached frame information."""
    # Default is the same as the regular frame sequence; subclasses that
    # cache 'live' data override this for save().
    return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
    """Render one frame: pre-draw hook, the frame itself, then post-draw."""
    # Breaks down the drawing of the next frame into steps of pre- and
    # post- draw, as well as the drawing of the frame itself.
    self._pre_draw(framedata, blit)
    self._draw_frame(framedata)
    self._post_draw(framedata, blit)
def _init_draw(self):
    # Initial draw to clear the frame. Also used by the blitting code
    # when a clean base is required.
    # Marks the animation as rendered so __del__ does not warn.
    self._draw_was_started = True
def _pre_draw(self, framedata, blit):
    """Hook run before a frame is drawn; clears blit regions if blitting."""
    if not blit:
        return
    self._blit_clear(self._drawn_artists)
def _draw_frame(self, framedata):
    # Performs actual drawing of the frame; must be provided by subclasses
    # (e.g. FuncAnimation, ArtistAnimation).
    raise NotImplementedError('Needs to be implemented by subclasses to'
                              ' actually make an animation.')
def _post_draw(self, framedata, blit):
    """Flush the rendered frame to the canvas, via blitting if possible."""
    # Without drawn artists to blit, fall back to a full idle redraw.
    if not (blit and self._drawn_artists):
        self._fig.canvas.draw_idle()
    else:
        self._blit_draw(self._drawn_artists)
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists):
    """Blit *artists* onto their Axes, caching backgrounds as needed."""
    # Handles blitted drawing, which renders only the artists given instead
    # of the entire figure.
    updated_ax = {a.axes for a in artists}
    # Enumerate artists to cache Axes backgrounds. We do not draw
    # artists yet to not cache foreground from plots with shared Axes
    for ax in updated_ax:
        # If we haven't cached the background for the current view of this
        # Axes object, do so now. This might not always be reliable, but
        # it's an attempt to automate the process.
        cur_view = ax._get_view()
        # object() is a sentinel that never compares equal to a real view.
        view, bg = self._blit_cache.get(ax, (object(), None))
        if cur_view != view:
            self._blit_cache[ax] = (
                cur_view, ax.figure.canvas.copy_from_bbox(ax.bbox))
    # Make a separate pass to draw foreground.
    for a in artists:
        a.axes.draw_artist(a)
    # After rendering all the needed artists, blit each Axes individually.
    for ax in updated_ax:
        ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists):
    """Restore the cached backgrounds of the Axes the artists live in."""
    # Stale cache entries (recorded for a different view) are dropped
    # instead of restored.
    for ax in {a.axes for a in artists}:
        entry = self._blit_cache.get(ax)
        if entry is None:
            continue
        view, bg = entry
        if ax._get_view() == view:
            ax.figure.canvas.restore_region(bg)
        else:
            self._blit_cache.pop(ax)
def _setup_blit(self):
    """Initialize blitting state and hook up resize handling."""
    # Setting up the blit requires: a cache of the background for the Axes
    self._blit_cache = dict()
    self._drawn_artists = []
    # _post_draw needs to be called first to initialize the renderer
    self._post_draw(None, self._blit)
    # Then we need to clear the Frame for the initial draw
    # This is typically handled in _on_resize because QT and Tk
    # emit a resize event on launch, but the macosx backend does not,
    # thus we force it here for everyone for consistency
    self._init_draw()
    # Connect to future resize events
    self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                   self._on_resize)
def _on_resize(self, event):
    """Pause the animation during a resize and invalidate the blit cache."""
    # On resize, we need to disable the resize event handling so we don't
    # get too many events. Also stop the animation events, so that
    # we're paused. Reset the cache and re-init. Set up an event handler
    # to catch once the draw has actually taken place.
    self._fig.canvas.mpl_disconnect(self._resize_id)
    self.event_source.stop()
    self._blit_cache.clear()
    self._init_draw()
    # Resume only after the post-resize redraw has actually happened.
    self._resize_id = self._fig.canvas.mpl_connect('draw_event',
                                                   self._end_redraw)
def _end_redraw(self, event):
    """Finish a resize-triggered redraw and resume the animation."""
    # Now that the redraw has happened, do the post draw flushing and
    # blit handling. Then re-enable all of the original events.
    self._post_draw(None, False)
    self.event_source.start()
    # Swap the one-shot draw_event handler back for the resize handler.
    self._fig.canvas.mpl_disconnect(self._resize_id)
    self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                   self._on_resize)
def to_html5_video(self, embed_limit=None):
"""
Convert the animation to an HTML5 ``
# --- venv\Lib\site-packages\matplotlib\artist.py ---
from collections import namedtuple
import contextlib
from functools import cache, reduce, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number, Real
import operator
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .path import Path
from .transforms import (BboxBase, Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
_log = logging.getLogger(__name__)
def _prevent_rasterization(draw):
# We assume that by default artists are not allowed to rasterize (unless
# its draw method is explicitly decorated). If it is being drawn after a
# rasterized artist and it has reached a raster_depth of 0, we stop
# rasterization so that it does not affect the behavior of normal artist
# (e.g., change in dpi).
@wraps(draw)
def draw_wrapper(artist, renderer, *args, **kwargs):
if renderer._raster_depth == 0 and renderer._rasterizing:
# Only stop when we are not in a rasterized parent
# and something has been rasterized since last stop.
renderer.stop_rasterizing()
renderer._rasterizing = False
return draw(artist, renderer, *args, **kwargs)
draw_wrapper._supports_rasterization = False
return draw_wrapper
def allow_rasterization(draw):
    """
    Decorator for Artist.draw method. Provides routines
    that run before and after the draw call. The before and after functions
    are useful for changing artist-dependent renderer attributes or making
    other setup function calls, such as starting and flushing a mixed-mode
    renderer.
    """
    @wraps(draw)
    def draw_wrapper(artist, renderer):
        try:
            if artist.get_rasterized():
                # Open a raster group only at the outermost rasterized
                # artist; nested ones just bump the depth counter.
                if renderer._raster_depth == 0 and not renderer._rasterizing:
                    renderer.start_rasterizing()
                    renderer._rasterizing = True
                renderer._raster_depth += 1
            else:
                if renderer._raster_depth == 0 and renderer._rasterizing:
                    # Only stop when we are not in a rasterized parent
                    # and something has been rasterized since last stop
                    renderer.stop_rasterizing()
                    renderer._rasterizing = False

            if artist.get_agg_filter() is not None:
                renderer.start_filter()

            return draw(artist, renderer)
        finally:
            # Cleanup runs even if draw() raised.
            if artist.get_agg_filter() is not None:
                renderer.stop_filter(artist.get_agg_filter())

            if artist.get_rasterized():
                renderer._raster_depth -= 1

            if (renderer._rasterizing and (fig := artist.get_figure(root=True)) and
                    fig.suppressComposite):
                # restart rasterizing to prevent merging
                renderer.stop_rasterizing()
                renderer.start_rasterizing()

    draw_wrapper._supports_rasterization = True
    return draw_wrapper
def _finalize_rasterization(draw):
"""
Decorator for Artist.draw method. Needed on the outermost artist, i.e.
Figure, to finish up if the render is still in rasterized mode.
"""
@wraps(draw)
def draw_wrapper(artist, renderer, *args, **kwargs):
result = draw(artist, renderer, *args, **kwargs)
if renderer._rasterizing:
renderer.stop_rasterizing()
renderer._rasterizing = False
return result
return draw_wrapper
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val
_XYPair = namedtuple("_XYPair", "x y")
class _Unset:
def __repr__(self):
return ""
_UNSET = _Unset()
class Artist:
"""
Abstract base class for objects that render into a FigureCanvas.
Typically, all visible elements in a figure are subclasses of Artist.
"""
zorder = 0
def __init_subclass__(cls):
    # Decorate draw() method so that all artists are able to stop
    # rasterization when necessary. If the artist's draw method is already
    # decorated (has a `_supports_rasterization` attribute), it won't be
    # decorated.
    if not hasattr(cls.draw, "_supports_rasterization"):
        cls.draw = _prevent_rasterization(cls.draw)

    # Inject custom set() methods into the subclass with signature and
    # docstring based on the subclasses' properties.
    if not hasattr(cls.set, '_autogenerated_signature'):
        # Don't overwrite cls.set if the subclass or one of its parents
        # has defined a set method set itself.
        # If there was no explicit definition, cls.set is inherited from
        # the hierarchy of auto-generated set methods, which hold the
        # flag _autogenerated_signature.
        return

    cls.set = lambda self, **kwargs: Artist.set(self, **kwargs)
    cls.set.__name__ = "set"
    cls.set.__qualname__ = f"{cls.__qualname__}.set"
    cls._update_set_signature_and_docstring()
# Properties deliberately left out of the auto-generated set() signature.
_PROPERTIES_EXCLUDED_FROM_SET = [
    'navigate_mode',  # not a user-facing function
    'figure',  # changing the figure is such a profound operation
               # that we don't want this in set()
    '3d_properties',  # cannot be used as a keyword due to leading digit
]
@classmethod
def _update_set_signature_and_docstring(cls):
    """
    Update the signature of the set function to list all properties
    as keyword arguments.

    Property aliases are not listed in the signature for brevity, but
    are still accepted as keyword arguments.
    """
    cls.set.__signature__ = Signature(
        [Parameter("self", Parameter.POSITIONAL_OR_KEYWORD),
         *[Parameter(prop, Parameter.KEYWORD_ONLY, default=_UNSET)
           for prop in ArtistInspector(cls).get_setters()
           if prop not in Artist._PROPERTIES_EXCLUDED_FROM_SET]])
    cls.set._autogenerated_signature = True

    # Regenerate the docstring so the supported-property table matches
    # this subclass's setters.
    cls.set.__doc__ = (
        "Set multiple properties at once.\n\n"
        "Supported properties are\n\n"
        + kwdoc(cls))
def __init__(self):
    # Staleness tracking.
    self._stale = True
    self.stale_callback = None
    # Containment: owning Axes and Figure (set when added to a container).
    self._axes = None
    self._parent_figure = None
    # Transform state: unset until set_transform() is called.
    self._transform = None
    self._transformSet = False
    # Display/rendering flags.
    self._visible = True
    self._animated = False
    self._alpha = None
    # Clipping configuration.
    self.clipbox = None
    self._clippath = None
    self._clipon = True
    self._label = ''
    self._picker = None
    self._rasterized = False
    self._agg_filter = None
    # Normally, artist classes need to be queried for mouseover info if and
    # only if they override get_cursor_data.
    self._mouseover = type(self).get_cursor_data != Artist.get_cursor_data
    self._callbacks = cbook.CallbackRegistry(signals=["pchanged"])
    try:
        self.axes = None
    except AttributeError:
        # Handle self.axes as a read-only property, as in Figure.
        pass
    self._remove_method = None
    self._url = None
    self._gid = None
    self._snap = None
    self._sketch = mpl.rcParams['path.sketch']
    self._path_effects = mpl.rcParams['path.effects']
    self._sticky_edges = _XYPair([], [])
    self._in_layout = True
def __getstate__(self):
    """Return picklable state, dropping the non-picklable stale callback."""
    state = dict(self.__dict__)
    state['stale_callback'] = None
    return state
def remove(self):
    """
    Remove the artist from the figure if possible.

    The effect will not be visible until the figure is redrawn, e.g.,
    with `.FigureCanvasBase.draw_idle`. Call `~.axes.Axes.relim` to
    update the Axes limits if desired.

    Note: `~.axes.Axes.relim` will not see collections even if the
    collection was added to the Axes with *autolim* = True.

    Note: there is no support for removing the artist's legend entry.
    """
    # There is no method to set the callback. Instead, the parent should
    # set the _remove_method attribute directly. This would be a
    # protected attribute if Python supported that sort of thing. The
    # callback has one parameter, which is the child to be removed.
    if self._remove_method is not None:
        self._remove_method(self)
        # clear stale callback
        self.stale_callback = None
        _ax_flag = False
        if hasattr(self, 'axes') and self.axes:
            # remove from the mouse hit list
            self.axes._mouseover_set.discard(self)
            self.axes.stale = True
            self.axes = None  # decouple the artist from the Axes
            _ax_flag = True

        if (fig := self.get_figure(root=False)) is not None:
            if not _ax_flag:
                # Only mark the figure stale if the Axes didn't already.
                fig.stale = True
            self._parent_figure = None
    else:
        raise NotImplementedError('cannot remove artist')
    # TODO: the fix for the collections relim problem is to move the
    # limits calculation into the artist itself, including the property of
    # whether or not the artist should affect the limits. Then there will
    # be no distinction between axes.add_line, axes.add_patch, etc.
    # TODO: add legend support
def have_units(self):
    """Return whether units are set on any axis."""
    ax = self.axes
    if not ax:
        # Preserve historical behavior: the falsy value itself is returned.
        return ax
    return any(axis.have_units() for axis in ax._axis_map.values())
def convert_xunits(self, x):
    """
    Convert *x* using the unit type of the xaxis.

    If the artist is not contained in an Axes or if the xaxis does not
    have units, *x* itself is returned.
    """
    ax = getattr(self, 'axes', None)
    if ax is not None and ax.xaxis is not None:
        return ax.xaxis.convert_units(x)
    return x
def convert_yunits(self, y):
    """
    Convert *y* using the unit type of the yaxis.

    If the artist is not contained in an Axes or if the yaxis does not
    have units, *y* itself is returned.
    """
    ax = getattr(self, 'axes', None)
    if ax is not None and ax.yaxis is not None:
        return ax.yaxis.convert_units(y)
    return y
@property
def axes(self):
    """The `~.axes.Axes` instance the artist resides in, or *None*."""
    return self._axes

@axes.setter
def axes(self, new_axes):
    # An artist may belong to at most one Axes; reassigning to a different
    # one is an error (clearing with None is allowed).
    if (new_axes is not None and self._axes is not None
            and new_axes != self._axes):
        raise ValueError("Can not reset the Axes. You are probably trying to reuse "
                         "an artist in more than one Axes which is not supported")
    self._axes = new_axes
    if new_axes is not None and new_axes is not self:
        # Keep the owning Axes' staleness in sync with this artist.
        self.stale_callback = _stale_axes_callback
@property
def stale(self):
    """
    Whether the artist is 'stale' and needs to be re-drawn for the output
    to match the internal state of the artist.
    """
    return self._stale

@stale.setter
def stale(self, val):
    self._stale = val

    # if the artist is animated it does not take normal part in the
    # draw stack and is not expected to be drawn as part of the normal
    # draw loop (when not saving) so do not propagate this change
    if self._animated:
        return

    # Only propagate when becoming stale and a callback is registered.
    if val and self.stale_callback is not None:
        self.stale_callback(self, val)
def get_window_extent(self, renderer=None):
    """
    Get the artist's bounding box in display space.

    The bounding box' width and height are nonnegative.

    Subclasses should override for inclusion in the bounding box
    "tight" calculation. Default is to return an empty bounding
    box at 0, 0.

    Be careful when using this function, the results will not update
    if the artist window extent of the artist changes. The extent
    can change due to any changes in the transform stack, such as
    changing the Axes limits, the figure size, or the canvas used
    (as is done when saving a figure). This can lead to unexpected
    behavior where interactive figures will look fine on the screen,
    but will save incorrectly.
    """
    # Degenerate (empty) bbox; subclasses provide real extents.
    return Bbox([[0, 0], [0, 0]])
def get_tightbbox(self, renderer=None):
    """
    Like `.Artist.get_window_extent`, but includes any clipping.

    Parameters
    ----------
    renderer : `~matplotlib.backend_bases.RendererBase` subclass, optional
        renderer that will be used to draw the figures (i.e.
        ``fig.canvas.get_renderer()``)

    Returns
    -------
    `.Bbox` or None
        The enclosing bounding box (in figure pixel coordinates).
        Returns None if clipping results in no intersection.
    """
    bbox = self.get_window_extent(renderer)
    if self.get_clip_on():
        # Intersect with the clip box first, then the clip path's extents.
        clip_box = self.get_clip_box()
        if clip_box is not None:
            bbox = Bbox.intersection(bbox, clip_box)
        clip_path = self.get_clip_path()
        # bbox may have become None from the clip-box intersection above.
        if clip_path is not None and bbox is not None:
            clip_path = clip_path.get_fully_transformed_path()
            bbox = Bbox.intersection(bbox, clip_path.get_extents())
    return bbox
def add_callback(self, func):
    """
    Add a callback function that will be called whenever one of the
    `.Artist`'s properties changes.

    Parameters
    ----------
    func : callable
        The callback function. It must have the signature::

            def func(artist: Artist) -> Any

        where *artist* is the calling `.Artist`. Return values may exist
        but are ignored.

    Returns
    -------
    int
        The observer id associated with the callback. This id can be
        used for removing the callback with `.remove_callback` later.

    See Also
    --------
    remove_callback
    """
    # Wrapping func in a lambda ensures it can be connected multiple times
    # and never gets weakref-gc'ed.
    return self._callbacks.connect("pchanged", lambda: func(self))
def remove_callback(self, oid):
    """
    Remove a callback based on its observer id.

    See Also
    --------
    add_callback
    """
    self._callbacks.disconnect(oid)
def pchanged(self):
    """
    Call all of the registered callbacks.

    This function is triggered internally when a property is changed.

    See Also
    --------
    add_callback
    remove_callback
    """
    self._callbacks.process("pchanged")
def is_transform_set(self):
    """
    Return whether the Artist has an explicitly set transform.

    This is *True* after `.set_transform` has been called.
    """
    return self._transformSet
def set_transform(self, t):
    """
    Set the artist transform.

    Parameters
    ----------
    t : `~matplotlib.transforms.Transform`
    """
    self._transform = t
    self._transformSet = True
    # Notify observers and mark the artist for redraw.
    self.pchanged()
    self.stale = True
def get_transform(self):
    """Return the `.Transform` instance used by this artist."""
    if self._transform is None:
        # Lazily default to the identity transform.
        self._transform = IdentityTransform()
    elif (not isinstance(self._transform, Transform)
          and hasattr(self._transform, '_as_mpl_transform')):
        # Resolve transform-like objects against our Axes.
        self._transform = self._transform._as_mpl_transform(self.axes)
    return self._transform
def get_children(self):
    r"""Return a list of the child `.Artist`\s of this `.Artist`."""
    # Base artists have no children; container subclasses override this.
    return []
def _different_canvas(self, event):
    """
    Check whether an *event* occurred on a canvas other that this artist's canvas.

    If this method returns True, the event definitely occurred on a different
    canvas; if it returns False, either it occurred on the same canvas, or we may
    not have enough information to know.

    Subclasses should start their definition of `contains` as follows::

        if self._different_canvas(mouseevent):
            return False, {}
        # subclass-specific implementation follows
    """
    # All three conditions must hold to be *sure* the canvases differ.
    return (getattr(event, "canvas", None) is not None
            and (fig := self.get_figure(root=True)) is not None
            and event.canvas is not fig.canvas)
def contains(self, mouseevent):
    """
    Test whether the artist contains the mouse event.

    Parameters
    ----------
    mouseevent : `~matplotlib.backend_bases.MouseEvent`

    Returns
    -------
    contains : bool
        Whether any values are within the radius.
    details : dict
        An artist-specific dictionary of details of the event context,
        such as which points are contained in the pick radius. See the
        individual Artist subclasses for details.
    """
    # Base implementation: warn and report "not contained"; subclasses
    # are expected to override.
    _log.warning("%r needs 'contains' method", self.__class__.__name__)
    return False, {}
def pickable(self):
    """
    Return whether the artist is pickable.

    An artist is pickable when it belongs to a figure and a picker was
    set with `.Artist.set_picker`.

    See Also
    --------
    .Artist.set_picker, .Artist.get_picker, .Artist.pick
    """
    fig = self.get_figure(root=False)
    return fig is not None and self._picker is not None
def pick(self, mouseevent):
    """
    Process a pick event.

    Each child artist will fire a pick event if *mouseevent* is over
    the artist and the artist has picker set.

    Parameters
    ----------
    mouseevent : `~matplotlib.backend_bases.MouseEvent`

    See Also
    --------
    .Artist.set_picker, .Artist.get_picker, .Artist.pickable
    """
    from .backend_bases import PickEvent  # Circular import.
    # Pick self: a callable picker replaces the default hit test.
    if self.pickable():
        picker = self.get_picker()
        if callable(picker):
            inside, prop = picker(self, mouseevent)
        else:
            inside, prop = self.contains(mouseevent)
        if inside:
            PickEvent("pick_event", self.get_figure(root=True).canvas,
                      mouseevent, self, **prop)._process()
    # Recurse into children, but only those in the same Axes as the event.
    for a in self.get_children():
        ax = getattr(a, 'axes', None)
        if (isinstance(a, mpl.figure.SubFigure)
                or mouseevent.inaxes is None or ax is None
                or mouseevent.inaxes == ax):
            # mouseevent.inaxes can be None for objects associated with an
            # Axes but outside its bounding box (e.g. a tick label); ax can
            # be None for children that have no axes property themselves
            # but whose descendants might — traverse both cases.
            a.pick(mouseevent)
def set_picker(self, picker):
    """
    Define the picking behavior of the artist.

    Parameters
    ----------
    picker : None or bool or float or callable
        One of:

        - *None*: picking disabled (default).
        - bool: if *True*, a pick event fires whenever a mouse event
          occurs over the artist.
        - float: an epsilon tolerance in points; a pick event fires when
          the artist's data lies within epsilon of the mouse event.
          Some artists (lines, patch collections) attach extra data,
          e.g. the indices of the points within epsilon.
        - callable: a user-supplied hit test with signature
          ``hit, props = picker(artist, mouseevent)``; when the mouse
          event is over the artist return ``hit=True`` and a dict of
          properties to add to the `.PickEvent` attributes.
    """
    self._picker = picker
def get_picker(self):
    """
    Return the picking behavior of the artist.

    Possible values are documented in `.Artist.set_picker`.

    See Also
    --------
    .Artist.set_picker, .Artist.pickable, .Artist.pick
    """
    return self._picker
def get_url(self):
    """Return the url of this artist, or *None* if unset."""
    return self._url
def set_url(self, url):
    """
    Set the url for the artist.

    Parameters
    ----------
    url : str
    """
    self._url = url
def get_gid(self):
    """Return the group id of this artist, or *None* if unset."""
    return self._gid
def set_gid(self, gid):
    """
    Set the (group) id for the artist.

    Parameters
    ----------
    gid : str
    """
    self._gid = gid
def get_snap(self):
    """
    Return the snap setting.

    Always *False* when :rc:`path.snap` is disabled; otherwise the value
    set via `.set_snap`.
    """
    return self._snap if mpl.rcParams['path.snap'] else False
def set_snap(self, snap):
    """
    Set the snapping behavior.

    Snapping aligns positions with the pixel grid, which results in clearer
    images: a 1px black line drawn between two pixel centers would otherwise
    be rendered as grey on both adjacent pixels, whereas snapping moves it
    to the nearest integer pixel so the output really contains a 1px black
    line.

    Snapping is currently only supported by the Agg and MacOSX backends.

    Parameters
    ----------
    snap : bool or None
        - *True*: snap vertices to the nearest pixel center.
        - *False*: leave vertex positions untouched.
        - *None*: (auto) round to the nearest pixel center only if the
          path contains solely rectilinear line segments.
    """
    self._snap = snap
    self.stale = True
def get_sketch_params(self):
    """
    Return the sketch parameters for the artist.

    Returns
    -------
    tuple or None
        *None* if no sketch parameters were set, else the 3-tuple
        (*scale*, *length*, *randomness*) where *scale* is the wiggle
        amplitude perpendicular to the source line, *length* the wiggle
        length along the line, and *randomness* the factor by which the
        length is shrunken or expanded.
    """
    return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
    """
    Set the sketch parameters.

    Parameters
    ----------
    scale : float, optional
        The amplitude of the wiggle perpendicular to the source
        line, in pixels.  If scale is `None`, or not provided, no
        sketch filter will be provided.
    length : float, optional
        The length of the wiggle along the line, in pixels
        (default 128.0)
    randomness : float, optional
        The scale factor by which the length is shrunken or
        expanded (default 16.0)

        The PGF backend uses this argument as an RNG seed and not as
        described above. Using the same seed yields the same random shape.

    .. ACCEPTS: (scale: float, length: float, randomness: float)
    """
    if scale is None:
        self._sketch = None
    else:
        # Fix: test against None instead of truthiness, so that an explicit
        # 0.0 for *length* or *randomness* is honored rather than silently
        # replaced with the documented "not provided" defaults.
        self._sketch = (scale,
                        length if length is not None else 128.0,
                        randomness if randomness is not None else 16.0)
    self.stale = True
def set_path_effects(self, path_effects):
    """
    Set the path effects.

    Parameters
    ----------
    path_effects : list of `.AbstractPathEffect`
    """
    self._path_effects = path_effects
    self.stale = True
def get_path_effects(self):
    """Return the list of path effects set on this artist."""
    return self._path_effects
def get_figure(self, root=False):
    """
    Return the `.Figure` or `.SubFigure` instance the artist belongs to.

    Parameters
    ----------
    root : bool, default=False
        If False, return the (Sub)Figure this artist is on.  If True,
        walk up a nested tree of SubFigures and return the root Figure.
    """
    parent = self._parent_figure
    if root and parent is not None:
        return parent.get_figure(root=True)
    return parent
def set_figure(self, fig):
    """
    Set the `.Figure` or `.SubFigure` instance the artist belongs to.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure` or `~matplotlib.figure.SubFigure`
    """
    if self._parent_figure is fig:
        return  # No-op: already attached to this figure.
    # Re-parenting is forbidden for the same reason an artist cannot be
    # added to more than one Axes.
    if self._parent_figure is not None:
        raise RuntimeError("Can not put single artist in "
                           "more than one figure")
    self._parent_figure = fig
    if self._parent_figure and self._parent_figure is not self:
        self.pchanged()
    self.stale = True
# Property alias so that ``artist.figure`` reads/writes the parent
# (Sub)Figure; use get_figure(root=True) for the root of nested SubFigures.
figure = property(get_figure, set_figure,
                  doc=("The (Sub)Figure that the artist is on. For more "
                       "control, use the `get_figure` method."))
def set_clip_box(self, clipbox):
    """
    Set the artist's clip `.Bbox`.

    Parameters
    ----------
    clipbox : `~matplotlib.transforms.BboxBase` or None
        Will typically be created from a `.TransformedBbox`.  For instance,
        ``TransformedBbox(Bbox([[0, 0], [1, 1]]), ax.transAxes)`` is the
        default clipping for an artist added to an Axes.
    """
    _api.check_isinstance((BboxBase, None), clipbox=clipbox)
    if clipbox == self.clipbox:
        return  # Unchanged — avoid spurious notifications.
    self.clipbox = clipbox
    self.pchanged()
    self.stale = True
def set_clip_path(self, path, transform=None):
    """
    Set the artist's clip path.

    Parameters
    ----------
    path : `~matplotlib.patches.Patch` or `.Path` or `.TransformedPath` or None
        The clip path. If given a `.Path`, *transform* must be provided as
        well. If *None*, a previously set clip path is removed.
    transform : `~matplotlib.transforms.Transform`, optional
        Only used if *path* is a `.Path`, in which case the given `.Path`
        is converted to a `.TransformedPath` using *transform*.

    Notes
    -----
    For efficiency, if *path* is a `.Rectangle` this method will set the
    clipping box to the corresponding rectangle and set the clipping path
    to ``None``.

    For technical reasons (support of `~.Artist.set`), a tuple
    (*path*, *transform*) is also accepted as a single positional
    parameter.

    .. ACCEPTS: Patch or (Path, Transform) or None
    """
    from matplotlib.patches import Patch, Rectangle

    success = False
    if transform is None:
        # No transform given: *path* must already carry its own transform
        # (Patch/Rectangle), be a (path, transform) tuple, or be None.
        if isinstance(path, Rectangle):
            # Fast path: rectangles clip via the clip box, not a clip path.
            self.clipbox = TransformedBbox(Bbox.unit(),
                                           path.get_transform())
            self._clippath = None
            success = True
        elif isinstance(path, Patch):
            self._clippath = TransformedPatchPath(path)
            success = True
        elif isinstance(path, tuple):
            # Unpack (path, transform) and fall through to the checks below.
            path, transform = path
    if path is None:
        self._clippath = None
        success = True
    elif isinstance(path, Path):
        self._clippath = TransformedPath(path, transform)
        success = True
    elif isinstance(path, TransformedPatchPath):
        self._clippath = path
        success = True
    elif isinstance(path, TransformedPath):
        self._clippath = path
        success = True
    if not success:
        raise TypeError(
            "Invalid arguments to set_clip_path, of type "
            f"{type(path).__name__} and {type(transform).__name__}")
    # This may result in the callbacks being hit twice, but guarantees they
    # will be hit at least once.
    self.pchanged()
    self.stale = True
def get_alpha(self):
    """
    Return the alpha value used for blending - not supported on all
    backends.
    """
    return self._alpha
def get_visible(self):
    """Return whether the artist is visible."""
    return self._visible
def get_animated(self):
    """Return whether the artist is excluded from regular drawing (animated)."""
    return self._animated
def get_in_layout(self):
    """
    Return ``True`` if the artist is included in layout calculations.

    E.g. :ref:`constrainedlayout_guide`, `.Figure.tight_layout()`, and
    ``fig.savefig(fname, bbox_inches='tight')``.
    """
    return self._in_layout
def _fully_clipped_to_axes(self):
    """
    Return ``True`` if the artist is clipped to the Axes and can thus be
    skipped in layout calculations.

    Requires `get_clip_on` to be True, one of `clip_box` or `clip_path` to
    be set, ``clip_box.extents`` to match ``ax.bbox.extents`` (if set), and
    ``clip_path._patch`` to be ``ax.patch`` (if set).
    """
    # ``clip_path.get_fully_transformed_path().get_extents()`` cannot be
    # compared to ``axes.bbox.extents`` directly: before the artist is
    # drawn the extents may still be ``Bbox.null()``, and this method must
    # let ``axes.get_tightbbox()`` bypass drawing.
    clip_box = self.get_clip_box()
    clip_path = self.get_clip_path()
    if self.axes is None or not self.get_clip_on():
        return False
    if clip_box is None and clip_path is None:
        return False
    if (clip_box is not None
            and not np.all(clip_box.extents == self.axes.bbox.extents)):
        return False
    if clip_path is not None and not (
            isinstance(clip_path, TransformedPatchPath)
            and clip_path._patch is self.axes.patch):
        return False
    return True
def get_clip_on(self):
    """Return whether clipping is enabled for this artist."""
    return self._clipon
def get_clip_box(self):
    """Return the clip `.Bbox`, or *None* if unset."""
    return self.clipbox
def get_clip_path(self):
    """Return the clip path, or *None* if unset."""
    return self._clippath
def get_transformed_clip_path_and_affine(self):
    """
    Return the clip path with the non-affine part of its transformation
    applied, and the remaining affine part of its transformation.

    Returns ``(None, None)`` when no clip path is set.
    """
    clippath = self._clippath
    if clippath is None:
        return None, None
    return clippath.get_transformed_path_and_affine()
def set_clip_on(self, b):
    """
    Set whether the artist uses clipping.

    When False, artists will be visible outside the Axes, which can lead
    to unexpected results.

    Parameters
    ----------
    b : bool
    """
    self._clipon = b
    # Callbacks may fire twice, but this guarantees at least one firing.
    self.pchanged()
    self.stale = True
def _set_gc_clip(self, gc):
"""Set the clip properly for the gc."""
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
    """Return whether the artist is to be rasterized."""
    return self._rasterized
def set_rasterized(self, rasterized):
    """
    Force rasterized (bitmap) drawing for vector graphics output.

    Rasterized drawing is not supported by all artists.  If you try to
    enable this on an artist that does not support it, the command has no
    effect and a warning will be issued.

    This setting is ignored for pixel-based output.

    See also :doc:`/gallery/misc/rasterization_demo`.

    Parameters
    ----------
    rasterized : bool
    """
    can_rasterize = getattr(self.draw, "_supports_rasterization", False)
    if rasterized and not can_rasterize:
        _api.warn_external(f"Rasterization of '{self}' will be ignored")
    self._rasterized = rasterized
def get_agg_filter(self):
    """Return the filter function used for agg filtering, or *None*."""
    return self._agg_filter
def set_agg_filter(self, filter_func):
    """
    Set the agg filter.

    Parameters
    ----------
    filter_func : callable
        A filter function taking a (m, n, depth) float array and a dpi
        value, and returning a (m, n, depth) array together with two
        offsets from the bottom left corner of the image

    .. ACCEPTS: a filter function, which takes a (m, n, 3) float array
        and a dpi value, and returns a (m, n, 3) array and two offsets
        from the bottom left corner of the image
    """
    self._agg_filter = filter_func
    self.stale = True
def draw(self, renderer):
    """
    Draw the Artist (and its children) using the given renderer.

    This is a no-op when the artist is not visible (`.Artist.get_visible`
    returns False).

    Parameters
    ----------
    renderer : `~matplotlib.backend_bases.RendererBase` subclass.

    Notes
    -----
    This method is overridden in the Artist subclasses.
    """
    if not self.get_visible():
        return
    # Drawing consumed the pending changes.
    self.stale = False
def set_alpha(self, alpha):
    """
    Set the alpha value used for blending - not supported on all backends.

    Parameters
    ----------
    alpha : float or None
        *alpha* must be within the 0-1 range, inclusive.
    """
    if alpha is not None:
        if not isinstance(alpha, Real):
            raise TypeError(
                f'alpha must be numeric or None, not {type(alpha)}')
        if not (0 <= alpha <= 1):
            raise ValueError(f'alpha ({alpha}) is outside 0-1 range')
    if alpha != self._alpha:
        self._alpha = alpha
        self.pchanged()
        self.stale = True
def _set_alpha_for_array(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
Parameters
----------
alpha : array-like or float or None
All values must be within the 0-1 range, inclusive.
Masked values and nans are not supported.
"""
if isinstance(alpha, str):
raise TypeError("alpha must be numeric or None, not a string")
if not np.iterable(alpha):
Artist.set_alpha(self, alpha)
return
alpha = np.asarray(alpha)
if not (0 <= alpha.min() and alpha.max() <= 1):
raise ValueError('alpha must be between 0 and 1, inclusive, '
f'but min is {alpha.min()}, max is {alpha.max()}')
self._alpha = alpha
self.pchanged()
self.stale = True
def set_visible(self, b):
    """
    Set the artist's visibility.

    Parameters
    ----------
    b : bool
    """
    if b == self._visible:
        return  # Unchanged — avoid spurious notifications.
    self._visible = b
    self.pchanged()
    self.stale = True
def set_animated(self, b):
    """
    Set whether the artist is intended to be used in an animation.

    If True, the artist is excluded from regular drawing of the figure and
    must be drawn explicitly via `.Figure.draw_artist` /
    `.Axes.draw_artist`.  This is used to speed up animations with
    blitting; see `matplotlib.animation` and :ref:`blitting`.

    Parameters
    ----------
    b : bool
    """
    if self._animated == b:
        return
    self._animated = b
    self.pchanged()
def set_in_layout(self, in_layout):
    """
    Set whether the artist is included in layout calculations,
    e.g. :ref:`constrainedlayout_guide`, `.Figure.tight_layout()`, and
    ``fig.savefig(fname, bbox_inches='tight')``.

    Parameters
    ----------
    in_layout : bool
    """
    self._in_layout = in_layout
def get_label(self):
    """Return the label used for this artist in the legend."""
    return self._label
def set_label(self, s):
    """
    Set a label that will be displayed in the legend.

    Parameters
    ----------
    s : object
        *s* will be converted to a string by calling `str`.
    """
    new_label = str(s) if s is not None else None
    if new_label != self._label:
        self._label = new_label
        self.pchanged()
        self.stale = True
def get_zorder(self):
    """Return the artist's zorder (draw order; lower draws first)."""
    return self.zorder
def set_zorder(self, level):
    """
    Set the zorder for the artist.  Artists with lower zorder
    values are drawn first.

    Parameters
    ----------
    level : float
        *None* resets to the class default zorder.
    """
    if level is None:
        level = self.__class__.zorder
    if level == self.zorder:
        return
    self.zorder = level
    self.pchanged()
    self.stale = True
@property
def sticky_edges(self):
    """
    ``x`` and ``y`` sticky edge lists for autoscaling.

    During autoscaling, a data limit that coincides with a value in the
    corresponding sticky_edges list gets no margin added — the view limit
    "sticks" to that edge.  Histograms are a typical use case: no margin
    is expected on the bottom edge (0).

    Margin expansion also "bumps" against sticky edges and cannot cross
    them: with an upper data limit of 1.0, a margin-expanded view limit of
    1.2, and a sticky edge at 1.1, the actual upper view limit is 1.1.

    This attribute cannot be assigned to; modify the ``x`` and ``y``
    lists in place instead.

    Examples
    --------
    >>> artist.sticky_edges.x[:] = (xmin, xmax)
    >>> artist.sticky_edges.y[:] = (ymin, ymax)
    """
    return self._sticky_edges
def update_from(self, other):
    """Copy properties from *other* to *self*."""
    # Plain attribute copies; sticky edges are updated in place below so
    # that existing references to the lists stay valid.
    for attr in ('_transform', '_transformSet', '_visible', '_alpha',
                 'clipbox', '_clipon', '_clippath', '_label', '_sketch',
                 '_path_effects'):
        setattr(self, attr, getattr(other, attr))
    self.sticky_edges.x[:] = other.sticky_edges.x.copy()
    self.sticky_edges.y[:] = other.sticky_edges.y.copy()
    self.pchanged()
    self.stale = True
def properties(self):
    """Return a dictionary of all the properties of the artist."""
    # Delegate introspection to ArtistInspector (defined later in this
    # module).
    return ArtistInspector(self).properties()
def _update_props(self, props, errfmt):
    """
    Helper for `.Artist.set` and `.Artist.update`.

    *errfmt* generates the error message for invalid property names; it
    is formatted with ``type(self)`` for "{cls}" and the property name
    for "{prop_name}".
    """
    ret = []
    # Suppress per-property change events; fire a single pchanged at the
    # end instead.
    with cbook._setattr_cm(self, eventson=False):
        for k, v in props.items():
            # "axes" is a plain attribute that update/set/setp may write.
            if k == "axes":
                ret.append(setattr(self, k, v))
                continue
            setter = getattr(self, f"set_{k}", None)
            if not callable(setter):
                raise AttributeError(
                    errfmt.format(cls=type(self), prop_name=k),
                    name=k)
            ret.append(setter(v))
    if ret:
        self.pchanged()
        self.stale = True
    return ret
def update(self, props):
    """
    Update this artist's properties from the dict *props*.

    Parameters
    ----------
    props : dict
    """
    return self._update_props(
        props, "{cls.__name__!r} object has no property {prop_name!r}")
def _internal_update(self, kwargs):
"""
Update artist properties without prenormalizing them, but generating
errors as if calling `set`.
The lack of prenormalization is to maintain backcompatibility.
"""
return self._update_props(
kwargs, "{cls.__name__}.set() got an unexpected keyword argument "
"{prop_name!r}")
def set(self, **kwargs):
    # The docstring and signature of this method are auto-generated by
    # Artist._update_set_signature_and_docstring() at the end of the
    # module.
    return self._internal_update(cbook.normalize_kwargs(kwargs, self))
@contextlib.contextmanager
def _cm_set(self, **kwargs):
"""
`.Artist.set` context-manager that restores original values at exit.
"""
orig_vals = {k: getattr(self, f"get_{k}")() for k in kwargs}
try:
self.set(**kwargs)
yield
finally:
self.set(**orig_vals)
def findobj(self, match=None, include_self=True):
    """
    Find artist objects.

    Recursively find all `.Artist` instances contained in the artist.

    Parameters
    ----------
    match
        A filter criterion for the matches. This can be

        - *None*: Return all objects contained in artist.
        - A function with signature ``def match(artist: Artist) -> bool``.
          The result will only contain artists for which the function
          returns *True*.
        - A class instance: e.g., `.Line2D`. The result will only contain
          artists of this class or its subclasses (``isinstance`` check).

    include_self : bool
        Include *self* in the list to be checked for a match.

    Returns
    -------
    list of `.Artist`
    """
    # Normalize *match* into a predicate function.
    if match is None:
        def predicate(x):
            return True
    elif isinstance(match, type) and issubclass(match, Artist):
        def predicate(x):
            return isinstance(x, match)
    elif callable(match):
        predicate = match
    else:
        raise ValueError('match must be None, a matplotlib.artist.Artist '
                         'subclass, or a callable')
    found = []
    for child in self.get_children():
        found.extend(child.findobj(predicate))
    if include_self and predicate(self):
        found.append(self)
    return found
def get_cursor_data(self, event):
    """
    Return the cursor data for a given event.

    .. note::
        This method is intended to be overridden by artist subclasses.
        As an end-user of Matplotlib you will most likely not call this
        method yourself.

    Cursor data provides additional context for an event (e.g. the
    z-value of an `.AxesImage` shown in the status bar while the mouse
    moves).  The base implementation returns *None*.  Subclasses may
    return arbitrary data, provided `.format_cursor_data` can convert it
    to a string.

    Parameters
    ----------
    event : `~matplotlib.backend_bases.MouseEvent`

    See Also
    --------
    format_cursor_data
    """
    return None
def format_cursor_data(self, data):
    """
    Return a string representation of *data*.

    .. note::
        This method is intended to be overridden by artist subclasses.
        As an end-user of Matplotlib you will most likely not call this
        method yourself.

    The default implementation converts ints and floats and arrays of ints
    and floats into a comma-separated string enclosed in square brackets,
    unless the artist has an associated colorbar, in which case scalar
    values are formatted using the colorbar's formatter.

    See Also
    --------
    get_cursor_data
    """
    if np.ndim(data) == 0 and hasattr(self, "_format_cursor_data_override"):
        # Workaround that lets ScalarMappable define its own formatting;
        # see ScalarMappable._format_cursor_data_override for details.
        return self._format_cursor_data_override(data)
    try:
        data[0]
    except (TypeError, IndexError):
        # Not indexable: wrap the scalar in a list.
        data = [data]
    parts = [f'{item:0.3g}' for item in data if isinstance(item, Number)]
    return "[" + ", ".join(parts) + "]"
def get_mouseover(self):
    """
    Return whether this artist is queried for custom context information
    when the mouse cursor moves over it.
    """
    return self._mouseover
def set_mouseover(self, mouseover):
"""
Set whether this artist is queried for custom context information when
the mouse cursor moves over it.
Parameters
----------
mouseover : bool
See Also
--------
get_cursor_data
.ToolCursorPosition
.NavigationToolbar2
"""
self._mouseover = bool(mouseover)
ax = self.axes
if ax:
if self._mouseover:
ax._mouseover_set.add(self)
else:
ax._mouseover_set.discard(self)
# Property alias for get_mouseover/set_mouseover, kept for backcompat.
mouseover = property(get_mouseover, set_mouseover)  # backcompat.
def _get_tightbbox_for_layout_only(obj, *args, **kwargs):
"""
Matplotlib's `.Axes.get_tightbbox` and `.Axis.get_tightbbox` support a
*for_layout_only* kwarg; this helper tries to use the kwarg but skips it
when encountering third-party subclasses that do not support it.
"""
try:
return obj.get_tightbbox(*args, **{**kwargs, "for_layout_only": True})
except TypeError:
return obj.get_tightbbox(*args, **kwargs)
class ArtistInspector:
    """
    A helper class to inspect an `~matplotlib.artist.Artist` and return
    information about its settable properties and their current values.
    """

    def __init__(self, o):
        r"""
        Initialize the artist inspector with an `Artist` or an iterable of
        `Artist`\s.  If an iterable is used, we assume it is a homogeneous
        sequence (all `Artist`\s are of the same type) and it is your
        responsibility to make sure this is so.
        """
        # For an iterable, inspect only the first element (homogeneity is
        # assumed, see docstring).
        if not isinstance(o, Artist):
            if np.iterable(o):
                o = list(o)
                if len(o):
                    o = o[0]
        # oorig: the original instance (for reading current values);
        # o: its class (for introspecting getters/setters).
        self.oorig = o
        if not isinstance(o, type):
            o = type(o)
        self.o = o
        self.aliasd = self.get_aliases()

    def get_aliases(self):
        """
        Get a dict mapping property fullnames to sets of aliases for each alias
        in the :class:`~matplotlib.artist.ArtistInspector`.

        e.g., for lines::

          {'markerfacecolor': {'mfc'},
           'linewidth'      : {'lw'},
          }
        """
        names = [name for name in dir(self.o)
                 if name.startswith(('set_', 'get_'))
                 and callable(getattr(self.o, name))]
        aliases = {}
        for name in names:
            func = getattr(self.o, name)
            if not self.is_alias(func):
                continue
            # The alias docstring reads "Alias for `get_xxx`/`set_xxx`";
            # extract the target property name from the backticked part.
            propname = re.search(f"`({name[:4]}.*)`",  # get_.*/set_.*
                                 inspect.getdoc(func)).group(1)
            aliases.setdefault(propname[4:], set()).add(name[4:])
        return aliases

    # Matches an "ACCEPTS:" (or ".. ACCEPTS:") line in a setter docstring,
    # capturing the accepted-values description up to a blank line.
    _get_valid_values_regex = re.compile(
        r"\n\s*(?:\.\.\s+)?ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
    )

    def get_valid_values(self, attr):
        """
        Get the legal arguments for the setter associated with *attr*.

        This is done by querying the docstring of the setter for a line that
        begins with "ACCEPTS:" or ".. ACCEPTS:", and then by looking for a
        numpydoc-style documentation for the setter's first argument.
        """
        name = 'set_%s' % attr
        if not hasattr(self.o, name):
            raise AttributeError(f'{self.o} has no function {name}')
        func = getattr(self.o, name)
        # Explicit per-setter override, if defined.
        if hasattr(func, '_kwarg_doc'):
            return func._kwarg_doc
        docstring = inspect.getdoc(func)
        if docstring is None:
            return 'unknown'
        if docstring.startswith('Alias for '):
            return None
        match = self._get_valid_values_regex.search(docstring)
        if match is not None:
            return re.sub("\n *", " ", match.group(1))
        # Much faster than list(inspect.signature(func).parameters)[1],
        # although barely relevant wrt. matplotlib's total import time.
        param_name = func.__code__.co_varnames[1]
        # We could set the presence * based on whether the parameter is a
        # varargs (it can't be a varkwargs) but it's not really worth it.
        match = re.search(fr"(?m)^ *\*?{param_name} : (.+)", docstring)
        if match:
            return match.group(1)
        return 'unknown'

    def _replace_path(self, source_class):
        """
        Changes the full path to the public API path that is used
        in sphinx.  This is needed for links to work.
        """
        replace_dict = {'_base._AxesBase': 'Axes',
                        '_axes.Axes': 'Axes'}
        for key, value in replace_dict.items():
            source_class = source_class.replace(key, value)
        return source_class

    def get_setters(self):
        """
        Get the attribute strings with setters for object.

        For example, for a line, return ``['markerfacecolor', 'linewidth',
        ....]``.
        """
        setters = []
        for name in dir(self.o):
            if not name.startswith('set_'):
                continue
            func = getattr(self.o, name)
            # Skip non-callables, zero-argument setters, and aliases.
            if (not callable(func)
                    or self.number_of_parameters(func) < 2
                    or self.is_alias(func)):
                continue
            setters.append(name[4:])
        return setters

    @staticmethod
    @cache
    def number_of_parameters(func):
        """Return number of parameters of the callable *func*."""
        return len(inspect.signature(func).parameters)

    @staticmethod
    @cache
    def is_alias(method):
        """
        Return whether the object *method* is an alias for another method.
        """
        ds = inspect.getdoc(method)
        if ds is None:
            return False
        return ds.startswith('Alias for ')

    def aliased_name(self, s):
        """
        Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME'.

        For example, for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'.
        """
        aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
        return s + aliases

    _NOT_LINKABLE = {
        # A set of property setter methods that are not available in our
        # current docs. This is a workaround used to prevent trying to link
        # these setters which would lead to "target reference not found"
        # warnings during doc build.
        'matplotlib.image._ImageBase.set_alpha',
        'matplotlib.image._ImageBase.set_array',
        'matplotlib.image._ImageBase.set_data',
        'matplotlib.image._ImageBase.set_filternorm',
        'matplotlib.image._ImageBase.set_filterrad',
        'matplotlib.image._ImageBase.set_interpolation',
        'matplotlib.image._ImageBase.set_interpolation_stage',
        'matplotlib.image._ImageBase.set_resample',
        'matplotlib.text._AnnotationBase.set_annotation_clip',
    }

    def aliased_name_rest(self, s, target):
        """
        Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME',
        formatted for reST.

        For example, for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'.
        """
        # workaround to prevent "reference target not found"
        if target in self._NOT_LINKABLE:
            return f'``{s}``'
        aliases = ''.join(
            f' or :meth:`{a} <{target}>`' for a in sorted(self.aliasd.get(s, [])))
        return f':meth:`{s} <{target}>`{aliases}'

    def pprint_setters(self, prop=None, leadingspace=2):
        """
        If *prop* is *None*, return a list of strings of all settable
        properties and their valid values.

        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of property : valid
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return f'{pad}{prop}: {accepts}'
        lines = []
        for prop in sorted(self.get_setters()):
            accepts = self.get_valid_values(prop)
            name = self.aliased_name(prop)
            lines.append(f'{pad}{name}: {accepts}')
        return lines

    def pprint_setters_rest(self, prop=None, leadingspace=4):
        """
        If *prop* is *None*, return a list of reST-formatted strings of all
        settable properties and their valid values.

        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of "property : valid"
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return f'{pad}{prop}: {accepts}'
        prop_and_qualnames = []
        for prop in sorted(self.get_setters()):
            # Find the parent method which actually provides the docstring.
            for cls in self.o.__mro__:
                method = getattr(cls, f"set_{prop}", None)
                if method and method.__doc__ is not None:
                    break
            else:  # No docstring available.
                method = getattr(self.o, f"set_{prop}")
            prop_and_qualnames.append(
                (prop, f"{method.__module__}.{method.__qualname__}"))
        names = [self.aliased_name_rest(prop, target)
                 .replace('_base._AxesBase', 'Axes')
                 .replace('_axes.Axes', 'Axes')
                 for prop, target in prop_and_qualnames]
        accepts = [self.get_valid_values(prop)
                   for prop, _ in prop_and_qualnames]
        # Build a reST grid table with two columns: Property | Description.
        col0_len = max(len(n) for n in names)
        col1_len = max(len(a) for a in accepts)
        table_formatstr = pad + ' ' + '=' * col0_len + ' ' + '=' * col1_len
        return [
            '',
            pad + '.. table::',
            pad + ' :class: property-table',
            '',
            table_formatstr,
            pad + ' ' + 'Property'.ljust(col0_len)
            + ' ' + 'Description'.ljust(col1_len),
            table_formatstr,
            *[pad + ' ' + n.ljust(col0_len) + ' ' + a.ljust(col1_len)
              for n, a in zip(names, accepts)],
            table_formatstr,
            '',
        ]

    def properties(self):
        """Return a dictionary mapping property name -> value."""
        o = self.oorig
        getters = [name for name in dir(o)
                   if name.startswith('get_') and callable(getattr(o, name))]
        getters.sort()
        d = {}
        for name in getters:
            func = getattr(o, name)
            if self.is_alias(func):
                continue
            # Best-effort: skip getters that raise, and silence warnings
            # they may emit.
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    val = func()
            except Exception:
                continue
            else:
                d[name[4:]] = val
        return d

    def pprint_getters(self):
        """Return the getters and actual values as list of strings."""
        lines = []
        for name, val in sorted(self.properties().items()):
            # Truncate long arrays and long reprs for readable output.
            if getattr(val, 'shape', ()) != () and len(val) > 6:
                s = str(val[:6]) + '...'
            else:
                s = str(val)
            s = s.replace('\n', ' ')
            if len(s) > 50:
                s = s[:50] + '...'
            name = self.aliased_name(name)
            lines.append(f' {name} = {s}')
        return lines
def getp(obj, property=None):
    """
    Return the value of an `.Artist`'s *property*, or print all of them.

    Parameters
    ----------
    obj : `~matplotlib.artist.Artist`
        The queried artist; e.g., a `.Line2D`, a `.Text`, or an `~.axes.Axes`.
    property : str or None, default: None
        If *property* is 'somename', this function returns
        ``obj.get_somename()``.  If it's None (or unset), it *prints* all
        gettable properties from *obj*.  Many properties have aliases for
        shorter typing, e.g. 'lw' is an alias for 'linewidth'.  In the
        printed output, aliases and full property names are listed as::

            property or alias = value

        e.g.::

            linewidth or lw = 2

    See Also
    --------
    setp
    """
    if property is not None:
        # Direct lookup: 'foo' -> obj.get_foo().
        return getattr(obj, 'get_' + property)()
    insp = ArtistInspector(obj)
    ret = insp.pprint_getters()
    print('\n'.join(ret))
# Convenience alias: `get` behaves exactly like `getp`.
get = getp
def setp(obj, *args, file=None, **kwargs):
    """
    Set one or more properties on an `.Artist`, or list allowed values.

    Parameters
    ----------
    obj : `~matplotlib.artist.Artist` or list of `.Artist`
        The artist(s) whose properties are being set or queried.  When
        setting properties, all artists are affected; when querying the
        allowed values, only the first instance in the sequence is queried.

        For example, two lines can be made thicker and red with a single
        call:

        >>> x = arange(0, 1, 0.01)
        >>> lines = plot(x, sin(2*pi*x), x, sin(4*pi*x))
        >>> setp(lines, linewidth=2, color='r')

    file : file-like, default: `sys.stdout`
        Where `setp` writes its output when asked to list allowed values.

        >>> with open('output.log') as file:
        ...     setp(line, file=file)

        The default, ``None``, means `sys.stdout`.
    *args, **kwargs
        The properties to set.  The following combinations are supported:

        - set properties with keyword arguments:
          ``setp(line, linestyle='--')``, ``setp(line, linewidth=2, color='r')``
        - list allowed values for one property: ``setp(line, 'linestyle')``
        - list all settable properties and allowed values: ``setp(line)``
        - MATLAB style string/value pairs, equivalent to keyword arguments:

          >>> setp(lines, 'linewidth', 2, 'color', 'r')  # MATLAB style
          >>> setp(lines, linewidth=2, color='r')        # Python style

    See Also
    --------
    getp
    """
    # Normalize to a flat list of artists.
    objs = [obj] if isinstance(obj, Artist) else list(cbook.flatten(obj))
    if not objs:
        return
    insp = ArtistInspector(objs[0])
    if not kwargs and len(args) < 2:
        # Query mode: nothing to set, so list the allowed values instead.
        if args:
            print(insp.pprint_setters(prop=args[0]), file=file)
        else:
            print('\n'.join(insp.pprint_setters()), file=file)
        return
    # MATLAB-style positional arguments must come in name/value pairs.
    if len(args) % 2:
        raise ValueError('The set args must be string, value pairs')
    positional_props = dict(zip(args[::2], args[1::2]))
    ret = ([o.update(positional_props) for o in objs]
           + [o.set(**kwargs) for o in objs])
    return list(cbook.flatten(ret))
def kwdoc(artist):
    r"""
    Inspect an `~matplotlib.artist.Artist` class (using `.ArtistInspector`) and
    return information about its settable properties and their current values.

    Parameters
    ----------
    artist : `~matplotlib.artist.Artist` or an iterable of `Artist`\s

    Returns
    -------
    str
        The settable properties of *artist*, as plain text if
        :rc:`docstring.hardcopy` is False and as a rst table (intended for
        use in Sphinx) if it is True.
    """
    inspector = ArtistInspector(artist)
    if mpl.rcParams['docstring.hardcopy']:
        # rst table output, for the Sphinx-rendered docs.
        return '\n'.join(inspector.pprint_setters_rest(leadingspace=4))
    # Plain-text output, for interactive help().
    return 'Properties:\n' + '\n'.join(inspector.pprint_setters(leadingspace=4))
# We defer this to the end of the module, because it needs ArtistInspector
# to be defined.
Artist._update_set_signature_and_docstring()
venv\Lib\site-packages\matplotlib\axis.py
"""
Classes for the ticks and x- and y-axis.
"""
import datetime
import functools
import logging
from numbers import Real
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook
import matplotlib.artist as martist
import matplotlib.colors as mcolors
import matplotlib.lines as mlines
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
_log = logging.getLogger(__name__)
# Number of path interpolation steps applied to gridline paths (see
# Tick.__init__, where it is assigned to the gridline's path).
GRIDLINE_INTERPOLATION_STEPS = 180

# This list is being used for compatibility with Axes.grid, which
# allows all Line2D kwargs.
_line_inspector = martist.ArtistInspector(mlines.Line2D)
_line_param_names = _line_inspector.get_setters()
_line_param_aliases = [next(iter(d)) for d in _line_inspector.aliasd.values()]
# Every Line2D property/alias, prefixed with "grid_" for use as tick kwargs.
_gridline_param_names = ['grid_' + name
                         for name in _line_param_names + _line_param_aliases]
class Tick(martist.Artist):
    """
    Abstract base class for the axis ticks, grid lines and labels.

    Ticks mark a position on an Axis. They contain two lines as markers and
    two labels; one each for the bottom and top positions (in case of an
    `.XAxis`) or for the left and right positions (in case of a `.YAxis`).

    Attributes
    ----------
    tick1line : `~matplotlib.lines.Line2D`
        The left/bottom tick marker.
    tick2line : `~matplotlib.lines.Line2D`
        The right/top tick marker.
    gridline : `~matplotlib.lines.Line2D`
        The grid line associated with the label position.
    label1 : `~matplotlib.text.Text`
        The left/bottom tick label.
    label2 : `~matplotlib.text.Text`
        The right/top tick label.
    """

    def __init__(
        self, axes, loc, *,
        size=None,  # points
        width=None,
        color=None,
        tickdir=None,
        pad=None,
        labelsize=None,
        labelcolor=None,
        labelfontfamily=None,
        zorder=None,
        gridOn=None,  # defaults to axes.grid depending on axes.grid.which
        tick1On=True,
        tick2On=True,
        label1On=True,
        label2On=False,
        major=True,
        labelrotation=0,
        grid_color=None,
        grid_linestyle=None,
        grid_linewidth=None,
        grid_alpha=None,
        **kwargs,  # Other Line2D kwargs applied to gridlines.
    ):
        """
        bbox is the Bound2D bounding box in display coords of the Axes
        loc is the tick location in data coords
        size is the tick size in points
        """
        super().__init__()

        # Resolve the grid-visibility default from rcParams, honoring
        # axes.grid.which ('major', 'minor' or 'both').
        if gridOn is None:
            which = mpl.rcParams['axes.grid.which']
            if major and (which in ('both', 'major')):
                gridOn = mpl.rcParams['axes.grid']
            elif (not major) and (which in ('both', 'minor')):
                gridOn = mpl.rcParams['axes.grid']
            else:
                gridOn = False

        self.set_figure(axes.get_figure(root=False))
        self.axes = axes
        self._loc = loc
        self._major = major

        # rcParams keys are built from the subclass name ('xtick'/'ytick')
        # and the major/minor group, e.g. "xtick.major.size".
        name = self.__name__
        major_minor = "major" if major else "minor"

        if size is None:
            size = mpl.rcParams[f"{name}.{major_minor}.size"]
        self._size = size

        if width is None:
            width = mpl.rcParams[f"{name}.{major_minor}.width"]
        self._width = width

        if color is None:
            color = mpl.rcParams[f"{name}.color"]

        if pad is None:
            pad = mpl.rcParams[f"{name}.{major_minor}.pad"]
        self._base_pad = pad

        if labelcolor is None:
            labelcolor = mpl.rcParams[f"{name}.labelcolor"]

        if cbook._str_equal(labelcolor, 'inherit'):
            # inherit from tick color
            labelcolor = mpl.rcParams[f"{name}.color"]

        if labelsize is None:
            labelsize = mpl.rcParams[f"{name}.labelsize"]

        self._set_labelrotation(labelrotation)

        if zorder is None:
            # Major ticks draw slightly above minor ticks.
            if major:
                zorder = mlines.Line2D.zorder + 0.01
            else:
                zorder = mlines.Line2D.zorder
        self._zorder = zorder

        grid_color = mpl._val_or_rc(grid_color, "grid.color")
        grid_linestyle = mpl._val_or_rc(grid_linestyle, "grid.linestyle")
        grid_linewidth = mpl._val_or_rc(grid_linewidth, "grid.linewidth")
        if grid_alpha is None and not mcolors._has_alpha_channel(grid_color):
            # alpha precedence: kwarg > color alpha > rcParams['grid.alpha']
            # Note: only resolve to rcParams if the color does not have alpha
            # otherwise `grid(color=(1, 1, 1, 0.5))` would work like
            # grid(color=(1, 1, 1, 0.5), alpha=rcParams['grid.alpha'])
            # so that the rcParams default would override color alpha.
            grid_alpha = mpl.rcParams["grid.alpha"]
        # Strip the "grid_" prefix; the remainder are plain Line2D kwargs.
        grid_kw = {k[5:]: v for k, v in kwargs.items()}

        self.tick1line = mlines.Line2D(
            [], [],
            color=color, linestyle="none", zorder=zorder, visible=tick1On,
            markeredgecolor=color, markersize=size, markeredgewidth=width,
        )
        self.tick2line = mlines.Line2D(
            [], [],
            color=color, linestyle="none", zorder=zorder, visible=tick2On,
            markeredgecolor=color, markersize=size, markeredgewidth=width,
        )
        self.gridline = mlines.Line2D(
            [], [],
            color=grid_color, alpha=grid_alpha, visible=gridOn,
            linestyle=grid_linestyle, linewidth=grid_linewidth, marker="",
            **grid_kw,
        )
        self.gridline.get_path()._interpolation_steps = \
            GRIDLINE_INTERPOLATION_STEPS
        self.label1 = mtext.Text(
            np.nan, np.nan,
            fontsize=labelsize, color=labelcolor, visible=label1On,
            fontfamily=labelfontfamily, rotation=self._labelrotation[1])
        self.label2 = mtext.Text(
            np.nan, np.nan,
            fontsize=labelsize, color=labelcolor, visible=label2On,
            fontfamily=labelfontfamily, rotation=self._labelrotation[1])

        self._apply_tickdir(tickdir)

        for artist in [self.tick1line, self.tick2line, self.gridline,
                       self.label1, self.label2]:
            self._set_artist_props(artist)

        self.update_position(loc)

    def _set_labelrotation(self, labelrotation):
        # Normalize the input to a (mode, angle) pair; mode is 'auto' or
        # 'default'.
        if isinstance(labelrotation, str):
            mode = labelrotation
            angle = 0
        elif isinstance(labelrotation, (tuple, list)):
            mode, angle = labelrotation
        else:
            mode = 'default'
            angle = labelrotation
        _api.check_in_list(['auto', 'default'], labelrotation=mode)
        self._labelrotation = (mode, angle)

    @property
    def _pad(self):
        # Total label padding: the configured pad plus the part of the tick
        # that sticks out of the Axes.
        return self._base_pad + self.get_tick_padding()

    def _apply_tickdir(self, tickdir):
        """Set tick direction. Valid values are 'out', 'in', 'inout'."""
        # This method is responsible for verifying input and, in subclasses, for setting
        # the tick{1,2}line markers. From the user perspective this should always be
        # called through _apply_params, which further updates ticklabel positions using
        # the new pads.
        if tickdir is None:
            tickdir = mpl.rcParams[f'{self.__name__}.direction']
        else:
            _api.check_in_list(['in', 'out', 'inout'], tickdir=tickdir)
        self._tickdir = tickdir

    def get_tickdir(self):
        """Return the tick direction ('in', 'out' or 'inout')."""
        return self._tickdir

    def get_tick_padding(self):
        """Get the length of the tick outside of the Axes."""
        # Fraction of the tick length protruding from the Axes, per
        # direction.
        padding = {
            'in': 0.0,
            'inout': 0.5,
            'out': 1.0
        }
        return self._size * padding[self._tickdir]

    def get_children(self):
        # docstring inherited
        children = [self.tick1line, self.tick2line,
                    self.gridline, self.label1, self.label2]
        return children

    def set_clip_path(self, path, transform=None):
        # docstring inherited
        super().set_clip_path(path, transform)
        # Only the gridline is clipped; tick marks/labels sit on the edge.
        self.gridline.set_clip_path(path, transform)
        self.stale = True

    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the Tick marks.

        This function always returns false. It is more useful to test if the
        axis as a whole contains the mouse rather than the set of tick marks.
        """
        return False, {}

    def set_pad(self, val):
        """
        Set the tick label pad in points

        Parameters
        ----------
        val : float
        """
        self._apply_params(pad=val)
        self.stale = True

    def get_pad(self):
        """Get the value of the tick label pad in points."""
        return self._base_pad

    def get_loc(self):
        """Return the tick location (data coords) as a scalar."""
        return self._loc

    @martist.allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            self.stale = False
            return
        renderer.open_group(self.__name__, gid=self.get_gid())
        for artist in [self.gridline, self.tick1line, self.tick2line,
                       self.label1, self.label2]:
            artist.draw(renderer)
        renderer.close_group(self.__name__)
        self.stale = False

    def set_url(self, url):
        """
        Set the url of label1 and label2.

        Parameters
        ----------
        url : str
        """
        super().set_url(url)
        self.label1.set_url(url)
        self.label2.set_url(url)
        self.stale = True

    def _set_artist_props(self, a):
        # Propagate our figure to the child artist.
        a.set_figure(self.get_figure(root=False))

    def get_view_interval(self):
        """
        Return the view limits ``(min, max)`` of the axis the tick belongs to.
        """
        raise NotImplementedError('Derived must override')

    def _apply_params(self, **kwargs):
        """Apply appearance parameters (as forwarded by the parent Axis)."""
        # Visibility toggles for the individual sub-artists.
        for name, target in [("gridOn", self.gridline),
                             ("tick1On", self.tick1line),
                             ("tick2On", self.tick2line),
                             ("label1On", self.label1),
                             ("label2On", self.label2)]:
            if name in kwargs:
                target.set_visible(kwargs.pop(name))
        if any(k in kwargs for k in ['size', 'width', 'pad', 'tickdir']):
            self._size = kwargs.pop('size', self._size)
            # Width could be handled outside this block, but it is
            # convenient to leave it here.
            self._width = kwargs.pop('width', self._width)
            self._base_pad = kwargs.pop('pad', self._base_pad)
            # _apply_tickdir uses _size and _base_pad to make _pad, and also
            # sets the ticklines markers.
            self._apply_tickdir(kwargs.pop('tickdir', self._tickdir))
            for line in (self.tick1line, self.tick2line):
                line.set_markersize(self._size)
                line.set_markeredgewidth(self._width)
            # _get_text1_transform uses _pad from _apply_tickdir.
            trans = self._get_text1_transform()[0]
            self.label1.set_transform(trans)
            trans = self._get_text2_transform()[0]
            self.label2.set_transform(trans)
        tick_kw = {k: v for k, v in kwargs.items() if k in ['color', 'zorder']}
        if 'color' in kwargs:
            tick_kw['markeredgecolor'] = kwargs['color']
        self.tick1line.set(**tick_kw)
        self.tick2line.set(**tick_kw)
        # Also record the applied values on the Tick itself (_color, _zorder).
        for k, v in tick_kw.items():
            setattr(self, '_' + k, v)
        if 'labelrotation' in kwargs:
            self._set_labelrotation(kwargs.pop('labelrotation'))
            self.label1.set(rotation=self._labelrotation[1])
            self.label2.set(rotation=self._labelrotation[1])
        label_kw = {k[5:]: v for k, v in kwargs.items()
                    if k in ['labelsize', 'labelcolor', 'labelfontfamily']}
        self.label1.set(**label_kw)
        self.label2.set(**label_kw)
        grid_kw = {k[5:]: v for k, v in kwargs.items()
                   if k in _gridline_param_names}
        self.gridline.set(**grid_kw)

    def update_position(self, loc):
        """Set the location of tick in data coords with scalar *loc*."""
        raise NotImplementedError('Derived must override')

    def _get_text1_transform(self):
        raise NotImplementedError('Derived must override')

    def _get_text2_transform(self):
        raise NotImplementedError('Derived must override')
class XTick(Tick):
    """
    Contains all the Artists needed to make an x tick - the tick line,
    the label text and the grid line
    """
    __name__ = 'xtick'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # x in data coords, y in axes coords
        ax = self.axes
        self.tick1line.set(
            data=([0], [0]), transform=ax.get_xaxis_transform("tick1"))
        self.tick2line.set(
            data=([0], [1]), transform=ax.get_xaxis_transform("tick2"))
        self.gridline.set(
            data=([0, 0], [0, 1]), transform=ax.get_xaxis_transform("grid"))
        # the y loc is 3 points below the min of y axis
        trans, va, ha = self._get_text1_transform()
        self.label1.set(
            x=0, y=0,
            verticalalignment=va, horizontalalignment=ha, transform=trans,
        )
        trans, va, ha = self._get_text2_transform()
        self.label2.set(
            x=0, y=1,
            verticalalignment=va, horizontalalignment=ha, transform=trans,
        )

    def _get_text1_transform(self):
        # (transform, va, ha) for the bottom labels.
        return self.axes.get_xaxis_text1_transform(self._pad)

    def _get_text2_transform(self):
        # (transform, va, ha) for the top labels.
        return self.axes.get_xaxis_text2_transform(self._pad)

    def _apply_tickdir(self, tickdir):
        # docstring inherited
        super()._apply_tickdir(tickdir)
        # Pick the marker glyphs matching the tick direction.
        mark1, mark2 = {
            'out': (mlines.TICKDOWN, mlines.TICKUP),
            'in': (mlines.TICKUP, mlines.TICKDOWN),
            'inout': ('|', '|'),
        }[self._tickdir]
        self.tick1line.set_marker(mark1)
        self.tick2line.set_marker(mark2)

    def update_position(self, loc):
        """Set the location of tick in data coords with scalar *loc*."""
        self.tick1line.set_xdata((loc,))
        self.tick2line.set_xdata((loc,))
        self.gridline.set_xdata((loc,))
        self.label1.set_x(loc)
        self.label2.set_x(loc)
        self._loc = loc
        self.stale = True

    def get_view_interval(self):
        # docstring inherited
        return self.axes.viewLim.intervalx
class YTick(Tick):
    """
    Contains all the Artists needed to make a Y tick - the tick line,
    the label text and the grid line
    """
    __name__ = 'ytick'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # x in axes coords, y in data coords
        ax = self.axes
        self.tick1line.set(
            data=([0], [0]), transform=ax.get_yaxis_transform("tick1"))
        self.tick2line.set(
            data=([1], [0]), transform=ax.get_yaxis_transform("tick2"))
        self.gridline.set(
            data=([0, 1], [0, 0]), transform=ax.get_yaxis_transform("grid"))
        # the y loc is 3 points below the min of y axis
        trans, va, ha = self._get_text1_transform()
        self.label1.set(
            x=0, y=0,
            verticalalignment=va, horizontalalignment=ha, transform=trans,
        )
        trans, va, ha = self._get_text2_transform()
        self.label2.set(
            x=1, y=0,
            verticalalignment=va, horizontalalignment=ha, transform=trans,
        )

    def _get_text1_transform(self):
        # (transform, va, ha) for the left labels.
        return self.axes.get_yaxis_text1_transform(self._pad)

    def _get_text2_transform(self):
        # (transform, va, ha) for the right labels.
        return self.axes.get_yaxis_text2_transform(self._pad)

    def _apply_tickdir(self, tickdir):
        # docstring inherited
        super()._apply_tickdir(tickdir)
        # Pick the marker glyphs matching the tick direction.
        mark1, mark2 = {
            'out': (mlines.TICKLEFT, mlines.TICKRIGHT),
            'in': (mlines.TICKRIGHT, mlines.TICKLEFT),
            'inout': ('_', '_'),
        }[self._tickdir]
        self.tick1line.set_marker(mark1)
        self.tick2line.set_marker(mark2)

    def update_position(self, loc):
        """Set the location of tick in data coords with scalar *loc*."""
        self.tick1line.set_ydata((loc,))
        self.tick2line.set_ydata((loc,))
        self.gridline.set_ydata((loc,))
        self.label1.set_y(loc)
        self.label2.set_y(loc)
        self._loc = loc
        self.stale = True

    def get_view_interval(self):
        # docstring inherited
        return self.axes.viewLim.intervaly
class Ticker:
    """
    A container for the objects defining tick position and format.

    Attributes
    ----------
    locator : `~matplotlib.ticker.Locator` subclass
        Determines the positions of the ticks.
    formatter : `~matplotlib.ticker.Formatter` subclass
        Determines the format of the tick labels.
    """

    def __init__(self):
        # Both members start unset; the *_is_default flags track whether the
        # user has explicitly replaced them.
        self._locator = None
        self._formatter = None
        self._locator_is_default = True
        self._formatter_is_default = True

    @property
    def locator(self):
        return self._locator

    @locator.setter
    def locator(self, locator):
        # Type-check before storing so later draw code can rely on the API.
        if isinstance(locator, mticker.Locator):
            self._locator = locator
        else:
            raise TypeError('locator must be a subclass of '
                            'matplotlib.ticker.Locator')

    @property
    def formatter(self):
        return self._formatter

    @formatter.setter
    def formatter(self, formatter):
        # Type-check before storing so later draw code can rely on the API.
        if isinstance(formatter, mticker.Formatter):
            self._formatter = formatter
        else:
            raise TypeError('formatter must be a subclass of '
                            'matplotlib.ticker.Formatter')
class _LazyTickList:
    """
    A descriptor for lazy instantiation of tick lists.

    See comment above definition of the ``majorTicks`` and ``minorTicks``
    attributes.
    """

    def __init__(self, major):
        self._major = major

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        # instance._get_tick() can itself try to access this tick-list
        # attribute (e.g. in certain projection classes which override
        # e.g. get_xaxis_text1_transform).  To avoid infinite recursion,
        # first publish an empty list on the instance temporarily.  Note
        # that _get_tick() may call reset_ticks(); therefore the final
        # one-element list is created and assigned only afterwards.
        attr = "majorTicks" if self._major else "minorTicks"
        setattr(instance, attr, [])
        tick = instance._get_tick(major=bool(self._major))
        setattr(instance, attr, [tick])
        return getattr(instance, attr)
class Axis(martist.Artist):
"""
Base class for `.XAxis` and `.YAxis`.
Attributes
----------
isDefault_label : bool
axes : `~matplotlib.axes.Axes`
The `~.axes.Axes` to which the Axis belongs.
major : `~matplotlib.axis.Ticker`
Determines the major tick positions and their label format.
minor : `~matplotlib.axis.Ticker`
Determines the minor tick positions and their label format.
callbacks : `~matplotlib.cbook.CallbackRegistry`
label : `~matplotlib.text.Text`
The axis label.
labelpad : float
The distance between the axis label and the tick labels.
Defaults to :rc:`axes.labelpad`.
offsetText : `~matplotlib.text.Text`
A `.Text` object containing the data offset of the ticks (if any).
pickradius : float
The acceptance radius for containment tests. See also `.Axis.contains`.
majorTicks : list of `.Tick`
The major ticks.
.. warning::
Ticks are not guaranteed to be persistent. Various operations
can create, delete and modify the Tick instances. There is an
imminent risk that changes to individual ticks will not
survive if you work on the figure further (including also
panning/zooming on a displayed figure).
Working on the individual ticks is a method of last resort.
Use `.set_tick_params` instead if possible.
minorTicks : list of `.Tick`
The minor ticks.
"""
# Pad, in points, applied around the axis offset text.
OFFSETTEXTPAD = 3
# The class used in _get_tick() to create tick instances. Must either be
# overwritten in subclasses, or subclasses must reimplement _get_tick().
_tick_class = None
# Deprecated public access to the unit converter; use the accessor methods.
converter = _api.deprecate_privatize_attribute(
    "3.10",
    alternative="get_converter and set_converter methods"
)
def __str__(self):
    # Report the concrete axis type together with the Axes origin (0, 0)
    # mapped to display coordinates, e.g. "XAxis(80.0,52.8)".
    return "{}({},{})".format(
        type(self).__name__, *self.axes.transAxes.transform((0, 0)))
def __init__(self, axes, *, pickradius=15, clear=True):
    """
    Parameters
    ----------
    axes : `~matplotlib.axes.Axes`
        The `~.axes.Axes` to which the created Axis belongs.
    pickradius : float
        The acceptance radius for containment tests. See also
        `.Axis.contains`.
    clear : bool, default: True
        Whether to clear the Axis on creation. This is not required, e.g., when
        creating an Axis as part of an Axes, as ``Axes.clear`` will call
        ``Axis.clear``.

        .. versionadded:: 3.8
    """
    super().__init__()
    self._remove_overlapping_locs = True

    self.set_figure(axes.get_figure(root=False))

    self.isDefault_label = True

    self.axes = axes
    # Separate tick containers for the major and minor tick machinery.
    self.major = Ticker()
    self.minor = Ticker()
    self.callbacks = cbook.CallbackRegistry(signals=["units"])

    self._autolabelpos = True

    self.label = mtext.Text(
        np.nan, np.nan,
        fontsize=mpl.rcParams['axes.labelsize'],
        fontweight=mpl.rcParams['axes.labelweight'],
        color=mpl.rcParams['axes.labelcolor'],
    )  #: The `.Text` object of the axis label.

    self._set_artist_props(self.label)
    self.offsetText = mtext.Text(np.nan, np.nan)
    self._set_artist_props(self.offsetText)

    self.labelpad = mpl.rcParams['axes.labelpad']

    self.pickradius = pickradius

    # Initialize here for testing; later add API
    self._major_tick_kw = dict()
    self._minor_tick_kw = dict()

    if clear:
        self.clear()
    else:
        # clear() would set these; mirror the minimum state it provides.
        self._converter = None
        self._converter_is_explicit = False
        self.units = None

    self._autoscale_on = True
# The isDefault_* flags are stored on the Ticker containers so they stay
# attached to the locator/formatter pair they describe; these properties
# only delegate.
@property
def isDefault_majloc(self):
    return self.major._locator_is_default

@isDefault_majloc.setter
def isDefault_majloc(self, value):
    self.major._locator_is_default = value

@property
def isDefault_majfmt(self):
    return self.major._formatter_is_default

@isDefault_majfmt.setter
def isDefault_majfmt(self, value):
    self.major._formatter_is_default = value

@property
def isDefault_minloc(self):
    return self.minor._locator_is_default

@isDefault_minloc.setter
def isDefault_minloc(self, value):
    self.minor._locator_is_default = value

@property
def isDefault_minfmt(self):
    return self.minor._formatter_is_default

@isDefault_minfmt.setter
def isDefault_minfmt(self, value):
    self.minor._formatter_is_default = value
def _get_shared_axes(self):
    """Return Grouper of shared Axes for current axis."""
    return self.axes._shared_axes[
        self._get_axis_name()].get_siblings(self.axes)

def _get_shared_axis(self):
    """Return list of shared axis for current axis."""
    name = self._get_axis_name()
    return [ax._axis_map[name] for ax in self._get_shared_axes()]

def _get_axis_name(self):
    """Return the axis name."""
    # Find ourselves in the parent Axes' axis map (e.g. 'x' or 'y').
    return next(name for name, axis in self.axes._axis_map.items()
                if axis is self)
# During initialization, Axis objects often create ticks that are later
# unused; this turns out to be a very slow step. Instead, use a custom
# descriptor to make the tick lists lazy and instantiate them as needed.
majorTicks = _LazyTickList(major=True)
minorTicks = _LazyTickList(major=False)

def get_remove_overlapping_locs(self):
    """Return whether minor locs overlapping major locs are trimmed."""
    return self._remove_overlapping_locs

def set_remove_overlapping_locs(self, val):
    """Set whether minor locs overlapping major locs are trimmed."""
    self._remove_overlapping_locs = bool(val)

remove_overlapping_locs = property(
    get_remove_overlapping_locs, set_remove_overlapping_locs,
    doc=('If minor ticker locations that overlap with major '
         'ticker locations should be trimmed.'))
def set_label_coords(self, x, y, transform=None):
    """
    Set the coordinates of the label.

    By default, the x coordinate of the y label and the y coordinate of the
    x label are determined by the tick label bounding boxes, but this can
    lead to poor alignment of multiple labels if there are multiple Axes.

    You can also specify the coordinate system of the label with the
    transform. If None, the default coordinate system will be the axes
    coordinate system: (0, 0) is bottom left, (0.5, 0.5) is center, etc.
    """
    # Pinning the label explicitly disables the automatic placement logic.
    self._autolabelpos = False
    self.label.set_transform(
        self.axes.transAxes if transform is None else transform)
    self.label.set_position((x, y))
    self.stale = True
def get_transform(self):
    """Return the transform used in the Axis' scale"""
    return self._scale.get_transform()

def get_scale(self):
    """Return this Axis' scale (as a str)."""
    return self._scale.name

def _set_scale(self, value, **kwargs):
    # Accept either a scale name (str) or an already-built ScaleBase.
    if not isinstance(value, mscale.ScaleBase):
        self._scale = mscale.scale_factory(value, self, **kwargs)
    else:
        self._scale = value
    self._scale.set_default_locators_and_formatters(self)

    # The new scale just installed fresh default locators/formatters.
    self.isDefault_majloc = True
    self.isDefault_minloc = True
    self.isDefault_majfmt = True
    self.isDefault_minfmt = True
# This method is directly wrapped by Axes.set_{x,y}scale.
def _set_axes_scale(self, value, **kwargs):
    """
    Set this Axis' scale.

    Parameters
    ----------
    value : str or `.ScaleBase`
        The axis scale type to apply. Valid string values are the names of scale
        classes ("linear", "log", "function",...). These may be the names of any
        of the :ref:`built-in scales` or of any custom scales
        registered using `matplotlib.scale.register_scale`.
    **kwargs
        If *value* is a string, keywords are passed to the instantiation method of
        the respective class.
    """
    name = self._get_axis_name()
    old_default_lims = (self.get_major_locator()
                        .nonsingular(-np.inf, np.inf))
    # Apply the new scale to every axis sharing this one.
    for ax in self._get_shared_axes():
        ax._axis_map[name]._set_scale(value, **kwargs)
        ax._update_transScale()
        ax.stale = True
    new_default_lims = (self.get_major_locator()
                        .nonsingular(-np.inf, np.inf))
    if old_default_lims != new_default_lims:
        # Force autoscaling now, to take advantage of the scale locator's
        # nonsingular() before it possibly gets swapped out by the user.
        self.axes.autoscale_view(
            **{f"scale{k}": k == name for k in self.axes._axis_names})
def limit_range_for_scale(self, vmin, vmax):
    """
    Return the range *vmin*, *vmax*, restricted to the domain supported by the
    current scale.
    """
    return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())

def _get_autoscale_on(self):
    """Return whether this Axis is autoscaled."""
    return self._autoscale_on

def _set_autoscale_on(self, b):
    """
    Set whether this Axis is autoscaled when drawing or by `.Axes.autoscale_view`.

    If b is None, then the value is not changed.

    Parameters
    ----------
    b : bool
    """
    if b is not None:
        self._autoscale_on = b
def get_children(self):
    # The axis label, the offset text, and all major and minor ticks.
    return [self.label, self.offsetText,
            *self.get_major_ticks(), *self.get_minor_ticks()]

def _reset_major_tick_kw(self):
    """Reset stored major-tick kwargs to the rcParams grid default."""
    self._major_tick_kw.clear()
    self._major_tick_kw['gridOn'] = (
        mpl.rcParams['axes.grid'] and
        mpl.rcParams['axes.grid.which'] in ('both', 'major'))

def _reset_minor_tick_kw(self):
    """Reset stored minor-tick kwargs to the rcParams grid default."""
    self._minor_tick_kw.clear()
    self._minor_tick_kw['gridOn'] = (
        mpl.rcParams['axes.grid'] and
        mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
def clear(self):
    """
    Clear the axis.

    This resets axis properties to their default values:

    - the label
    - the scale
    - locators, formatters and ticks
    - major and minor grid
    - units
    - registered callbacks
    """
    self.label._reset_visual_defaults()
    # The above resets the label formatting using text rcParams,
    # so we then update the formatting using axes rcParams
    self.label.set_color(mpl.rcParams['axes.labelcolor'])
    self.label.set_fontsize(mpl.rcParams['axes.labelsize'])
    self.label.set_fontweight(mpl.rcParams['axes.labelweight'])
    self.offsetText._reset_visual_defaults()
    self.labelpad = mpl.rcParams['axes.labelpad']

    self._init()

    self._set_scale('linear')

    # Clear the callback registry for this axis, or it may "leak"
    self.callbacks = cbook.CallbackRegistry(signals=["units"])

    # whether the grids are on
    self._major_tick_kw['gridOn'] = (
        mpl.rcParams['axes.grid'] and
        mpl.rcParams['axes.grid.which'] in ('both', 'major'))
    self._minor_tick_kw['gridOn'] = (
        mpl.rcParams['axes.grid'] and
        mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
    self.reset_ticks()

    self._converter = None
    self._converter_is_explicit = False
    self.units = None
    self.stale = True
def reset_ticks(self):
    """
    Re-initialize the major and minor Tick lists.

    Each list starts with a single fresh Tick.
    """
    # Restore the lazy tick lists by removing the instance attributes, so
    # the _LazyTickList descriptors take over again on next access.
    try:
        del self.majorTicks
    except AttributeError:
        pass
    try:
        del self.minorTicks
    except AttributeError:
        pass
    try:
        self.set_clip_path(self.axes.patch)
    except AttributeError:
        pass
def minorticks_on(self):
    """
    Display default minor ticks on the Axis, depending on the scale
    (`~.axis.Axis.get_scale`).

    Scales use specific minor locators:

    - log: `~.LogLocator`
    - symlog: `~.SymmetricalLogLocator`
    - asinh: `~.AsinhLocator`
    - logit: `~.LogitLocator`
    - default: `~.AutoMinorLocator`

    Displaying minor ticks may reduce performance; you may turn them off
    using `minorticks_off()` if drawing speed is a problem.
    """
    # Choose a minor locator matching the current scale, forwarding the
    # scale's own configuration (base, subs, ...) where applicable.
    scale = self.get_scale()
    if scale == 'log':
        s = self._scale
        self.set_minor_locator(mticker.LogLocator(s.base, s.subs))
    elif scale == 'symlog':
        s = self._scale
        self.set_minor_locator(
            mticker.SymmetricalLogLocator(s._transform, s.subs))
    elif scale == 'asinh':
        s = self._scale
        self.set_minor_locator(
            mticker.AsinhLocator(s.linear_width, base=s._base,
                                 subs=s._subs))
    elif scale == 'logit':
        self.set_minor_locator(mticker.LogitLocator(minor=True))
    else:
        self.set_minor_locator(mticker.AutoMinorLocator())

def minorticks_off(self):
    """Remove minor ticks from the Axis."""
    self.set_minor_locator(mticker.NullLocator())
def set_tick_params(self, which='major', reset=False, **kwargs):
    """
    Set appearance parameters for ticks, ticklabels, and gridlines.

    For documentation of keyword arguments, see
    :meth:`matplotlib.axes.Axes.tick_params`.

    See Also
    --------
    .Axis.get_tick_params
        View the current style settings for ticks, ticklabels, and
        gridlines.
    """
    _api.check_in_list(['major', 'minor', 'both'], which=which)
    kwtrans = self._translate_tick_params(kwargs)

    # the kwargs are stored in self._major/minor_tick_kw so that any
    # future new ticks will automatically get them
    if reset:
        if which in ['major', 'both']:
            self._reset_major_tick_kw()
            self._major_tick_kw.update(kwtrans)
        if which in ['minor', 'both']:
            self._reset_minor_tick_kw()
            self._minor_tick_kw.update(kwtrans)
        self.reset_ticks()
    else:
        # Apply to the already-instantiated ticks as well as the stored
        # defaults for future ticks.
        if which in ['major', 'both']:
            self._major_tick_kw.update(kwtrans)
            for tick in self.majorTicks:
                tick._apply_params(**kwtrans)
        if which in ['minor', 'both']:
            self._minor_tick_kw.update(kwtrans)
            for tick in self.minorTicks:
                tick._apply_params(**kwtrans)
        # labelOn and labelcolor also apply to the offset text.
        if 'label1On' in kwtrans or 'label2On' in kwtrans:
            self.offsetText.set_visible(
                self._major_tick_kw.get('label1On', False)
                or self._major_tick_kw.get('label2On', False))
        if 'labelcolor' in kwtrans:
            self.offsetText.set_color(kwtrans['labelcolor'])

    self.stale = True
def get_tick_params(self, which='major'):
    """
    Get appearance parameters for ticks, ticklabels, and gridlines.

    .. versionadded:: 3.7

    Parameters
    ----------
    which : {'major', 'minor'}, default: 'major'
        The group of ticks for which the parameters are retrieved.

    Returns
    -------
    dict
        Properties for styling tick elements added to the axis.

    Notes
    -----
    This method returns the appearance parameters for styling *new*
    elements added to this axis and may be different from the values
    on current elements if they were modified directly by the user
    (e.g., via ``set_*`` methods on individual tick objects).

    Examples
    --------
    ::

        >>> ax.yaxis.set_tick_params(labelsize=30, labelcolor='red',
        ...                          direction='out', which='major')
        >>> ax.yaxis.get_tick_params(which='major')
        {'direction': 'out',
        'left': True,
        'right': False,
        'labelleft': True,
        'labelright': False,
        'gridOn': False,
        'labelsize': 30,
        'labelcolor': 'red'}
        >>> ax.yaxis.get_tick_params(which='minor')
        {'left': True,
        'right': False,
        'labelleft': True,
        'labelright': False,
        'gridOn': False}
    """
    _api.check_in_list(['major', 'minor'], which=which)
    # Translate the stored internal names back to the user-facing
    # tick_params names.
    if which == 'major':
        return self._translate_tick_params(
            self._major_tick_kw, reverse=True
        )
    return self._translate_tick_params(self._minor_tick_kw, reverse=True)
@classmethod
def _translate_tick_params(cls, kw, reverse=False):
    """
    Translate `.Axis.set_tick_params` kwargs into the form accepted by
    `.Tick._apply_params` (or back again when ``reverse=True``).

    Axis-specific names such as 'top'/'left' are mapped onto the generic
    tick1/tick2 naming of the axis, along with a few other renames.
    Returns a new dict; *kw* itself is never modified.
    """
    remaining = {**kw}
    # The following lists may be moved to a more accessible location.
    allowed_keys = [
        'size', 'width', 'color', 'tickdir', 'pad',
        'labelsize', 'labelcolor', 'labelfontfamily', 'zorder', 'gridOn',
        'tick1On', 'tick2On', 'label1On', 'label2On',
        'length', 'direction', 'left', 'bottom', 'right', 'top',
        'labelleft', 'labelbottom', 'labelright', 'labeltop',
        'labelrotation',
        *_gridline_param_names]
    keymap = {
        # tick_params key -> axis key
        'length': 'size',
        'direction': 'tickdir',
        'rotation': 'labelrotation',
        'left': 'tick1On',
        'bottom': 'tick1On',
        'right': 'tick2On',
        'top': 'tick2On',
        'labelleft': 'label1On',
        'labelbottom': 'label1On',
        'labelright': 'label2On',
        'labeltop': 'label2On',
    }
    if reverse:
        # On the x-axis the y-only aliases are skipped *without* popping,
        # so the next alias for the same tick key (e.g. 'bottom' instead
        # of 'left') picks the value up.
        skipped = (['left', 'right', 'labelleft', 'labelright']
                   if cls.axis_name == 'x' else [])
        translated = {}
        for user_key, tick_key in keymap.items():
            if tick_key in remaining and user_key not in skipped:
                translated[user_key] = remaining.pop(tick_key)
    else:
        translated = {}
        for user_key, tick_key in keymap.items():
            if user_key in remaining:
                translated[tick_key] = remaining.pop(user_key)
    if 'colors' in remaining:
        shared_color = remaining.pop('colors')
        translated['color'] = shared_color
        translated['labelcolor'] = shared_color
    # Maybe move the checking up to the caller of this method.
    for key in remaining:
        if key not in allowed_keys:
            raise ValueError(
                "keyword %s is not recognized; valid keywords are %s"
                % (key, allowed_keys))
    translated.update(remaining)
    return translated
def set_clip_path(self, path, transform=None):
    """Set the clip path of the axis and all of its tick artists."""
    super().set_clip_path(path, transform)
    # Ticks are separate artists; propagate the clip path to each one.
    for tick in [*self.majorTicks, *self.minorTicks]:
        tick.set_clip_path(path, transform)
    self.stale = True
def get_view_interval(self):
    """Return the ``(min, max)`` view limits of this axis.

    Raises
    ------
    NotImplementedError
        Always; concrete Axis subclasses must override this method.
    """
    raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
    """
    Set the axis view limits. This method is for internal use; Matplotlib
    users should typically use e.g. `~.Axes.set_xlim` or `~.Axes.set_ylim`.

    If *ignore* is False (the default), this method will never reduce the
    preexisting view limits, only expand them if *vmin* or *vmax* are not
    within them. Moreover, the order of *vmin* and *vmax* does not matter;
    the orientation of the axis will not change.

    If *ignore* is True, the view limits will be set exactly to ``(vmin,
    vmax)`` in that order.

    Raises
    ------
    NotImplementedError
        Always; concrete Axis subclasses must override this method.
    """
    raise NotImplementedError('Derived must override')
def get_data_interval(self):
    """Return the ``(min, max)`` data limits of this axis.

    Raises
    ------
    NotImplementedError
        Always; concrete Axis subclasses must override this method.
    """
    raise NotImplementedError('Derived must override')
def set_data_interval(self, vmin, vmax, ignore=False):
    """
    Set the axis data limits. This method is for internal use.

    If *ignore* is False (the default), this method will never reduce the
    preexisting data limits, only expand them if *vmin* or *vmax* are not
    within them. Moreover, the order of *vmin* and *vmax* does not matter;
    the orientation of the axis will not change.

    If *ignore* is True, the data limits will be set exactly to ``(vmin,
    vmax)`` in that order.

    Raises
    ------
    NotImplementedError
        Always; concrete Axis subclasses must override this method.
    """
    raise NotImplementedError('Derived must override')
def get_inverted(self):
    """
    Return whether this Axis is oriented in the "inverse" direction.

    The "normal" direction is increasing to the right for the x-axis and
    to the top for the y-axis; the "inverse" direction is increasing to
    the left for the x-axis and to the bottom for the y-axis.
    """
    vmin, vmax = self.get_view_interval()
    # Inverted exactly when the view interval is stored high-to-low.
    return vmax < vmin
def set_inverted(self, inverted):
    """
    Set whether this Axis is oriented in the "inverse" direction.

    The "normal" direction is increasing to the right for the x-axis and
    to the top for the y-axis; the "inverse" direction is increasing to
    the left for the x-axis and to the bottom for the y-axis.
    """
    lo, hi = self.get_view_interval()
    # bool() guards against np.bool_ values of *inverted*.
    ordered = sorted((lo, hi), reverse=bool(inverted))
    self._set_lim(*ordered, auto=None)
def set_default_intervals(self):
    """
    Set the default limits for the axis data and view interval if they
    have not been mutated yet.

    This mainly supports plotting of custom objects: when e.g. datetime
    objects are passed in, there is no automatic way to pick default
    min/max data and view limits.  The unit-conversion ``AxisInfo``
    interface exposes a ``default_limits`` attribute through which custom
    types can register defaults; derived classes check for it and fall
    back to 0..1 otherwise.  The base implementation does nothing.
    """
def _set_lim(self, v0, v1, *, emit=True, auto):
    """
    Set view limits.

    This method is a helper for the Axes ``set_xlim``, ``set_ylim``, and
    ``set_zlim`` methods.

    Parameters
    ----------
    v0, v1 : float
        The view limits. (Passing *v0* as a (low, high) pair is not
        supported; normalization must occur in the Axes setters.)
    emit : bool, default: True
        Whether to notify observers of limit change.
    auto : bool or None, default: False
        Whether to turn on autoscaling of the x-axis. True turns on, False
        turns off, None leaves unchanged.

    Returns
    -------
    (float, float)
        The limits actually applied (after nonsingular expansion and
        scale clamping).
    """
    name = self._get_axis_name()
    # convert=False: record unit info but let the explicit conversion
    # below validate each endpoint individually.
    self.axes._process_unit_info([(name, (v0, v1))], convert=False)
    v0 = self.axes._validate_converted_limits(v0, self.convert_units)
    v1 = self.axes._validate_converted_limits(v1, self.convert_units)
    if v0 is None or v1 is None:
        # Axes init calls set_xlim(0, 1) before get_xlim() can be called,
        # so only grab the limits if we really need them.
        old0, old1 = self.get_view_interval()
        if v0 is None:
            v0 = old0
        if v1 is None:
            v1 = old1
    if self.get_scale() == 'log' and (v0 <= 0 or v1 <= 0):
        # Non-positive limits are invalid on a log scale: warn and keep
        # the corresponding preexisting limit instead.
        old0, old1 = self.get_view_interval()
        if v0 <= 0:
            _api.warn_external(f"Attempt to set non-positive {name}lim on "
                               f"a log-scaled axis will be ignored.")
            v0 = old0
        if v1 <= 0:
            _api.warn_external(f"Attempt to set non-positive {name}lim on "
                               f"a log-scaled axis will be ignored.")
            v1 = old1
    if v0 == v1:
        _api.warn_external(
            f"Attempting to set identical low and high {name}lims "
            f"makes transformation singular; automatically expanding.")
    # Remember the requested orientation: nonsingular()/
    # limit_range_for_scale() work on ordered values, so it is restored
    # by the sorted() call below.
    reverse = bool(v0 > v1)  # explicit cast needed for python3.8+np.bool_.
    v0, v1 = self.get_major_locator().nonsingular(v0, v1)
    v0, v1 = self.limit_range_for_scale(v0, v1)
    v0, v1 = sorted([v0, v1], reverse=bool(reverse))
    self.set_view_interval(v0, v1, ignore=True)
    # Mark viewlims as no longer stale without triggering an autoscale.
    for ax in self._get_shared_axes():
        ax._stale_viewlims[name] = False
    self._set_autoscale_on(auto)
    if emit:
        self.axes.callbacks.process(f"{name}lim_changed", self.axes)
    # Call all of the other Axes that are shared with this one
    for other in self._get_shared_axes():
        if other is self.axes:
            continue
        # Propagate with emit=False to avoid infinite recursion among
        # shared axes; fire the callback directly below instead.
        other._axis_map[name]._set_lim(v0, v1, emit=False, auto=auto)
        if emit:
            other.callbacks.process(f"{name}lim_changed", other)
        if ((other_fig := other.get_figure(root=False)) !=
                self.get_figure(root=False)):
            other_fig.canvas.draw_idle()
    self.stale = True
    return v0, v1
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.get_figure(root=False))
def _update_ticks(self):
    """
    Update ticks (position and labels) using the current data interval of
    the axes. Return the list of ticks that will be drawn.
    """
    def _sync(locs, labels, ticks):
        # Push each location/label pair onto its tick artist.
        for tick, loc, text in zip(ticks, locs, labels):
            tick.update_position(loc)
            tick.label1.set_text(text)
            tick.label2.set_text(text)

    major_locs = self.get_majorticklocs()
    major_ticks = self.get_major_ticks(len(major_locs))
    _sync(major_locs, self.major.formatter.format_ticks(major_locs),
          major_ticks)
    minor_locs = self.get_minorticklocs()
    minor_ticks = self.get_minor_ticks(len(minor_locs))
    _sync(minor_locs, self.minor.formatter.format_ticks(minor_locs),
          minor_ticks)

    view_low, view_high = self.get_view_interval()
    if view_low > view_high:
        view_low, view_high = view_high, view_low
    if (hasattr(self, "axes") and self.axes.name == '3d'
            and mpl.rcParams['axes3d.automargin']):
        # In mpl3.8, the margin was 1/48. Due to the change in automargin
        # behavior in mpl3.9, we need to adjust this to compensate for a
        # zoom factor of 2/48, giving us a 23/24 modifier. So the new
        # margin is 0.019965277777777776 = 1/48*23/24.
        margin = 0.019965277777777776
        span = view_high - view_low
        view_high -= span * margin
        view_low += span * margin
    interval_t = self.get_transform().transform([view_low, view_high])

    drawn = []
    for tick in [*major_ticks, *minor_ticks]:
        try:
            loc_t = self.get_transform().transform(tick.get_loc())
        except AssertionError:
            # transforms.transform doesn't allow masked values but
            # some scales might make them, so skip such ticks.
            continue
        if mtransforms._interval_contains_close(interval_t, loc_t):
            drawn.append(tick)
    return drawn
def _get_ticklabel_bboxes(self, ticks, renderer=None):
"""Return lists of bboxes for ticks' label1's and label2's."""
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
return ([tick.label1.get_window_extent(renderer)
for tick in ticks if tick.label1.get_visible()],
[tick.label2.get_window_extent(renderer)
for tick in ticks if tick.label2.get_visible()])
def get_tightbbox(self, renderer=None, *, for_layout_only=False):
    """
    Return a bounding box that encloses the axis. It only accounts
    tick labels, axis label, and offsetText.

    If *for_layout_only* is True, then the width of the label (if this
    is an x-axis) or the height of the label (if this is a y-axis) is
    collapsed to near zero. This allows tight/constrained_layout to ignore
    too-long labels when doing their layout.

    Returns
    -------
    `.Bbox` or None
        None if the axis is not visible or no artist has a finite,
        non-degenerate extent.
    """
    if not self.get_visible() or for_layout_only and not self.get_in_layout():
        return
    if renderer is None:
        renderer = self.get_figure(root=True)._get_renderer()
    ticks_to_draw = self._update_ticks()
    self._update_label_position(renderer)
    # go back to just this axis's tick labels
    tlb1, tlb2 = self._get_ticklabel_bboxes(ticks_to_draw, renderer)
    # Offset text placement depends on the tick label boxes just computed.
    self._update_offset_text_position(tlb1, tlb2)
    self.offsetText.set_text(self.major.formatter.get_offset())
    bboxes = [
        *(a.get_window_extent(renderer)
          for a in [self.offsetText]
          if a.get_visible()),
        *tlb1, *tlb2,
    ]
    # take care of label
    if self.label.get_visible():
        bb = self.label.get_window_extent(renderer)
        # for constrained/tight_layout, we want to ignore the label's
        # width/height because the adjustments they make can't be improved.
        # this code collapses the relevant direction
        if for_layout_only:
            if self.axis_name == "x" and bb.width > 0:
                bb.x0 = (bb.x0 + bb.x1) / 2 - 0.5
                bb.x1 = bb.x0 + 1.0
            if self.axis_name == "y" and bb.height > 0:
                bb.y0 = (bb.y0 + bb.y1) / 2 - 0.5
                bb.y1 = bb.y0 + 1.0
        bboxes.append(bb)
    # Discard degenerate or infinite boxes before taking the union.
    bboxes = [b for b in bboxes
              if 0 < b.width < np.inf and 0 < b.height < np.inf]
    if bboxes:
        return mtransforms.Bbox.union(bboxes)
    else:
        return None
def get_tick_padding(self):
    """Return the largest padding of the first major/minor ticks (0 if none)."""
    pads = []
    if self.majorTicks:
        pads.append(self.majorTicks[0].get_tick_padding())
    if self.minorTicks:
        pads.append(self.minorTicks[0].get_tick_padding())
    return max(pads, default=0)
@martist.allow_rasterization
def draw(self, renderer):
    # docstring inherited
    if not self.get_visible():
        return
    renderer.open_group(__name__, gid=self.get_gid())
    drawn = self._update_ticks()
    bb1, bb2 = self._get_ticklabel_bboxes(drawn, renderer)
    for tick in drawn:
        tick.draw(renderer)
    # Shift label away from axes to avoid overlapping ticklabels.
    self._update_label_position(renderer)
    self.label.draw(renderer)
    self._update_offset_text_position(bb1, bb2)
    self.offsetText.set_text(self.major.formatter.get_offset())
    self.offsetText.draw(renderer)
    renderer.close_group(__name__)
    self.stale = False
def get_gridlines(self):
    r"""Return this Axis' grid lines as a list of `.Line2D`\s."""
    gridlines = [tick.gridline for tick in self.get_major_ticks()]
    return cbook.silent_list('Line2D gridline', gridlines)
def set_label(self, s):
    """Assigning legend labels is not supported. Raises RuntimeError."""
    # Axis.label is the axis title text; a legend label makes no sense here.
    msg = ("A legend label cannot be assigned to an Axis. Did you mean to "
           "set the axis label via set_label_text()?")
    raise RuntimeError(msg)
def get_label(self):
    """
    Return the axis label as a Text instance.

    .. admonition:: Discouraged

        This overrides `.Artist.get_label`, which is for legend labels,
        with a new semantic. It is recommended to use the attribute
        ``Axis.label`` instead.
    """
    return self.label
def get_offset_text(self):
    """Return the axis offsetText as a Text instance."""
    return self.offsetText
def get_pickradius(self):
    """Return the depth of the axis used by the picker."""
    return self._pickradius
def get_majorticklabels(self):
    """Return this Axis' major tick labels, as a list of `~.text.Text`."""
    self._update_ticks()
    ticks = self.get_major_ticks()
    # All visible label1's first, then all visible label2's.
    first = [t.label1 for t in ticks if t.label1.get_visible()]
    second = [t.label2 for t in ticks if t.label2.get_visible()]
    return first + second
def get_minorticklabels(self):
    """Return this Axis' minor tick labels, as a list of `~.text.Text`."""
    self._update_ticks()
    ticks = self.get_minor_ticks()
    # All visible label1's first, then all visible label2's.
    first = [t.label1 for t in ticks if t.label1.get_visible()]
    second = [t.label2 for t in ticks if t.label2.get_visible()]
    return first + second
def get_ticklabels(self, minor=False, which=None):
    """
    Get this Axis' tick labels.

    Parameters
    ----------
    minor : bool
        Whether to return the minor or the major ticklabels.
    which : None, ('minor', 'major', 'both')
        Overrides *minor*.

        Selects which ticklabels to return

    Returns
    -------
    list of `~matplotlib.text.Text`
    """
    if which is not None:
        if which == 'major':
            return self.get_majorticklabels()
        if which == 'minor':
            return self.get_minorticklabels()
        if which == 'both':
            return self.get_majorticklabels() + self.get_minorticklabels()
        _api.check_in_list(['major', 'minor', 'both'], which=which)
    return self.get_minorticklabels() if minor else self.get_majorticklabels()
def get_majorticklines(self):
    r"""Return this Axis' major tick lines as a list of `.Line2D`\s."""
    lines = [line
             for tick in self.get_major_ticks()
             for line in (tick.tick1line, tick.tick2line)]
    return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
    r"""Return this Axis' minor tick lines as a list of `.Line2D`\s."""
    lines = [line
             for tick in self.get_minor_ticks()
             for line in (tick.tick1line, tick.tick2line)]
    return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
    r"""Return this Axis' tick lines as a list of `.Line2D`\s."""
    getter = self.get_minorticklines if minor else self.get_majorticklines
    return getter()
def get_majorticklocs(self):
    """Return this Axis' major tick locations in data coordinates."""
    locator = self.major.locator
    return locator()
def get_minorticklocs(self):
    """
    Return this Axis' minor tick locations in data coordinates.

    Minor locations that coincide with major ones are dropped when
    ``remove_overlapping_locs`` is set.
    """
    locs = np.asarray(self.minor.locator())
    if not self.remove_overlapping_locs:
        return locs
    transform = self._scale.get_transform()
    tr_minor = transform.transform(locs)
    tr_major = transform.transform(self.major.locator())
    lo, hi = sorted(transform.transform(self.get_view_interval()))
    # Use the transformed view span as the scale for the tolerance;
    # 1e-5 is the default rtol of np.isclose.
    tol = (hi - lo) * 1e-5
    overlap = np.isclose(tr_minor[:, None], tr_major[None, :],
                         atol=tol, rtol=0).any(axis=1)
    return locs[~overlap]
def get_ticklocs(self, *, minor=False):
    """
    Return this Axis' tick locations in data coordinates.

    The locations are not clipped to the current axis limits and hence
    may contain locations that are not visible in the output.

    Parameters
    ----------
    minor : bool, default: False
        True to return the minor tick locations,
        False to return the major tick locations.

    Returns
    -------
    array of tick locations
    """
    if minor:
        return self.get_minorticklocs()
    return self.get_majorticklocs()
def get_ticks_direction(self, minor=False):
    """
    Return an array of this Axis' tick directions.

    Parameters
    ----------
    minor : bool, default: False
        True to return the minor tick directions,
        False to return the major tick directions.

    Returns
    -------
    array of tick directions
    """
    ticks = self.get_minor_ticks() if minor else self.get_major_ticks()
    return np.array([tick._tickdir for tick in ticks])
def _get_tick(self, major):
"""Return the default tick instance."""
if self._tick_class is None:
raise NotImplementedError(
f"The Axis subclass {self.__class__.__name__} must define "
"_tick_class or reimplement _get_tick()")
tick_kw = self._major_tick_kw if major else self._minor_tick_kw
return self._tick_class(self.axes, 0, major=major, **tick_kw)
def _get_tick_label_size(self, axis_name):
    """
    Return the tick-label font size (in points) for this Axis.

    This is a convenience to avoid constructing a full `Tick` in
    `.get_tick_space`, which is expensive.
    """
    size = self._major_tick_kw.get(
        'labelsize', mpl.rcParams[f'{axis_name}tick.labelsize'])
    return mtext.FontProperties(size=size).get_size_in_points()
def _copy_tick_props(self, src, dest):
"""Copy the properties from *src* tick to *dest* tick."""
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.update_from(src)
dest._loc = src._loc
dest._size = src._size
dest._width = src._width
dest._base_pad = src._base_pad
dest._labelrotation = src._labelrotation
dest._zorder = src._zorder
dest._tickdir = src._tickdir
def get_label_text(self):
    """Return the text of the axis label."""
    return self.label.get_text()
def get_major_locator(self):
    """Return the locator of the major ticker."""
    return self.major.locator
def get_minor_locator(self):
    """Return the locator of the minor ticker."""
    return self.minor.locator
def get_major_formatter(self):
    """Return the formatter of the major ticker."""
    return self.major.formatter
def get_minor_formatter(self):
    """Return the formatter of the minor ticker."""
    return self.minor.formatter
def get_major_ticks(self, numticks=None):
    r"""
    Return the list of major `.Tick`\s.

    .. warning::

        Ticks are not guaranteed to be persistent. Various operations
        can create, delete and modify the Tick instances. There is an
        imminent risk that changes to individual ticks will not
        survive if you work on the figure further (including also
        panning/zooming on a displayed figure).

        Working on the individual ticks is a method of last resort.
        Use `.set_tick_params` instead if possible.
    """
    if numticks is None:
        numticks = len(self.get_majorticklocs())
    # Grow the tick list on demand, seeding each new tick's label
    # properties from the first (oldest) tick.
    for _ in range(numticks - len(self.majorTicks)):
        tick = self._get_tick(major=True)
        self.majorTicks.append(tick)
        self._copy_tick_props(self.majorTicks[0], tick)
    return self.majorTicks[:numticks]
def get_minor_ticks(self, numticks=None):
    r"""
    Return the list of minor `.Tick`\s.

    .. warning::

        Ticks are not guaranteed to be persistent. Various operations
        can create, delete and modify the Tick instances. There is an
        imminent risk that changes to individual ticks will not
        survive if you work on the figure further (including also
        panning/zooming on a displayed figure).

        Working on the individual ticks is a method of last resort.
        Use `.set_tick_params` instead if possible.
    """
    if numticks is None:
        numticks = len(self.get_minorticklocs())
    # Grow the tick list on demand, seeding each new tick's label
    # properties from the first (oldest) tick.
    for _ in range(numticks - len(self.minorTicks)):
        tick = self._get_tick(major=False)
        self.minorTicks.append(tick)
        self._copy_tick_props(self.minorTicks[0], tick)
    return self.minorTicks[:numticks]
def grid(self, visible=None, which='major', **kwargs):
    """
    Configure the grid lines.

    Parameters
    ----------
    visible : bool or None
        Whether to show the grid lines. If any *kwargs* are supplied, it
        is assumed you want the grid on and *visible* will be set to True.

        If *visible* is *None* and there are no *kwargs*, this toggles the
        visibility of the lines.
    which : {'major', 'minor', 'both'}
        The grid lines to apply the changes on.
    **kwargs : `~matplotlib.lines.Line2D` properties
        Define the line properties of the grid, e.g.::

            grid(color='r', linestyle='-', linewidth=2)
    """
    if kwargs:
        if visible is None:
            visible = True
        elif not visible:  # falsy, but not None
            _api.warn_external('First parameter to grid() is false, '
                               'but line properties are supplied. The '
                               'grid will be enabled.')
            visible = True
    which = which.lower()
    _api.check_in_list(['major', 'minor', 'both'], which=which)
    gridkw = {f'grid_{name}': value for name, value in kwargs.items()}
    # visible=None with no kwargs toggles the stored gridOn state.
    for group, stored in (('minor', self._minor_tick_kw),
                          ('major', self._major_tick_kw)):
        if which in (group, 'both'):
            gridkw['gridOn'] = (not stored['gridOn']
                                if visible is None else visible)
            self.set_tick_params(which=group, **gridkw)
    self.stale = True
def update_units(self, data):
    """
    Introspect *data* for a units converter and update the axis converter
    if necessary. Return True if *data* is registered for unit conversion.
    """
    if self._converter_is_explicit:
        converter = self._converter
    else:
        converter = munits.registry.get_converter(data)
    if converter is None:
        return False
    # Record whether the converter changed *before* installing it.
    needs_refresh = self._converter != converter
    self._set_converter(converter)
    default = self._converter.default_units(data, self)
    if default is not None and self.units is None:
        # set_units refreshes axis info itself.
        self.set_units(default)
    elif needs_refresh:
        self._update_axisinfo()
    self.stale = True
    return True
def _update_axisinfo(self):
"""
Check the axis converter for the stored units to see if the
axis info needs to be updated.
"""
if self._converter is None:
return
info = self._converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
    """Return whether a converter or units have been set on this axis."""
    return not (self._converter is None and self.units is None)
def convert_units(self, x):
    """Convert *x* to this axis' units, looking up a converter if needed."""
    # Values natively supported by Matplotlib need no conversion.
    if munits._is_natively_supported(x):
        return x
    if self._converter is None:
        self._set_converter(munits.registry.get_converter(x))
    if self._converter is None:
        # Still no converter: pass the value through unchanged.
        return x
    try:
        return self._converter.convert(x, self.units, self)
    except Exception as e:
        raise munits.ConversionError('Failed to convert value(s) to axis '
                                     f'units: {x!r}') from e
def get_converter(self):
    """
    Return the unit converter for this axis.

    Returns
    -------
    `~matplotlib.units.ConversionInterface` or None
    """
    return self._converter
def set_converter(self, converter):
    """
    Set the unit converter for this axis explicitly.

    Parameters
    ----------
    converter : `~matplotlib.units.ConversionInterface`
    """
    self._set_converter(converter)
    # Mark explicit so implicit converter updates are rejected later.
    self._converter_is_explicit = True
def _set_converter(self, converter):
if self._converter is converter or self._converter == converter:
return
if self._converter_is_explicit:
raise RuntimeError("Axis already has an explicit converter set")
elif (
self._converter is not None and
not isinstance(converter, type(self._converter)) and
not isinstance(self._converter, type(converter))
):
_api.warn_external(
"This axis already has a converter set and "
"is updating to a potentially incompatible converter"
)
self._converter = converter
def set_units(self, u):
    """
    Set the units for axis.

    Parameters
    ----------
    u : units tag

    Notes
    -----
    The units of any shared axis will also be updated.
    """
    if u == self.units:
        return
    # Propagate to every axis sharing this one (including self).
    for shared in self._get_shared_axis():
        shared.units = u
        shared._update_axisinfo()
        shared.callbacks.process('units')
        shared.stale = True
def get_units(self):
    """Return the units for axis."""
    return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
    """
    Set the text value of the axis label.

    Parameters
    ----------
    label : str
        Text string.
    fontdict : dict
        Text properties.

        .. admonition:: Discouraged

            The use of *fontdict* is discouraged. Parameters should be
            passed as individual keyword arguments or using
            dictionary-unpacking ``set_label_text(..., **fontdict)``.
    **kwargs
        Merged into fontdict.

    Returns
    -------
    `~matplotlib.text.Text`
        The axis label artist.
    """
    # An explicitly set label is no longer a (converter-supplied) default.
    self.isDefault_label = False
    self.label.set_text(label)
    if fontdict is not None:
        self.label.update(fontdict)
    self.label.update(kwargs)
    self.stale = True
    return self.label
def set_major_formatter(self, formatter):
    """
    Set the formatter of the major ticker.

    In addition to a `~matplotlib.ticker.Formatter` instance,
    this also accepts a ``str`` or function.

    For a ``str`` a `~matplotlib.ticker.StrMethodFormatter` is used.
    The field used for the value must be labeled ``'x'`` and the field used
    for the position must be labeled ``'pos'``.
    See the `~matplotlib.ticker.StrMethodFormatter` documentation for
    more information.

    For a function, a `~matplotlib.ticker.FuncFormatter` is used.
    The function must take two inputs (a tick value ``x`` and a
    position ``pos``), and return a string containing the corresponding
    tick label.
    See the `~matplotlib.ticker.FuncFormatter` documentation for
    more information.

    Parameters
    ----------
    formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
    """
    self._set_formatter(formatter, self.major)
def set_minor_formatter(self, formatter):
    """
    Set the formatter of the minor ticker.

    In addition to a `~matplotlib.ticker.Formatter` instance,
    this also accepts a ``str`` or function.
    See `.Axis.set_major_formatter` for more information.

    Parameters
    ----------
    formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
    """
    self._set_formatter(formatter, self.minor)
def _set_formatter(self, formatter, level):
    """Install *formatter* on *level* (``self.major`` or ``self.minor``)."""
    if isinstance(formatter, str):
        formatter = mticker.StrMethodFormatter(formatter)
    elif (callable(formatter)
          and not isinstance(formatter, mticker.TickHelper)):
        # Wrap plain callables; reject other TickHelpers (e.g. a Locator
        # passed by mistake), which fall through to the isinstance check.
        formatter = mticker.FuncFormatter(formatter)
    else:
        _api.check_isinstance(mticker.Formatter, formatter=formatter)
    if (isinstance(formatter, mticker.FixedFormatter)
            and len(formatter.seq) > 0
            and not isinstance(level.locator, mticker.FixedLocator)):
        _api.warn_external('FixedFormatter should only be used together '
                           'with FixedLocator')
    if level == self.major:
        self.isDefault_majfmt = False
    else:
        self.isDefault_minfmt = False
    level.formatter = formatter
    formatter.set_axis(self)
    self.stale = True
def set_major_locator(self, locator):
    """
    Set the locator of the major ticker.

    Parameters
    ----------
    locator : `~matplotlib.ticker.Locator`
    """
    _api.check_isinstance(mticker.Locator, locator=locator)
    self.isDefault_majloc = False
    self.major.locator = locator
    # Keep the formatter's notion of the locator in sync.
    if self.major.formatter:
        self.major.formatter._set_locator(locator)
    locator.set_axis(self)
    self.stale = True
def set_minor_locator(self, locator):
    """
    Set the locator of the minor ticker.

    Parameters
    ----------
    locator : `~matplotlib.ticker.Locator`
    """
    _api.check_isinstance(mticker.Locator, locator=locator)
    self.isDefault_minloc = False
    self.minor.locator = locator
    # Keep the formatter's notion of the locator in sync.
    if self.minor.formatter:
        self.minor.formatter._set_locator(locator)
    locator.set_axis(self)
    self.stale = True
def set_pickradius(self, pickradius):
    """
    Set the depth of the axis used by the picker.

    Parameters
    ----------
    pickradius : float
        The acceptance radius for containment tests.
        See also `.Axis.contains`.

    Raises
    ------
    ValueError
        If *pickradius* is not a nonnegative real number.
    """
    if not isinstance(pickradius, Real) or pickradius < 0:
        raise ValueError("pick radius should be a distance")
    self._pickradius = pickradius
# Read/write property wrapping get_pickradius / set_pickradius.
pickradius = property(
    get_pickradius, set_pickradius, doc="The acceptance radius for "
    "containment tests. See also `.Axis.contains`.")
# Helper for set_ticklabels. Defining it here makes it picklable.
@staticmethod
def _format_with_dict(tickd, x, pos):
return tickd.get(x, "")
def set_ticklabels(self, labels, *, minor=False, fontdict=None, **kwargs):
    r"""
    [*Discouraged*] Set this Axis' tick labels with list of string labels.

    .. admonition:: Discouraged

        The use of this method is discouraged, because of the dependency on
        tick positions. In most cases, you'll want to use
        ``Axes.set_[x/y/z]ticks(positions, labels)`` or ``Axis.set_ticks``
        instead.

        If you are using this method, you should always fix the tick
        positions before, e.g. by using `.Axis.set_ticks` or by explicitly
        setting a `~.ticker.FixedLocator`. Otherwise, ticks are free to
        move and the labels may end up in unexpected positions.

    Parameters
    ----------
    labels : sequence of str or of `.Text`\s
        Texts for labeling each tick location in the sequence set by
        `.Axis.set_ticks`; the number of labels must match the number of
        locations. The labels are used as is, via a `.FixedFormatter`
        (without further formatting).
    minor : bool
        If True, set minor ticks instead of major ticks.
    fontdict : dict, optional
        .. admonition:: Discouraged

            The use of *fontdict* is discouraged. Parameters should be
            passed as individual keyword arguments or using
            dictionary-unpacking ``set_ticklabels(..., **fontdict)``.

        A dictionary controlling the appearance of the ticklabels.
        The default *fontdict* is::

            {'fontsize': rcParams['axes.titlesize'],
             'fontweight': rcParams['axes.titleweight'],
             'verticalalignment': 'baseline',
             'horizontalalignment': loc}
    **kwargs
        Text properties.

        .. warning::

            This only sets the properties of the current ticks, which is
            only sufficient for static plots.

            Ticks are not guaranteed to be persistent. Various operations
            can create, delete and modify the Tick instances. There is an
            imminent risk that these settings can get lost if you work on
            the figure further (including also panning/zooming on a
            displayed figure).

            Use `.set_tick_params` instead if possible.

    Returns
    -------
    list of `.Text`\s
        For each tick, includes ``tick.label1`` if it is visible, then
        ``tick.label2`` if it is visible, in that order.

    Raises
    ------
    TypeError
        If *labels* is not a sequence.
    ValueError
        If a `.FixedLocator` is in use and the number of labels does not
        match its number of locations.
    """
    try:
        labels = [t.get_text() if hasattr(t, 'get_text') else t
                  for t in labels]
    except TypeError:
        # BUGFIX: this previously used f"{labels:=}", an invalid format
        # spec for most objects, so building the message itself raised a
        # confusing TypeError instead of the intended one.
        raise TypeError(f"{labels!r} must be a sequence") from None
    locator = (self.get_minor_locator() if minor
               else self.get_major_locator())
    if not labels:
        # eg labels=[]:
        formatter = mticker.NullFormatter()
    elif isinstance(locator, mticker.FixedLocator):
        # Passing [] as a list of labels is often used as a way to
        # remove all tick labels, so only error for > 0 labels
        if len(locator.locs) != len(labels) and len(labels) != 0:
            raise ValueError(
                "The number of FixedLocator locations"
                f" ({len(locator.locs)}), usually from a call to"
                " set_ticks, does not match"
                f" the number of labels ({len(labels)}).")
        tickd = {loc: lab for loc, lab in zip(locator.locs, labels)}
        func = functools.partial(self._format_with_dict, tickd)
        formatter = mticker.FuncFormatter(func)
    else:
        _api.warn_external(
            "set_ticklabels() should only be used with a fixed number of "
            "ticks, i.e. after set_ticks() or using a FixedLocator.")
        formatter = mticker.FixedFormatter(labels)

    # Suppress the pairing warning that installing our freshly built
    # formatter may itself trigger.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="FixedFormatter should only be used together with FixedLocator")
        if minor:
            self.set_minor_formatter(formatter)
            locs = self.get_minorticklocs()
            ticks = self.get_minor_ticks(len(locs))
        else:
            self.set_major_formatter(formatter)
            locs = self.get_majorticklocs()
            ticks = self.get_major_ticks(len(locs))

    ret = []
    if fontdict is not None:
        kwargs.update(fontdict)
    for pos, (loc, tick) in enumerate(zip(locs, ticks)):
        tick.update_position(loc)
        tick_label = formatter(loc, pos)
        # deal with label1
        tick.label1.set_text(tick_label)
        tick.label1._internal_update(kwargs)
        # deal with label2
        tick.label2.set_text(tick_label)
        tick.label2._internal_update(kwargs)
        # only return visible tick labels
        if tick.label1.get_visible():
            ret.append(tick.label1)
        if tick.label2.get_visible():
            ret.append(tick.label2)
    self.stale = True
    return ret
def _set_tick_locations(self, ticks, *, minor=False):
    """
    Replace the (major or minor) locator by a FixedLocator at *ticks* and
    widen the view limits of all shared axes so every tick is visible.

    Returns the corresponding list of Tick instances.  See the docstring
    of `set_ticks` for the public-facing contract.
    """
    # XXX if the user changes units, the information will be lost here
    ticks = self.convert_units(ticks)
    # Constructing the FixedLocator up front also validates *ticks* early.
    locator = mticker.FixedLocator(ticks)
    if len(ticks):
        lo, hi = min(ticks), max(ticks)
        for shared in self._get_shared_axis():
            # set_view_interval maintains any preexisting inversion.
            shared.set_view_interval(lo, hi)
    self.axes.stale = True
    if minor:
        self.set_minor_locator(locator)
        return self.get_minor_ticks(len(ticks))
    self.set_major_locator(locator)
    return self.get_major_ticks(len(ticks))
def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):
    """
    Set this Axis' tick locations and optionally tick labels.

    If necessary, the view limits of the Axis are expanded so that all
    given ticks are visible.

    Parameters
    ----------
    ticks : 1D array-like
        Array of tick locations (either floats or in axis units). The axis
        `.Locator` is replaced by a `~.ticker.FixedLocator`.

        Pass an empty list (``set_ticks([])``) to remove all ticks.

        Some tick formatters will not label arbitrary tick positions;
        e.g. log formatters only label decade ticks by default. In such a
        case you can set a formatter explicitly on the axis using
        `.Axis.set_major_formatter` or provide formatted *labels* yourself.
    labels : list of str, optional
        Tick labels for each location in *ticks*; must have the same length
        as *ticks*. If set, the labels are used as is, via a
        `.FixedFormatter`.  If not set, the labels are generated using the
        axis tick `.Formatter`.
    minor : bool, default: False
        If ``False``, set only the major ticks; if ``True``, only the minor
        ticks.
    **kwargs
        `.Text` properties for the labels. Using these is only allowed if
        you pass *labels*. In other cases, please use `~.Axes.tick_params`.

    Notes
    -----
    The mandatory expansion of the view limits is an intentional design
    choice to prevent the surprise of a non-visible tick. If you need
    other limits, you should set the limits explicitly after setting the
    ticks.
    """
    # Text-styling kwargs make no sense without labels to style.
    if kwargs and labels is None:
        offending = next(iter(kwargs))
        raise ValueError(
            f"Incorrect use of keyword argument {offending!r}. Keyword arguments "
            "other than 'minor' modify the text labels and can only be used if "
            "'labels' are passed as well.")
    new_ticks = self._set_tick_locations(ticks, minor=minor)
    if labels is not None:
        self.set_ticklabels(labels, minor=minor, **kwargs)
    return new_ticks
def _get_tick_boxes_siblings(self, renderer):
    """
    Return the ticklabel bounding boxes of this axis and of every axis
    grouped with it by `.Figure.align_xlabels` or `.Figure.align_ylabels`.

    If this axis belongs to no alignment group, two empty lists are
    returned (i.e. only *self* would be considered by default).
    """
    # The Grouper tracking x or y label groups for this figure.
    name = self._get_axis_name()
    fig = self.get_figure(root=False)
    if name not in fig._align_label_groups:
        return [], []
    grouper = fig._align_label_groups[name]
    bboxes1, bboxes2 = [], []
    # Collect bboxes from every Axes sharing the alignment group.
    for sibling_axes in grouper.get_siblings(self.axes):
        sibling_axis = sibling_axes._axis_map[name]
        drawn_ticks = sibling_axis._update_ticks()
        b1, b2 = sibling_axis._get_ticklabel_bboxes(drawn_ticks, renderer)
        bboxes1.extend(b1)
        bboxes2.extend(b2)
    return bboxes1, bboxes2
def _update_label_position(self, renderer):
    """
    Update the label position based on the bounding box enclosing
    all the ticklabels and axis spine.

    Abstract: subclasses (e.g. `XAxis`/`YAxis`) must override; the base
    implementation always raises.
    """
    raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
    """
    Update the offset text position based on the sequence of bounding
    boxes of all the ticklabels.

    Abstract: subclasses (e.g. `XAxis`/`YAxis`) must override; the base
    implementation always raises.
    """
    raise NotImplementedError('Derived must override')
def axis_date(self, tz=None):
    """
    Set up axis ticks and labels to treat data along this Axis as dates.

    Parameters
    ----------
    tz : str or `datetime.tzinfo`, default: :rc:`timezone`
        The timezone used to create date labels.
    """
    if isinstance(tz, str):
        import dateutil.tz
        tz = dateutil.tz.gettz(tz)
    # Feeding update_units a sample datetime carrying the desired tzinfo
    # selects the registered date converter and records the timezone as
    # this axis' "units" attribute.
    self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
def get_tick_space(self):
    """Return the estimated number of ticks that can fit on the axis."""
    # Abstract: must be overridden in the subclass (see XAxis/YAxis).
    raise NotImplementedError()
def _get_ticks_position(self):
    """
    Helper for `XAxis.get_ticks_position` and `YAxis.get_ticks_position`.

    Check the visibility of tick1line, label1, tick2line, and label2 on
    the first major and the first minor ticks, and return

    - 1 if only tick1line and label1 are visible (which corresponds to
      "bottom" for the x-axis and "left" for the y-axis);
    - 2 if only tick2line and label2 are visible (which corresponds to
      "top" for the x-axis and "right" for the y-axis);
    - "default" if only tick1line, tick2line and label1 are visible;
    - "unknown" otherwise.
    """
    # Visibility fingerprint (tick1, tick2, label1, label2) of the first
    # major and the first minor tick.
    fingerprints = [
        (bool(tick.tick1line.get_visible()),
         bool(tick.tick2line.get_visible()),
         bool(tick.label1.get_visible()),
         bool(tick.label2.get_visible()))
        for tick in [self.majorTicks[0], self.minorTicks[0]]]
    if all(fp == (True, False, True, False) for fp in fingerprints):
        return 1
    if all(fp == (False, True, False, True) for fp in fingerprints):
        return 2
    if all(fp == (True, True, True, False) for fp in fingerprints):
        return "default"
    return "unknown"
def get_label_position(self):
    """Return the label position (top or bottom)."""
    return self.label_position
def set_label_position(self, position):
    """
    Set the label position (top or bottom)

    Parameters
    ----------
    position : {'top', 'bottom'}
    """
    # Abstract: overridden by XAxis ('top'/'bottom') and YAxis
    # ('left'/'right').
    raise NotImplementedError()
def get_minpos(self):
    # Abstract: subclasses return the minimum positive value from the data
    # limits (dataLim.minposx / dataLim.minposy).
    raise NotImplementedError()
def _make_getset_interval(method_name, lim_name, attr_name):
"""
Helper to generate ``get_{data,view}_interval`` and
``set_{data,view}_interval`` implementations.
"""
def getter(self):
# docstring inherited.
return getattr(getattr(self.axes, lim_name), attr_name)
def setter(self, vmin, vmax, ignore=False):
# docstring inherited.
if ignore:
setattr(getattr(self.axes, lim_name), attr_name, (vmin, vmax))
else:
oldmin, oldmax = getter(self)
if oldmin < oldmax:
setter(self, min(vmin, vmax, oldmin), max(vmin, vmax, oldmax),
ignore=True)
else:
setter(self, max(vmin, vmax, oldmin), min(vmin, vmax, oldmax),
ignore=True)
self.stale = True
getter.__name__ = f"get_{method_name}_interval"
setter.__name__ = f"set_{method_name}_interval"
return getter, setter
class XAxis(Axis):
    """The x-axis; its ticks/label default to the bottom of the Axes."""

    __name__ = 'xaxis'
    axis_name = 'x'  #: Read-only name identifying the axis.
    _tick_class = XTick

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._init()

    def _init(self):
        """
        Initialize the label and offsetText instance values and
        `label_position` / `offset_text_position`.
        """
        # x in axes coords, y in display coords (to be updated at draw time by
        # _update_label_positions and _update_offset_text_position).
        self.label.set(
            x=0.5, y=0,
            verticalalignment='top', horizontalalignment='center',
            transform=mtransforms.blended_transform_factory(
                self.axes.transAxes, mtransforms.IdentityTransform()),
        )
        self.label_position = 'bottom'

        # 'inherit' means the tick labels follow the tick color.
        if mpl.rcParams['xtick.labelcolor'] == 'inherit':
            tick_color = mpl.rcParams['xtick.color']
        else:
            tick_color = mpl.rcParams['xtick.labelcolor']

        self.offsetText.set(
            x=1, y=0,
            verticalalignment='top', horizontalalignment='right',
            transform=mtransforms.blended_transform_factory(
                self.axes.transAxes, mtransforms.IdentityTransform()),
            fontsize=mpl.rcParams['xtick.labelsize'],
            color=tick_color
        )
        self.offset_text_position = 'bottom'

    def contains(self, mouseevent):
        """Test whether the mouse event occurred in the x-axis."""
        if self._different_canvas(mouseevent):
            return False, {}
        x, y = mouseevent.x, mouseevent.y
        try:
            trans = self.axes.transAxes.inverted()
            xaxes, yaxes = trans.transform((x, y))
        except ValueError:
            return False, {}
        (l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
        # Hit when horizontally inside the axes and vertically within
        # pickradius below the bottom edge or above the top edge.
        inaxis = 0 <= xaxes <= 1 and (
            b - self._pickradius < y < b or
            t < y < t + self._pickradius)
        return inaxis, {}

    def set_label_position(self, position):
        """
        Set the label position (top or bottom)

        Parameters
        ----------
        position : {'top', 'bottom'}
        """
        self.label.set_verticalalignment(_api.check_getitem({
            'top': 'baseline', 'bottom': 'top',
        }, position=position))
        self.label_position = position
        self.stale = True

    def _update_label_position(self, renderer):
        """
        Update the label position based on the bounding box enclosing
        all the ticklabels and axis spine
        """
        if not self._autolabelpos:
            return

        # get bounding boxes for this axis and any siblings
        # that have been set by `fig.align_xlabels()`
        bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
        x, y = self.label.get_position()

        if self.label_position == 'bottom':
            # Union with extents of the bottom spine if present, of the axes otherwise.
            bbox = mtransforms.Bbox.union([
                *bboxes, self.axes.spines.get("bottom", self.axes).get_window_extent()])
            self.label.set_position(
                (x, bbox.y0 - self.labelpad * self.get_figure(root=True).dpi / 72))
        else:
            # Union with extents of the top spine if present, of the axes otherwise.
            bbox = mtransforms.Bbox.union([
                *bboxes2, self.axes.spines.get("top", self.axes).get_window_extent()])
            self.label.set_position(
                (x, bbox.y1 + self.labelpad * self.get_figure(root=True).dpi / 72))

    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset_text position based on the sequence of bounding
        boxes of all the ticklabels
        """
        x, y = self.offsetText.get_position()
        if not hasattr(self, '_tick_position'):
            self._tick_position = 'bottom'
        if self._tick_position == 'bottom':
            if not len(bboxes):
                bottom = self.axes.bbox.ymin
            else:
                bbox = mtransforms.Bbox.union(bboxes)
                bottom = bbox.y0
            y = bottom - self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72
        else:
            if not len(bboxes2):
                top = self.axes.bbox.ymax
            else:
                bbox = mtransforms.Bbox.union(bboxes2)
                top = bbox.y1
            y = top + self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72
        self.offsetText.set_position((x, y))

    def set_ticks_position(self, position):
        """
        Set the ticks position.

        Parameters
        ----------
        position : {'top', 'bottom', 'both', 'default', 'none'}
            'both' sets the ticks to appear on both positions, but does not
            change the tick labels.  'default' resets the tick positions to
            the default: ticks on both positions, labels at bottom.  'none'
            can be used if you don't want any ticks. 'none' and 'both'
            affect only the ticks, not the labels.
        """
        if position == 'top':
            self.set_tick_params(which='both', top=True, labeltop=True,
                                 bottom=False, labelbottom=False)
            self._tick_position = 'top'
            self.offsetText.set_verticalalignment('bottom')
        elif position == 'bottom':
            self.set_tick_params(which='both', top=False, labeltop=False,
                                 bottom=True, labelbottom=True)
            self._tick_position = 'bottom'
            self.offsetText.set_verticalalignment('top')
        elif position == 'both':
            self.set_tick_params(which='both', top=True,
                                 bottom=True)
        elif position == 'none':
            self.set_tick_params(which='both', top=False,
                                 bottom=False)
        elif position == 'default':
            self.set_tick_params(which='both', top=True, labeltop=False,
                                 bottom=True, labelbottom=True)
            self._tick_position = 'bottom'
            self.offsetText.set_verticalalignment('top')
        else:
            # Raises with the list of valid values.
            _api.check_in_list(['top', 'bottom', 'both', 'default', 'none'],
                               position=position)
        self.stale = True

    def tick_top(self):
        """
        Move ticks and ticklabels (if present) to the top of the Axes.
        """
        label = True
        if 'label1On' in self._major_tick_kw:
            label = (self._major_tick_kw['label1On']
                     or self._major_tick_kw['label2On'])
        self.set_ticks_position('top')
        # If labels were turned off before this was called, leave them off.
        self.set_tick_params(which='both', labeltop=label)

    def tick_bottom(self):
        """
        Move ticks and ticklabels (if present) to the bottom of the Axes.
        """
        label = True
        if 'label1On' in self._major_tick_kw:
            label = (self._major_tick_kw['label1On']
                     or self._major_tick_kw['label2On'])
        self.set_ticks_position('bottom')
        # If labels were turned off before this was called, leave them off.
        self.set_tick_params(which='both', labelbottom=label)

    def get_ticks_position(self):
        """
        Return the ticks position ("top", "bottom", "default", or "unknown").
        """
        # Map the numeric side codes from _get_ticks_position to names.
        return {1: "bottom", 2: "top",
                "default": "default", "unknown": "unknown"}[
                    self._get_ticks_position()]

    get_view_interval, set_view_interval = _make_getset_interval(
        "view", "viewLim", "intervalx")
    get_data_interval, set_data_interval = _make_getset_interval(
        "data", "dataLim", "intervalx")

    def get_minpos(self):
        # Minimum positive x-value in the data limits.
        return self.axes.dataLim.minposx

    def set_default_intervals(self):
        # docstring inherited
        # only change view if dataLim has not changed and user has
        # not changed the view:
        if (not self.axes.dataLim.mutatedx() and
                not self.axes.viewLim.mutatedx()):
            if self._converter is not None:
                info = self._converter.axisinfo(self.units, self)
                if info.default_limits is not None:
                    xmin, xmax = self.convert_units(info.default_limits)
                    self.axes.viewLim.intervalx = xmin, xmax
        self.stale = True

    def get_tick_space(self):
        # docstring inherited
        # Axis length in points.
        ends = mtransforms.Bbox.unit().transformed(
            self.axes.transAxes - self.get_figure(root=False).dpi_scale_trans)
        length = ends.width * 72
        # There is a heuristic here that the aspect ratio of tick text
        # is no more than 3:1
        size = self._get_tick_label_size('x') * 3
        if size > 0:
            return int(np.floor(length / size))
        else:
            return 2**31 - 1
class YAxis(Axis):
    """The y-axis; its ticks/label default to the left of the Axes."""

    __name__ = 'yaxis'
    axis_name = 'y'  #: Read-only name identifying the axis.
    _tick_class = YTick

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._init()

    def _init(self):
        """
        Initialize the label and offsetText instance values and
        `label_position` / `offset_text_position`.
        """
        # x in display coords, y in axes coords (to be updated at draw time by
        # _update_label_positions and _update_offset_text_position).
        self.label.set(
            x=0, y=0.5,
            verticalalignment='bottom', horizontalalignment='center',
            rotation='vertical', rotation_mode='anchor',
            transform=mtransforms.blended_transform_factory(
                mtransforms.IdentityTransform(), self.axes.transAxes),
        )
        self.label_position = 'left'

        # 'inherit' means the tick labels follow the tick color.
        if mpl.rcParams['ytick.labelcolor'] == 'inherit':
            tick_color = mpl.rcParams['ytick.color']
        else:
            tick_color = mpl.rcParams['ytick.labelcolor']

        # x in axes coords, y in display coords(!).
        self.offsetText.set(
            x=0, y=0.5,
            verticalalignment='baseline', horizontalalignment='left',
            transform=mtransforms.blended_transform_factory(
                self.axes.transAxes, mtransforms.IdentityTransform()),
            fontsize=mpl.rcParams['ytick.labelsize'],
            color=tick_color
        )
        self.offset_text_position = 'left'

    def contains(self, mouseevent):
        # docstring inherited
        if self._different_canvas(mouseevent):
            return False, {}
        x, y = mouseevent.x, mouseevent.y
        try:
            trans = self.axes.transAxes.inverted()
            xaxes, yaxes = trans.transform((x, y))
        except ValueError:
            return False, {}
        (l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
        # Hit when vertically inside the axes and horizontally within
        # pickradius left of the left edge or right of the right edge.
        inaxis = 0 <= yaxes <= 1 and (
            l - self._pickradius < x < l or
            r < x < r + self._pickradius)
        return inaxis, {}

    def set_label_position(self, position):
        """
        Set the label position (left or right)

        Parameters
        ----------
        position : {'left', 'right'}
        """
        self.label.set_rotation_mode('anchor')
        self.label.set_verticalalignment(_api.check_getitem({
            'left': 'bottom', 'right': 'top',
        }, position=position))
        self.label_position = position
        self.stale = True

    def _update_label_position(self, renderer):
        """
        Update the label position based on the bounding box enclosing
        all the ticklabels and axis spine
        """
        if not self._autolabelpos:
            return

        # get bounding boxes for this axis and any siblings
        # that have been set by `fig.align_ylabels()`
        bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
        x, y = self.label.get_position()

        if self.label_position == 'left':
            # Union with extents of the left spine if present, of the axes otherwise.
            bbox = mtransforms.Bbox.union([
                *bboxes, self.axes.spines.get("left", self.axes).get_window_extent()])
            self.label.set_position(
                (bbox.x0 - self.labelpad * self.get_figure(root=True).dpi / 72, y))
        else:
            # Union with extents of the right spine if present, of the axes otherwise.
            bbox = mtransforms.Bbox.union([
                *bboxes2, self.axes.spines.get("right", self.axes).get_window_extent()])
            self.label.set_position(
                (bbox.x1 + self.labelpad * self.get_figure(root=True).dpi / 72, y))

    def _update_offset_text_position(self, bboxes, bboxes2):
        """
        Update the offset_text position based on the sequence of bounding
        boxes of all the ticklabels
        """
        x, _ = self.offsetText.get_position()
        if 'outline' in self.axes.spines:
            # Special case for colorbars:
            bbox = self.axes.spines['outline'].get_window_extent()
        else:
            bbox = self.axes.bbox
        top = bbox.ymax
        self.offsetText.set_position(
            (x, top + self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72)
        )

    def set_offset_position(self, position):
        """
        Parameters
        ----------
        position : {'left', 'right'}
        """
        x, y = self.offsetText.get_position()
        x = _api.check_getitem({'left': 0, 'right': 1}, position=position)

        self.offsetText.set_ha(position)
        self.offsetText.set_position((x, y))
        self.stale = True

    def set_ticks_position(self, position):
        """
        Set the ticks position.

        Parameters
        ----------
        position : {'left', 'right', 'both', 'default', 'none'}
            'both' sets the ticks to appear on both positions, but does not
            change the tick labels.  'default' resets the tick positions to
            the default: ticks on both positions, labels at left.  'none'
            can be used if you don't want any ticks. 'none' and 'both'
            affect only the ticks, not the labels.
        """
        if position == 'right':
            self.set_tick_params(which='both', right=True, labelright=True,
                                 left=False, labelleft=False)
            self.set_offset_position(position)
        elif position == 'left':
            self.set_tick_params(which='both', right=False, labelright=False,
                                 left=True, labelleft=True)
            self.set_offset_position(position)
        elif position == 'both':
            self.set_tick_params(which='both', right=True,
                                 left=True)
        elif position == 'none':
            self.set_tick_params(which='both', right=False,
                                 left=False)
        elif position == 'default':
            self.set_tick_params(which='both', right=True, labelright=False,
                                 left=True, labelleft=True)
        else:
            # Raises with the list of valid values.
            _api.check_in_list(['left', 'right', 'both', 'default', 'none'],
                               position=position)
        self.stale = True

    def tick_right(self):
        """
        Move ticks and ticklabels (if present) to the right of the Axes.
        """
        label = True
        if 'label1On' in self._major_tick_kw:
            label = (self._major_tick_kw['label1On']
                     or self._major_tick_kw['label2On'])
        self.set_ticks_position('right')
        # if labels were turned off before this was called
        # leave them off
        self.set_tick_params(which='both', labelright=label)

    def tick_left(self):
        """
        Move ticks and ticklabels (if present) to the left of the Axes.
        """
        label = True
        if 'label1On' in self._major_tick_kw:
            label = (self._major_tick_kw['label1On']
                     or self._major_tick_kw['label2On'])
        self.set_ticks_position('left')
        # if labels were turned off before this was called
        # leave them off
        self.set_tick_params(which='both', labelleft=label)

    def get_ticks_position(self):
        """
        Return the ticks position ("left", "right", "default", or "unknown").
        """
        # Map the numeric side codes from _get_ticks_position to names.
        return {1: "left", 2: "right",
                "default": "default", "unknown": "unknown"}[
                    self._get_ticks_position()]

    get_view_interval, set_view_interval = _make_getset_interval(
        "view", "viewLim", "intervaly")
    get_data_interval, set_data_interval = _make_getset_interval(
        "data", "dataLim", "intervaly")

    def get_minpos(self):
        # Minimum positive y-value in the data limits.
        return self.axes.dataLim.minposy

    def set_default_intervals(self):
        # docstring inherited
        # only change view if dataLim has not changed and user has
        # not changed the view:
        if (not self.axes.dataLim.mutatedy() and
                not self.axes.viewLim.mutatedy()):
            if self._converter is not None:
                info = self._converter.axisinfo(self.units, self)
                if info.default_limits is not None:
                    ymin, ymax = self.convert_units(info.default_limits)
                    self.axes.viewLim.intervaly = ymin, ymax
        self.stale = True

    def get_tick_space(self):
        # docstring inherited
        # Axis length in points.
        ends = mtransforms.Bbox.unit().transformed(
            self.axes.transAxes - self.get_figure(root=False).dpi_scale_trans)
        length = ends.height * 72
        # Having a spacing of at least 2 just looks good.
        size = self._get_tick_label_size('y') * 2
        if size > 0:
            return int(np.floor(length / size))
        else:
            return 2**31 - 1
"""
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a Matplotlib backend.
`RendererBase`
An abstract base class to handle drawing/rendering operations.
`FigureCanvasBase`
The abstraction layer that separates the `.Figure` from the backend
specific details like a user interface drawing area.
`GraphicsContextBase`
An abstract base class that provides color, line styles, etc.
`Event`
The base class for all of the Matplotlib event handling. Derived classes
such as `KeyEvent` and `MouseEvent` store the meta data like keys and
buttons pressed, x and y locations in pixel and `~.axes.Axes` coordinates.
`ShowBase`
The base class for the ``Show`` class of each interactive backend; the
'show' callable is then set to ``Show.__call__``.
`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
"""
from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import pathlib
import signal
import socket
import sys
import time
import weakref
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.layout_engine import ConstrainedLayoutEngine
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
_log = logging.getLogger(__name__)

# File extension -> human-readable format description; extended at runtime
# via register_backend().
_default_filetypes = {
    'eps': 'Encapsulated Postscript',
    'jpg': 'Joint Photographic Experts Group',
    'jpeg': 'Joint Photographic Experts Group',
    'pdf': 'Portable Document Format',
    'pgf': 'PGF code for LaTeX',
    'png': 'Portable Network Graphics',
    'ps': 'Postscript',
    'raw': 'Raw RGBA bitmap',
    'rgba': 'Raw RGBA bitmap',
    'svg': 'Scalable Vector Graphics',
    'svgz': 'Scalable Vector Graphics',
    'tif': 'Tagged Image File Format',
    'tiff': 'Tagged Image File Format',
    'webp': 'WebP Image Format',
}

# File extension -> backend module name used to save that format; a value is
# replaced in place by the resolved FigureCanvas class the first time
# get_registered_canvas_class() imports it.  Extended via register_backend().
_default_backends = {
    'eps': 'matplotlib.backends.backend_ps',
    'jpg': 'matplotlib.backends.backend_agg',
    'jpeg': 'matplotlib.backends.backend_agg',
    'pdf': 'matplotlib.backends.backend_pdf',
    'pgf': 'matplotlib.backends.backend_pgf',
    'png': 'matplotlib.backends.backend_agg',
    'ps': 'matplotlib.backends.backend_ps',
    'raw': 'matplotlib.backends.backend_agg',
    'rgba': 'matplotlib.backends.backend_agg',
    'svg': 'matplotlib.backends.backend_svg',
    'svgz': 'matplotlib.backends.backend_svg',
    'tif': 'matplotlib.backends.backend_agg',
    'tiff': 'matplotlib.backends.backend_agg',
    'webp': 'matplotlib.backends.backend_agg',
}
def register_backend(format, backend, description=None):
    """
    Register a backend for saving to a given file format.

    Parameters
    ----------
    format : str
        File extension
    backend : module string or canvas class
        Backend for handling file output
    description : str, default: ""
        Description of the file type.
    """
    _default_backends[format] = backend
    _default_filetypes[format] = description if description is not None else ''
def get_registered_canvas_class(format):
    """
    Return the registered default canvas for given file format.
    Handles deferred import of required backend.

    Returns None if no backend is registered for *format*.
    """
    backend_class = _default_backends.get(format)
    if backend_class is None:
        return None
    if isinstance(backend_class, str):
        # Resolve the module name to its FigureCanvas class and cache the
        # result so the import happens only once.
        backend_class = importlib.import_module(backend_class).FigureCanvas
        _default_backends[format] = backend_class
    return backend_class
class RendererBase:
"""
An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing `draw_path` alone would give a
highly capable backend):
* `draw_path`
* `draw_image`
* `draw_gouraud_triangles`
The following methods *should* be implemented in the backend for
optimization reasons:
* `draw_text`
* `draw_markers`
* `draw_path_collection`
* `draw_quad_mesh`
"""
def __init__(self):
    super().__init__()
    # Lazily-populated cache slot.  NOTE(review): presumably holds a
    # TexManager once needed — confirm against the full class.
    self._texmanager = None
    # Helper for converting text to paths (used when drawing text as paths).
    self._text2path = text.TextToPath()
    # State for rasterization blocks.  NOTE(review): _raster_depth appears
    # to count nested rasterization requests — confirm against
    # start/stop_rasterizing in the full class.
    self._raster_depth = 0
    self._rasterizing = False
def open_group(self, s, gid=None):
    """
    Open a grouping element with label *s* and *gid* (if set) as id.

    Only used by the SVG renderer.  The base implementation is a no-op.
    """
def close_group(self, s):
    """
    Close a grouping element with label *s*.

    Only used by the SVG renderer.  The base implementation is a no-op.
    """
def draw_path(self, gc, path, transform, rgbFace=None):
    """Draw a `~.path.Path` instance using the given affine transform."""
    # Abstract: every concrete renderer must implement this primitive.
    raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
                 trans, rgbFace=None):
    """
    Draw a marker at each of *path*'s vertices (excluding control points).

    The base (fallback) implementation makes multiple calls to `draw_path`.
    Backends may want to override this method in order to draw the marker
    only once and reuse it multiple times.

    Parameters
    ----------
    gc : `.GraphicsContextBase`
        The graphics context.
    marker_path : `~matplotlib.path.Path`
        The path for the marker.
    marker_trans : `~matplotlib.transforms.Transform`
        An affine transform applied to the marker.
    path : `~matplotlib.path.Path`
        The locations to draw the markers.
    trans : `~matplotlib.transforms.Transform`
        An affine transform applied to the path.
    rgbFace : :mpltype:`color`, optional
    """
    for vertices, _code in path.iter_segments(trans, simplify=False):
        if not len(vertices):
            continue
        # The last point of the segment is the marker location.
        x, y = vertices[-2:]
        shifted = marker_trans + transforms.Affine2D().translate(x, y)
        self.draw_path(gc, marker_path, shifted, rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                         offsets, offset_trans, facecolors, edgecolors,
                         linewidths, linestyles, antialiaseds, urls,
                         offset_position):
    """
    Draw a collection of *paths*.

    Each path is first transformed by the corresponding entry
    in *all_transforms* (a list of (3, 3) matrices) and then by
    *master_transform*.  They are then translated by the corresponding
    entry in *offsets*, which has been first transformed by *offset_trans*.

    *facecolors*, *edgecolors*, *linewidths*, *linestyles*, and
    *antialiased* are lists that set the corresponding properties.

    *offset_position* is unused now, but the argument is kept for
    backwards compatibility.

    The base (fallback) implementation makes multiple calls to `draw_path`.
    Backends may want to override this in order to render each set of
    path data only once, and then reference that path multiple times with
    the different offsets, colors, styles etc.  The generator methods
    `_iter_collection_raw_paths` and `_iter_collection` are provided to
    help with (and standardize) the implementation across backends.  It
    is highly recommended to use those generators, so that changes to the
    behavior of `draw_path_collection` can be made globally.
    """
    raw_path_ids = list(self._iter_collection_raw_paths(
        master_transform, paths, all_transforms))
    for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
            gc, raw_path_ids, offsets, offset_trans,
            facecolors, edgecolors, linewidths, linestyles,
            antialiaseds, urls, offset_position):
        path, transform = path_id
        if xo != 0 or yo != 0:
            # translate() mutates in place and a transform may be shared by
            # multiple paths, so translate a frozen copy instead; with no
            # offset, the initial transform is reused as-is.
            transform = transform.frozen()
            transform.translate(xo, yo)
        self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
                   coordinates, offsets, offsetTrans, facecolors,
                   antialiased, edgecolors):
    """
    Draw a quadmesh.

    The base (fallback) implementation converts the quadmesh to paths and
    then calls `draw_path_collection`.
    """
    from matplotlib.collections import QuadMesh
    mesh_paths = QuadMesh._convert_mesh_to_paths(coordinates)
    # With no explicit edge colors, edges match the faces.
    if edgecolors is None:
        edgecolors = facecolors
    linewidths = np.array([gc.get_linewidth()], float)
    return self.draw_path_collection(
        gc, master_transform, mesh_paths, [], offsets, offsetTrans,
        facecolors, edgecolors, linewidths, [], [antialiased], [None],
        'screen')
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
                           transform):
    """
    Draw a series of Gouraud triangles.

    Parameters
    ----------
    gc : `.GraphicsContextBase`
        The graphics context.
    triangles_array : (N, 3, 2) array-like
        Array of *N* (x, y) points for the triangles.
    colors_array : (N, 3, 4) array-like
        Array of *N* RGBA colors for each point of the triangles.
    transform : `~matplotlib.transforms.Transform`
        An affine transform to apply to the points.
    """
    # Abstract: concrete renderers must implement this primitive.
    raise NotImplementedError
def _iter_collection_raw_paths(self, master_transform, paths,
                               all_transforms):
    """
    Helper method (along with `_iter_collection`) to implement
    `draw_path_collection` in a memory-efficient manner.

    Yield every base (path, transform) combination given a master
    transform, a list of paths and a list of transforms; the arguments are
    exactly those of `draw_path_collection`.  The backend should take each
    yielded path and transform and create an object that can be referenced
    (reused) later.
    """
    n_paths = len(paths)
    if n_paths == 0:
        return
    n_transforms = len(all_transforms)
    identity = transforms.IdentityTransform()
    # Paths and transforms are cycled so the longer list drives iteration.
    for i in range(max(n_paths, n_transforms)):
        per_path = (Affine2D(all_transforms[i % n_transforms])
                    if n_transforms else identity)
        yield paths[i % n_paths], per_path + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
`_iter_collection_raw_paths` would be used when calling
`_iter_collection`. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or len(facecolors) == len(edgecolors) == 0:
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
def _iter_collection(self, gc, path_ids, offsets, offset_trans, facecolors,
                     edgecolors, linewidths, linestyles,
                     antialiaseds, urls, offset_position):
    """
    Helper method (along with `_iter_collection_raw_paths`) to implement
    `draw_path_collection` in a memory-efficient manner.

    This method yields all of the path, offset and graphics context
    combinations to draw the path collection.  The caller should already
    have looped over the results of `_iter_collection_raw_paths` to draw
    this collection.

    The arguments should be the same as that passed into
    `draw_path_collection`, with the exception of *path_ids*, which is a
    list of arbitrary objects that the backend will use to reference one of
    the paths created in the `_iter_collection_raw_paths` stage.

    Each yielded result is of the form::

        xo, yo, path_id, gc, rgbFace

    where *xo*, *yo* is an offset; *path_id* is one of the elements of
    *path_ids*; *gc* is a graphics context and *rgbFace* is a color to
    use for filling the path.
    """
    Npaths = len(path_ids)
    Noffsets = len(offsets)
    # Total number of (path, offset) draws: the shorter sequences cycle.
    N = max(Npaths, Noffsets)
    Nfacecolors = len(facecolors)
    Nedgecolors = len(edgecolors)
    Nlinewidths = len(linewidths)
    Nlinestyles = len(linestyles)
    Nurls = len(urls)
    if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
        return
    # Work on a copy of *gc* so the caller's context is left untouched.
    gc0 = self.new_gc()
    gc0.copy_properties(gc)

    def cycle_or_default(seq, default=None):
        # Cycle over *seq* if it is not empty; else always yield *default*.
        return (itertools.cycle(seq) if len(seq)
                else itertools.repeat(default))

    pathids = cycle_or_default(path_ids)
    # Offsets are transformed to display coordinates up front.
    toffsets = cycle_or_default(offset_trans.transform(offsets), (0, 0))
    fcs = cycle_or_default(facecolors)
    ecs = cycle_or_default(edgecolors)
    lws = cycle_or_default(linewidths)
    lss = cycle_or_default(linestyles)
    aas = cycle_or_default(antialiaseds)
    urls = cycle_or_default(urls)
    if Nedgecolors == 0:
        gc0.set_linewidth(0.0)
    for pathid, (xo, yo), fc, ec, lw, ls, aa, url in itertools.islice(
            zip(pathids, toffsets, fcs, ecs, lws, lss, aas, urls), N):
        # Skip draws whose offset is nan/inf.
        if not (np.isfinite(xo) and np.isfinite(yo)):
            continue
        if Nedgecolors:
            if Nlinewidths:
                gc0.set_linewidth(lw)
            if Nlinestyles:
                gc0.set_dashes(*ls)
            # A fully transparent edge is drawn as "no edge" instead.
            if len(ec) == 4 and ec[3] == 0.0:
                gc0.set_linewidth(0)
            else:
                gc0.set_foreground(ec)
        # A fully transparent face is treated as "no fill".
        if fc is not None and len(fc) == 4 and fc[3] == 0:
            fc = None
        gc0.set_antialiased(aa)
        if Nurls:
            gc0.set_url(url)
        yield xo, yo, pathid, gc0, fc
    gc0.restore()
def get_image_magnification(self):
    """
    Get the factor by which to magnify images passed to `draw_image`.

    Allows a backend to have images at a different resolution to other
    artists.
    """
    # Default: images are rendered at the same resolution as other artists.
    return 1.0
def draw_image(self, gc, x, y, im, transform=None):
    """
    Draw an RGBA image.

    Parameters
    ----------
    gc : `.GraphicsContextBase`
        A graphics context with clipping information.
    x : float
        The distance in physical units (i.e., dots or pixels) from the left
        hand side of the canvas.
    y : float
        The distance in physical units (i.e., dots or pixels) from the
        bottom side of the canvas.
    im : (N, M, 4) array of `numpy.uint8`
        An array of RGBA pixels.
    transform : `~matplotlib.transforms.Affine2DBase`
        If and only if the concrete backend is written such that
        `option_scale_image` returns ``True``, an affine transformation
        (i.e., an `.Affine2DBase`) *may* be passed to `draw_image`.  The
        translation vector of the transformation is given in physical units
        (i.e., dots or pixels). Note that the transformation does not
        override *x* and *y*, and has to be applied *before* translating
        the result by *x* and *y* (this can be accomplished by adding *x*
        and *y* to the translation vector defined by *transform*).
    """
    # Abstract: every concrete backend must provide its own image drawing.
    raise NotImplementedError
def option_image_nocomposite(self):
    """
    Return whether image composition by Matplotlib should be skipped.

    Raster backends should usually return False (letting the C-level
    rasterizer take care of image composition); vector backends should
    usually return ``not rcParams["image.composite_image"]``.
    """
    # Default: do not skip composition (the raster-backend behavior).
    return False
def option_scale_image(self):
    """
    Return whether arbitrary affine transformations in `draw_image` are
    supported (True for most vector backends).
    """
    # Default: backends must opt in to affine image transforms.
    return False
def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):
    """
    Draw a TeX instance.

    Parameters
    ----------
    gc : `.GraphicsContextBase`
        The graphics context.
    x : float
        The x location of the text in display coords.
    y : float
        The y location of the text baseline in display coords.
    s : str
        The TeX text string.
    prop : `~matplotlib.font_manager.FontProperties`
        The font properties.
    angle : float
        The rotation angle in degrees anti-clockwise.
    mtext : `~matplotlib.text.Text`
        The original text object to be rendered.
    """
    # Default implementation: rasterize-free fallback that converts the TeX
    # string to vector paths and draws those instead.
    self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
    """
    Draw a text instance.

    Parameters
    ----------
    gc : `.GraphicsContextBase`
        The graphics context.
    x : float
        The x location of the text in display coords.
    y : float
        The y location of the text baseline in display coords.
    s : str
        The text string.
    prop : `~matplotlib.font_manager.FontProperties`
        The font properties.
    angle : float
        The rotation angle in degrees anti-clockwise.
    ismath : bool or "TeX"
        If True, use mathtext parser.
    mtext : `~matplotlib.text.Text`
        The original text object to be rendered.

    Notes
    -----
    **Notes for backend implementers:**

    `.RendererBase.draw_text` also supports passing "TeX" to the *ismath*
    parameter to use TeX rendering, but this is not required for actual
    rendering backends, and indeed many builtin backends do not support
    this. Rather, TeX rendering is provided by `~.RendererBase.draw_tex`.
    """
    # Default implementation draws the text as vector paths; backends with
    # native text support override this.
    self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
    """
    Draw the text by converting them to paths using `.TextToPath`.

    This private helper supports the same parameters as
    `~.RendererBase.draw_text`; setting *ismath* to "TeX" triggers TeX
    rendering.
    """
    text2path = self._text2path
    fontsize = self.points_to_pixels(prop.get_size_in_points())
    verts, codes = text2path.get_text_path(prop, s, ismath=ismath)
    path = Path(verts, codes)
    # The glyph path is generated at text2path.FONT_SCALE and scaled down to
    # the requested font size; when the canvas' y axis points down (flipy),
    # translate relative to the top edge instead of the bottom.
    if self.flipy():
        width, height = self.get_canvas_width_height()
        transform = (Affine2D()
                     .scale(fontsize / text2path.FONT_SCALE)
                     .rotate_deg(angle)
                     .translate(x, height - y))
    else:
        transform = (Affine2D()
                     .scale(fontsize / text2path.FONT_SCALE)
                     .rotate_deg(angle)
                     .translate(x, y))
    color = gc.get_rgb()
    # Fill the glyph outlines with the foreground color; no stroking.
    gc.set_linewidth(0.0)
    self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
    """
    Get the width, height, and descent (offset from the bottom to the baseline), in
    display coords, of the string *s* with `.FontProperties` *prop*.

    Whitespace at the start and the end of *s* is included in the reported width.
    """
    fontsize = prop.get_size_in_points()
    if ismath == 'TeX':
        # todo: handle properties
        return self.get_texmanager().get_text_width_height_descent(
            s, fontsize, renderer=self)
    # Effective dpi of this renderer, derived from its point scaling.
    dpi = self.points_to_pixels(72)
    if ismath:
        dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
        return dims[0:3]  # return width, height, descent
    flags = self._text2path._get_hinting_flag()
    font = self._text2path._get_font(prop)
    font.set_size(fontsize, dpi)
    # the width and height of unrotated string
    font.set_text(s, 0.0, flags=flags)
    w, h = font.get_width_height()
    d = font.get_descent()
    # FreeType reports metrics in 26.6 fixed point (1/64 pixel units).
    w /= 64.0  # convert from subpixels
    h /= 64.0
    d /= 64.0
    return w, h, d
def flipy(self):
    """
    Return whether y values increase from top to bottom.

    Note that this only affects drawing of texts.
    """
    # Most display surfaces put the origin at the top-left corner.
    return True
def get_canvas_width_height(self):
    """Return the canvas width and height in display coords."""
    # Placeholder dimensions; concrete canvases override this.
    return 1, 1
def get_texmanager(self):
    """Return the `.TexManager` instance."""
    # Created lazily so renderers that never draw TeX pay no startup cost.
    if self._texmanager is None:
        self._texmanager = TexManager()
    return self._texmanager
def new_gc(self):
    """Return an instance of a `.GraphicsContextBase`."""
    # Backends override this to return their own graphics-context type.
    return GraphicsContextBase()
def points_to_pixels(self, points):
    """
    Convert points to display units.

    You need to override this function (unless your backend
    doesn't have a dpi, e.g., postscript or svg). Some imaging
    systems assume some value for pixels per inch::

        points to pixels = points * pixels_per_inch/72 * dpi/72

    Parameters
    ----------
    points : float or array-like

    Returns
    -------
    Points converted to pixels
    """
    # Identity conversion: suitable for dpi-less (vector) backends.
    return points
def start_rasterizing(self):
    """
    Switch to the raster renderer.

    Used by `.MixedModeRenderer`.
    """
    # Intentionally a no-op here; mixed-mode-capable backends override it.
def stop_rasterizing(self):
    """
    Switch back to the vector renderer and draw the contents of the raster
    renderer as an image on the vector renderer.

    Used by `.MixedModeRenderer`.
    """
    # Intentionally a no-op here; mixed-mode-capable backends override it.
def start_filter(self):
    """
    Switch to a temporary renderer for image filtering effects.

    Currently only supported by the agg renderer.
    """
    # Intentionally a no-op in the base class.
def stop_filter(self, filter_func):
    """
    Switch back to the original renderer.  The contents of the temporary
    renderer is processed with the *filter_func* and is drawn on the
    original renderer as an image.

    Currently only supported by the agg renderer.
    """
    # Intentionally a no-op in the base class.
def _draw_disabled(self):
    """
    Context manager to temporarily disable drawing.

    This is used for getting the drawn size of Artists.  This lets us
    run the draw process to update any Python state but does not pay the
    cost of the draw_XYZ calls on the canvas.
    """
    # Build a no-op replacement for every draw_* method (plus group
    # open/close) found on RendererBase; _setattr_cm restores the real
    # methods when the context manager exits.
    no_ops = {
        meth_name: lambda *args, **kwargs: None
        for meth_name in dir(RendererBase)
        if (meth_name.startswith("draw_")
            or meth_name in ["open_group", "close_group"])
    }
    return _setattr_cm(self, **no_ops)
class GraphicsContextBase:
    """An abstract base class that provides color, line styles, etc."""

    def __init__(self):
        # Blending alpha; only honored when _forced_alpha is set.
        self._alpha = 1.0
        self._forced_alpha = False  # if True, _alpha overrides A from RGBA
        self._antialiased = 1  # use 0, 1 not True, False for extension code
        self._capstyle = CapStyle('butt')
        self._cliprect = None
        self._clippath = None
        # (offset, dash-list); (0, None) means a solid line.
        self._dashes = 0, None
        self._joinstyle = JoinStyle('round')
        self._linestyle = 'solid'
        self._linewidth = 1
        # Foreground color as an RGBA tuple of floats in [0, 1].
        self._rgb = (0.0, 0.0, 0.0, 1.0)
        self._hatch = None
        self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
        self._hatch_linewidth = rcParams['hatch.linewidth']
        self._url = None
        self._gid = None
        self._snap = None
        self._sketch = None

    def copy_properties(self, gc):
        """Copy properties from *gc* to self."""
        self._alpha = gc._alpha
        self._forced_alpha = gc._forced_alpha
        self._antialiased = gc._antialiased
        self._capstyle = gc._capstyle
        self._cliprect = gc._cliprect
        self._clippath = gc._clippath
        self._dashes = gc._dashes
        self._joinstyle = gc._joinstyle
        self._linestyle = gc._linestyle
        self._linewidth = gc._linewidth
        self._rgb = gc._rgb
        self._hatch = gc._hatch
        self._hatch_color = gc._hatch_color
        self._hatch_linewidth = gc._hatch_linewidth
        self._url = gc._url
        self._gid = gc._gid
        self._snap = gc._snap
        self._sketch = gc._sketch

    def restore(self):
        """
        Restore the graphics context from the stack - needed only
        for backends that save graphics contexts on a stack.
        """
        # Intentionally a no-op in the base class.

    def get_alpha(self):
        """
        Return the alpha value used for blending - not supported on all
        backends.
        """
        return self._alpha

    def get_antialiased(self):
        """Return whether the object should try to do antialiased rendering."""
        return self._antialiased

    def get_capstyle(self):
        """Return the `.CapStyle` name."""
        return self._capstyle.name

    def get_clip_rectangle(self):
        """
        Return the clip rectangle as a `~matplotlib.transforms.Bbox` instance.
        """
        return self._cliprect

    def get_clip_path(self):
        """
        Return the clip path in the form (path, transform), where path
        is a `~.path.Path` instance, and transform is
        an affine transform to apply to the path before clipping.
        """
        if self._clippath is not None:
            tpath, tr = self._clippath.get_transformed_path_and_affine()
            # Guard against nan/inf vertices, which would break clipping.
            if np.all(np.isfinite(tpath.vertices)):
                return tpath, tr
            else:
                _log.warning("Ill-defined clip_path detected. Returning None.")
                return None, None
        return None, None

    def get_dashes(self):
        """
        Return the dash style as an (offset, dash-list) pair.

        See `.set_dashes` for details.

        Default value is (None, None).
        """
        return self._dashes

    def get_forced_alpha(self):
        """
        Return whether the value given by get_alpha() should be used to
        override any other alpha-channel values.
        """
        return self._forced_alpha

    def get_joinstyle(self):
        """Return the `.JoinStyle` name."""
        return self._joinstyle.name

    def get_linewidth(self):
        """Return the line width in points."""
        return self._linewidth

    def get_rgb(self):
        """Return a tuple of three or four floats from 0-1."""
        return self._rgb

    def get_url(self):
        """Return a url if one is set, None otherwise."""
        return self._url

    def get_gid(self):
        """Return the object identifier if one is set, None otherwise."""
        return self._gid

    def get_snap(self):
        """
        Return the snap setting, which can be:

        * True: snap vertices to the nearest pixel center
        * False: leave vertices as-is
        * None: (auto) If the path contains only rectilinear line segments,
          round to the nearest pixel center
        """
        return self._snap

    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on all backends.

        If ``alpha=None`` (the default), the alpha components of the
        foreground and fill colors will be used to set their respective
        transparencies (where applicable); otherwise, ``alpha`` will override
        them.
        """
        if alpha is not None:
            self._alpha = alpha
            self._forced_alpha = True
        else:
            self._alpha = 1.0
            self._forced_alpha = False
        # Re-derive the stored RGBA so it reflects the new alpha policy.
        self.set_foreground(self._rgb, isRGBA=True)

    def set_antialiased(self, b):
        """Set whether object should be drawn with antialiased rendering."""
        # Use ints to make life easier on extension code trying to read the gc.
        self._antialiased = int(bool(b))

    @_docstring.interpd
    def set_capstyle(self, cs):
        """
        Set how to draw endpoints of lines.

        Parameters
        ----------
        cs : `.CapStyle` or %(CapStyle)s
        """
        self._capstyle = CapStyle(cs)

    def set_clip_rectangle(self, rectangle):
        """Set the clip rectangle to a `.Bbox` or None."""
        self._cliprect = rectangle

    def set_clip_path(self, path):
        """Set the clip path to a `.TransformedPath` or None."""
        _api.check_isinstance((transforms.TransformedPath, None), path=path)
        self._clippath = path

    def set_dashes(self, dash_offset, dash_list):
        """
        Set the dash style for the gc.

        Parameters
        ----------
        dash_offset : float
            Distance, in points, into the dash pattern at which to
            start the pattern. It is usually set to 0.
        dash_list : array-like or None
            The on-off sequence as points.  None specifies a solid line. All
            values must otherwise be non-negative (:math:`\\ge 0`).

        Notes
        -----
        See p. 666 of the PostScript
        `Language Reference
        <https://www.adobe.com/jp/print/postscript/pdfs/PLRM.pdf>`_
        for more info.
        """
        if dash_list is not None:
            dl = np.asarray(dash_list)
            if np.any(dl < 0.0):
                raise ValueError(
                    "All values in the dash list must be non-negative")
            # An all-zero pattern would mean "always off", which is invalid.
            if dl.size and not np.any(dl > 0.0):
                raise ValueError(
                    'At least one value in the dash list must be positive')
        self._dashes = dash_offset, dash_list

    def set_foreground(self, fg, isRGBA=False):
        """
        Set the foreground color.

        Parameters
        ----------
        fg : :mpltype:`color`
        isRGBA : bool
            If *fg* is known to be an ``(r, g, b, a)`` tuple, *isRGBA* can be
            set to True to improve performance.
        """
        # When alpha is forced, replace the color's own alpha channel.
        if self._forced_alpha and isRGBA:
            self._rgb = fg[:3] + (self._alpha,)
        elif self._forced_alpha:
            self._rgb = colors.to_rgba(fg, self._alpha)
        elif isRGBA:
            self._rgb = fg
        else:
            self._rgb = colors.to_rgba(fg)

    @_docstring.interpd
    def set_joinstyle(self, js):
        """
        Set how to draw connections between line segments.

        Parameters
        ----------
        js : `.JoinStyle` or %(JoinStyle)s
        """
        self._joinstyle = JoinStyle(js)

    def set_linewidth(self, w):
        """Set the linewidth in points."""
        self._linewidth = float(w)

    def set_url(self, url):
        """Set the url for links in compatible backends."""
        self._url = url

    def set_gid(self, id):
        """Set the id."""
        self._gid = id

    def set_snap(self, snap):
        """
        Set the snap setting which may be:

        * True: snap vertices to the nearest pixel center
        * False: leave vertices as-is
        * None: (auto) If the path contains only rectilinear line segments,
          round to the nearest pixel center
        """
        self._snap = snap

    def set_hatch(self, hatch):
        """Set the hatch style (for fills)."""
        self._hatch = hatch

    def get_hatch(self):
        """Get the current hatch style."""
        return self._hatch

    def get_hatch_path(self, density=6.0):
        """Return a `.Path` for the current hatch."""
        hatch = self.get_hatch()
        if hatch is None:
            return None
        return Path.hatch(hatch, density)

    def get_hatch_color(self):
        """Get the hatch color."""
        return self._hatch_color

    def set_hatch_color(self, hatch_color):
        """Set the hatch color."""
        self._hatch_color = hatch_color

    def get_hatch_linewidth(self):
        """Get the hatch linewidth."""
        return self._hatch_linewidth

    def set_hatch_linewidth(self, hatch_linewidth):
        """Set the hatch linewidth."""
        self._hatch_linewidth = hatch_linewidth

    def get_sketch_params(self):
        """
        Return the sketch parameters for the artist.

        Returns
        -------
        tuple or `None`
            A 3-tuple with the following elements:

            * ``scale``: The amplitude of the wiggle perpendicular to the
              source line.
            * ``length``: The length of the wiggle along the line.
            * ``randomness``: The scale factor by which the length is
              shrunken or expanded.

            May return `None` if no sketch parameters were set.
        """
        return self._sketch

    def set_sketch_params(self, scale=None, length=None, randomness=None):
        """
        Set the sketch parameters.

        Parameters
        ----------
        scale : float, optional
            The amplitude of the wiggle perpendicular to the source line, in
            pixels.  If scale is `None`, or not provided, no sketch filter will
            be provided.
        length : float, default: 128
            The length of the wiggle along the line, in pixels.
        randomness : float, default: 16
            The scale factor by which the length is shrunken or expanded.
        """
        # `length or 128.` / `randomness or 16.` also maps 0 to the default.
        self._sketch = (
            None if scale is None
            else (scale, length or 128., randomness or 16.))
class TimerBase:
    """
    A base class for providing timer events, useful for things animations.
    Backends need to implement a few specific methods in order to use their
    own timing mechanisms so that the timer events are integrated into their
    event loops.

    Subclasses must override the following methods:

    - ``_timer_start``: Backend-specific code for starting the timer.
    - ``_timer_stop``: Backend-specific code for stopping the timer.

    Subclasses may additionally override the following methods:

    - ``_timer_set_single_shot``: Code for setting the timer to single shot
      operating mode, if supported by the timer object.  If not, the `Timer`
      class itself will store the flag and the ``_on_timer`` method should be
      overridden to support such behavior.
    - ``_timer_set_interval``: Code for setting the interval on the timer, if
      there is a method for doing so on the timer object.
    - ``_on_timer``: The internal function that any timer object should call,
      which will handle the task of running all callbacks that have been set.
    """

    def __init__(self, interval=None, callbacks=None):
        """
        Parameters
        ----------
        interval : int, default: 1000ms
            The time between timer events in milliseconds.  Will be stored as
            ``timer.interval``.
        callbacks : list[tuple[callable, tuple, dict]]
            List of (func, args, kwargs) tuples that will be called upon timer
            events.  This list is accessible as ``timer.callbacks`` and can be
            manipulated directly, or the functions `~.TimerBase.add_callback`
            and `~.TimerBase.remove_callback` can be used.
        """
        # Copy the caller's list so external mutation doesn't leak in.
        self.callbacks = [] if callbacks is None else callbacks.copy()
        # Set .interval and not ._interval to go through the property setter.
        self.interval = 1000 if interval is None else interval
        self.single_shot = False

    def __del__(self):
        """Need to stop timer and possibly disconnect timer."""
        self._timer_stop()

    @_api.delete_parameter("3.9", "interval", alternative="timer.interval")
    def start(self, interval=None):
        """
        Start the timer object.

        Parameters
        ----------
        interval : int, optional
            Timer interval in milliseconds; overrides a previously set interval
            if provided.
        """
        if interval is not None:
            self.interval = interval
        self._timer_start()

    def stop(self):
        """Stop the timer."""
        self._timer_stop()

    def _timer_start(self):
        # Backend hook: start the underlying timer object.
        pass

    def _timer_stop(self):
        # Backend hook: stop the underlying timer object.
        pass

    @property
    def interval(self):
        """The time between timer events, in milliseconds."""
        return self._interval

    @interval.setter
    def interval(self, interval):
        # Force to int since none of the backends actually support fractional
        # milliseconds, and some error or give warnings.
        # Some backends also fail when interval == 0, so ensure >= 1 msec
        interval = max(int(interval), 1)
        self._interval = interval
        self._timer_set_interval()

    @property
    def single_shot(self):
        """Whether this timer should stop after a single run."""
        return self._single

    @single_shot.setter
    def single_shot(self, ss):
        self._single = ss
        self._timer_set_single_shot()

    def add_callback(self, func, *args, **kwargs):
        """
        Register *func* to be called by timer when the event fires. Any
        additional arguments provided will be passed to *func*.

        This function returns *func*, which makes it possible to use it as a
        decorator.
        """
        self.callbacks.append((func, args, kwargs))
        return func

    def remove_callback(self, func, *args, **kwargs):
        """
        Remove *func* from list of callbacks.

        *args* and *kwargs* are optional and used to distinguish between copies
        of the same function registered to be called with different arguments.
        This behavior is deprecated.  In the future, ``*args, **kwargs`` won't
        be considered anymore; to keep a specific callback removable by itself,
        pass it to `add_callback` as a `functools.partial` object.
        """
        if args or kwargs:
            _api.warn_deprecated(
                "3.1", message="In a future version, Timer.remove_callback "
                "will not take *args, **kwargs anymore, but remove all "
                "callbacks where the callable matches; to keep a specific "
                "callback removable by itself, pass it to add_callback as a "
                "functools.partial object.")
            self.callbacks.remove((func, args, kwargs))
        else:
            # Match on the callable only; remove the first registration.
            funcs = [c[0] for c in self.callbacks]
            if func in funcs:
                self.callbacks.pop(funcs.index(func))

    def _timer_set_interval(self):
        """Used to set interval on underlying timer object."""

    def _timer_set_single_shot(self):
        """Used to set single shot on underlying timer object."""

    def _on_timer(self):
        """
        Runs all function that have been registered as callbacks. Functions
        can return False (or 0) if they should not be called any more. If there
        are no callbacks, the timer is automatically stopped.
        """
        for func, args, kwargs in self.callbacks:
            ret = func(*args, **kwargs)
            # docstring above explains why we use `if ret == 0` here,
            # instead of `if not ret`.
            # This will also catch `ret == False` as `False == 0`
            # but does not annoy the linters
            # https://docs.python.org/3/library/stdtypes.html#boolean-values
            if ret == 0:
                self.callbacks.remove((func, args, kwargs))

        if len(self.callbacks) == 0:
            self.stop()
class Event:
    """
    A Matplotlib event.

    The following attributes are defined and shown with their default values.
    Subclasses may define additional attributes.

    Attributes
    ----------
    name : str
        The event name.
    canvas : `FigureCanvasBase`
        The backend-specific canvas instance generating the event.
    guiEvent
        The GUI event that triggered the Matplotlib event.
    """

    def __init__(self, name, canvas, guiEvent=None):
        # Record the identifying triple as-is; subclasses add further state.
        self.guiEvent = guiEvent
        self.canvas = canvas
        self.name = name

    def _process(self):
        """Process this event on ``self.canvas``, then unset ``guiEvent``."""
        # Dispatch through the canvas callback registry, then release the
        # native GUI event so it is not kept alive past the dispatch.
        self.canvas.callbacks.process(self.name, self)
        self.guiEvent = None
class DrawEvent(Event):
    """
    An event triggered by a draw operation on the canvas.

    In most backends, callbacks subscribed to this event will be fired after
    the rendering is complete but before the screen is updated.  Any extra
    artists drawn to the canvas's renderer will be reflected without an
    explicit call to ``blit``.

    .. warning::

       Calling ``canvas.draw`` and ``canvas.blit`` in these callbacks may
       not be safe with all backends and may cause infinite recursion.

    A DrawEvent has a number of special attributes in addition to those defined
    by the parent `Event` class.

    Attributes
    ----------
    renderer : `RendererBase`
        The renderer for the draw event.
    """

    def __init__(self, name, canvas, renderer):
        super().__init__(name, canvas)
        # The renderer used for this draw pass.
        self.renderer = renderer
class ResizeEvent(Event):
    """
    An event triggered by a canvas resize.

    A ResizeEvent has a number of special attributes in addition to those
    defined by the parent `Event` class.

    Attributes
    ----------
    width : int
        Width of the canvas in pixels.
    height : int
        Height of the canvas in pixels.
    """

    def __init__(self, name, canvas):
        super().__init__(name, canvas)
        # Snapshot the canvas size at the time of the event.
        self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
    """An event triggered by a figure being closed."""
class LocationEvent(Event):
    """
    An event that has a screen location.

    A LocationEvent has a number of special attributes in addition to those
    defined by the parent `Event` class.

    Attributes
    ----------
    x, y : int or None
        Event location in pixels from bottom left of canvas.
    inaxes : `~matplotlib.axes.Axes` or None
        The `~.axes.Axes` instance over which the mouse is, if any.
    xdata, ydata : float or None
        Data coordinates of the mouse within *inaxes*, or *None* if the mouse
        is not over an Axes.
    modifiers : frozenset
        The keyboard modifiers currently being pressed (except for KeyEvent).
    """

    # Weak reference to the Axes the most recent motion event was over (or
    # None); consulted by _mouse_handler to emit axes_enter/axes_leave.
    _last_axes_ref = None

    def __init__(self, name, canvas, x, y, guiEvent=None, *, modifiers=None):
        super().__init__(name, canvas, guiEvent=guiEvent)
        # x position - pixels from left of canvas
        self.x = int(x) if x is not None else x
        # y position - pixels from bottom of canvas
        self.y = int(y) if y is not None else y
        self.inaxes = None  # the Axes instance the mouse is over
        self.xdata = None  # x coord of mouse in data coords
        self.ydata = None  # y coord of mouse in data coords
        self.modifiers = frozenset(modifiers if modifiers is not None else [])

        if x is None or y is None:
            # cannot check if event was in Axes if no (x, y) info
            return

        # A grab (e.g. during a pan/zoom drag) routes the event to the
        # grabbing Axes regardless of the cursor position.
        self._set_inaxes(self.canvas.inaxes((x, y))
                         if self.canvas.mouse_grabber is None else
                         self.canvas.mouse_grabber,
                         (x, y))

    # Splitting _set_inaxes out is useful for the axes_leave_event handler: it
    # needs to generate synthetic LocationEvents with manually-set inaxes.  In
    # that latter case, xy has already been cast to int so it can directly be
    # read from self.x, self.y; in the normal case, however, it is more
    # accurate to pass the untruncated float x, y values passed to the ctor.
    def _set_inaxes(self, inaxes, xy=None):
        self.inaxes = inaxes
        if inaxes is not None:
            try:
                self.xdata, self.ydata = inaxes.transData.inverted().transform(
                    xy if xy is not None else (self.x, self.y))
            except ValueError:
                # Non-invertible transform: leave xdata/ydata as None.
                pass
class MouseButton(IntEnum):
    """The mouse buttons reported in `MouseEvent.button`."""
    LEFT = 1
    MIDDLE = 2
    RIGHT = 3
    BACK = 8
    FORWARD = 9
class MouseEvent(LocationEvent):
    """
    A mouse event ('button_press_event', 'button_release_event', \
'scroll_event', 'motion_notify_event').

    A MouseEvent has a number of special attributes in addition to those
    defined by the parent `Event` and `LocationEvent` classes.

    Attributes
    ----------
    button : None or `MouseButton` or {'up', 'down'}
        The button pressed. 'up' and 'down' are used for scroll events.

        Note that LEFT and RIGHT actually refer to the "primary" and
        "secondary" buttons, i.e. if the user inverts their left and right
        buttons ("left-handed setting") then the LEFT button will be the one
        physically on the right.

        If this is unset, *name* is "scroll_event", and *step* is nonzero, then
        this will be set to "up" or "down" depending on the sign of *step*.

    buttons : None or frozenset
        For 'motion_notify_event', the mouse buttons currently being pressed
        (a set of zero or more MouseButtons);
        for other events, None.

        .. note::
           For 'motion_notify_event', this attribute is more accurate than
           the ``button`` (singular) attribute, which is obtained from the last
           'button_press_event' or 'button_release_event' that occurred within
           the canvas (and thus 1. be wrong if the last change in mouse state
           occurred when the canvas did not have focus, and 2. cannot report
           when multiple buttons are pressed).

           This attribute is not set for 'button_press_event' and
           'button_release_event' because GUI toolkits are inconsistent as to
           whether they report the button state *before* or *after* the
           press/release occurred.

        .. warning::
           On macOS, the Tk backends only report a single button even if
           multiple buttons are pressed.

    key : None or str
        The key pressed when the mouse event triggered, e.g. 'shift'.
        See `KeyEvent`.

        .. warning::
           This key is currently obtained from the last 'key_press_event' or
           'key_release_event' that occurred within the canvas.  Thus, if the
           last change of keyboard state occurred while the canvas did not have
           focus, this attribute will be wrong.  On the other hand, the
           ``modifiers`` attribute should always be correct, but it can only
           report on modifier keys.

    step : float
        The number of scroll steps (positive for 'up', negative for 'down').
        This applies only to 'scroll_event' and defaults to 0 otherwise.

    dblclick : bool
        Whether the event is a double-click. This applies only to
        'button_press_event' and is False otherwise.  In particular, it's
        not used in 'button_release_event'.

    Examples
    --------
    ::

        def on_press(event):
            print('you pressed', event.button, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('button_press_event', on_press)
    """

    def __init__(self, name, canvas, x, y, button=None, key=None,
                 step=0, dblclick=False, guiEvent=None, *,
                 buttons=None, modifiers=None):
        super().__init__(
            name, canvas, x, y, guiEvent=guiEvent, modifiers=modifiers)
        # Normalize recognized integer button codes to the MouseButton enum.
        if button in MouseButton.__members__.values():
            button = MouseButton(button)
        if name == "scroll_event" and button is None:
            # Derive scroll direction from the sign of *step*.
            if step > 0:
                button = "up"
            elif step < 0:
                button = "down"
        self.button = button
        if name == "motion_notify_event":
            self.buttons = frozenset(buttons if buttons is not None else [])
        else:
            # We don't support 'buttons' for button_press/release_event because
            # toolkits are inconsistent as to whether they report the state
            # before or after the event.
            if buttons:
                raise ValueError(
                    "'buttons' is only supported for 'motion_notify_event'")
            self.buttons = None
        self.key = key
        self.step = step
        self.dblclick = dblclick

    def __str__(self):
        return (f"{self.name}: "
                f"xy=({self.x}, {self.y}) xydata=({self.xdata}, {self.ydata}) "
                f"button={self.button} dblclick={self.dblclick} "
                f"inaxes={self.inaxes}")
class PickEvent(Event):
    """
    A pick event.

    This event is fired when the user picks a location on the canvas
    sufficiently close to an artist that has been made pickable with
    `.Artist.set_picker`.

    A PickEvent has a number of special attributes in addition to those defined
    by the parent `Event` class.

    Attributes
    ----------
    mouseevent : `MouseEvent`
        The mouse event that generated the pick.
    artist : `~matplotlib.artist.Artist`
        The picked artist.  Note that artists are not pickable by default
        (see `.Artist.set_picker`).
    other
        Additional attributes may be present depending on the type of the
        picked object; e.g., a `.Line2D` pick may define different extra
        attributes than a `.PatchCollection` pick.

    Examples
    --------
    Bind a function ``on_pick()`` to pick events, that prints the coordinates
    of the picked data point::

        ax.plot(np.random.rand(100), 'o', picker=5)  # 5 points tolerance

        def on_pick(event):
            line = event.artist
            xdata, ydata = line.get_data()
            ind = event.ind
            print(f'on pick line: {xdata[ind]:.3f}, {ydata[ind]:.3f}')

        cid = fig.canvas.mpl_connect('pick_event', on_pick)
    """

    def __init__(self, name, canvas, mouseevent, artist,
                 guiEvent=None, **kwargs):
        # Inherit the GUI event from the originating mouse event when the
        # caller did not supply one explicitly.
        if guiEvent is None:
            guiEvent = mouseevent.guiEvent
        super().__init__(name, canvas, guiEvent)
        self.mouseevent = mouseevent
        self.artist = artist
        # Picker-specific extras (e.g. `ind`) become attributes directly.
        self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
    """
    A key event (key press, key release).

    A KeyEvent has a number of special attributes in addition to those defined
    by the parent `Event` and `LocationEvent` classes.

    Attributes
    ----------
    key : None or str
        The key(s) pressed. Could be *None*, a single case sensitive Unicode
        character ("g", "G", "#", etc.), a special key ("control", "shift",
        "f1", "up", etc.) or a combination of the above (e.g., "ctrl+alt+g",
        "ctrl+alt+G").

    Notes
    -----
    Modifier keys will be prefixed to the pressed key and will be in the order
    "ctrl", "alt", "super". The exception to this rule is when the pressed key
    is itself a modifier key, therefore "ctrl+alt" and "alt+control" can both
    be valid key values.

    Examples
    --------
    ::

        def on_key(event):
            print('you pressed', event.key, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('key_press_event', on_key)
    """

    def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
        super().__init__(name, canvas, x, y, guiEvent=guiEvent)
        self.key = key
# Default callback for key events.
def _key_handler(event):
    """
    Maintain the canvas' dead-reckoned key state.

    Records the key on a press and clears it on a release so that other
    default handlers (e.g. the mouse handler) can consult ``canvas._key``.
    """
    canvas = event.canvas
    if event.name == "key_press_event":
        canvas._key = event.key
    elif event.name == "key_release_event":
        canvas._key = None
# Default callback for mouse events.
def _mouse_handler(event):
    """
    Default mouse-event callback.

    Keeps dead-reckoned button/key state on the canvas up to date, and emits
    synthetic ``axes_enter_event``/``axes_leave_event`` callbacks when a
    motion event crosses an Axes boundary.
    """
    # Dead-reckoning of button and key.
    if event.name == "button_press_event":
        event.canvas._button = event.button
    elif event.name == "button_release_event":
        event.canvas._button = None
    elif event.name == "motion_notify_event" and event.button is None:
        # Motion events do not carry a button; re-attach the last-seen one.
        event.button = event.canvas._button
    if event.key is None:
        event.key = event.canvas._key
    # Emit axes_enter/axes_leave.
    if event.name == "motion_notify_event":
        # _last_axes_ref is a module-level weakref to the Axes the cursor was
        # last over (or None); compare with the current Axes to detect crossings.
        last_ref = LocationEvent._last_axes_ref
        last_axes = last_ref() if last_ref else None
        if last_axes != event.inaxes:
            if last_axes is not None:
                # Create a synthetic LocationEvent for the axes_leave_event.
                # Its inaxes attribute needs to be manually set (because the
                # cursor is actually *out* of that Axes at that point); this is
                # done with the internal _set_inaxes method which ensures that
                # the xdata and ydata attributes are also correct.
                try:
                    canvas = last_axes.get_figure(root=True).canvas
                    leave_event = LocationEvent(
                        "axes_leave_event", canvas,
                        event.x, event.y, event.guiEvent,
                        modifiers=event.modifiers)
                    leave_event._set_inaxes(last_axes)
                    canvas.callbacks.process("axes_leave_event", leave_event)
                except Exception:
                    pass  # The last canvas may already have been torn down.
            if event.inaxes is not None:
                event.canvas.callbacks.process("axes_enter_event", event)
            # Remember the new Axes via a weakref so we don't keep it alive.
            LocationEvent._last_axes_ref = (
                weakref.ref(event.inaxes) if event.inaxes else None)
def _get_renderer(figure, print_method=None):
    """
    Get the renderer that would be used to save a `.Figure`.

    If you need a renderer without any active draw methods use
    renderer._draw_disabled to temporarily patch them out at your call site.

    Parameters
    ----------
    figure : `.Figure`
        The figure whose save-time renderer is wanted.
    print_method : callable, optional
        A ``print_{fmt}`` method; if None, one is derived from the canvas'
        default filetype.
    """
    # This is implemented by triggering a draw, then immediately jumping out of
    # Figure.draw() by raising an exception: the renderer handed to the patched
    # draw() is smuggled out as the exception's payload.
    class Done(Exception):
        pass

    def _draw(renderer): raise Done(renderer)

    with cbook._setattr_cm(figure, draw=_draw), ExitStack() as stack:
        if print_method is None:
            fmt = figure.canvas.get_default_filetype()
            # Even for a canvas' default output type, a canvas switch may be
            # needed, e.g. for FigureCanvasBase.
            print_method = stack.enter_context(
                figure.canvas._switch_canvas_and_return_print_method(fmt))
        try:
            # The output is discarded; we only want the renderer that
            # print_method hands to Figure.draw.
            print_method(io.BytesIO())
        except Done as exc:
            renderer, = exc.args
            return renderer
        else:
            raise RuntimeError(f"{print_method} did not call Figure.draw, so "
                               f"no renderer is available")
def _no_output_draw(figure):
    """
    Draw *figure* without producing any output.

    This functionality was promoted to the figure level as
    `.Figure.draw_without_rendering`; the shim is kept for backward
    compatibility with external callers.
    """
    figure.draw_without_rendering()
def _is_non_interactive_terminal_ipython(ip):
    """
    Return whether *ip* is a terminal IPython running non-interactively.

    In a *terminal* IPython, ``ip.parent`` has an ``interact`` attribute; when
    that attribute is False the user will not interact with IPython, so no
    event-loop integration should be set up.  In every other case (ZMQ kernel,
    or interactive terminal) it should.
    """
    parent = getattr(ip, "parent", None)
    return parent is not None and getattr(parent, "interact", None) is False
@contextmanager
def _allow_interrupt(prepare_notifier, handle_sigint):
    """
    A context manager that allows terminating a plot by sending a SIGINT.  It
    is necessary because the running backend prevents the Python interpreter
    from running and processing signals (i.e., to raise a KeyboardInterrupt).
    To solve this, one needs to somehow wake up the interpreter and make it
    close the plot window.  We do this by using the signal.set_wakeup_fd()
    function which organizes a write of the signal number into a socketpair.

    A backend-specific function, *prepare_notifier*, arranges to listen to
    the pair's read socket while the event loop is running.  (If it returns a
    notifier object, that object is kept alive while the context manager runs.)

    If SIGINT was indeed caught, after exiting the on_signal() function the
    interpreter reacts to the signal according to the handler function which
    had been set up by a signal.signal() call; here, we arrange to call the
    backend-specific *handle_sigint* function, passing the notifier object
    as returned by prepare_notifier().  Finally, we call the old SIGINT
    handler with the same arguments that were given to our custom handler.

    We do this only if the old handler for SIGINT was not None, which means
    that a non-python handler was installed, i.e. in Julia, and not SIG_IGN
    which means we should ignore the interrupts.

    Parameters
    ----------
    prepare_notifier : Callable[[socket.socket], object]
    handle_sigint : Callable[[object], object]
    """
    old_sigint_handler = signal.getsignal(signal.SIGINT)
    if old_sigint_handler in (None, signal.SIG_IGN, signal.SIG_DFL):
        # Nothing to forward to: a non-Python handler (None), explicit
        # ignoring (SIG_IGN), or default handling (SIG_DFL) is in place.
        yield
        return
    handler_args = None
    # The wakeup fd writes into wsock; prepare_notifier listens on rsock.
    wsock, rsock = socket.socketpair()
    wsock.setblocking(False)
    rsock.setblocking(False)
    old_wakeup_fd = signal.set_wakeup_fd(wsock.fileno())
    notifier = prepare_notifier(rsock)

    def save_args_and_handle_sigint(*args):
        nonlocal handler_args, notifier
        # Remember the handler arguments so we can replay them to the old
        # handler after the event loop has been torn down.
        handler_args = args
        handle_sigint(notifier)
        notifier = None

    signal.signal(signal.SIGINT, save_args_and_handle_sigint)
    try:
        yield
    finally:
        # Restore global state in reverse order of setup.
        wsock.close()
        rsock.close()
        signal.set_wakeup_fd(old_wakeup_fd)
        signal.signal(signal.SIGINT, old_sigint_handler)
        if handler_args is not None:
            # A SIGINT arrived: replay it through the original handler.
            old_sigint_handler(*handler_args)
class FigureCanvasBase:
    """
    The canvas the figure renders into.

    Attributes
    ----------
    figure : `~matplotlib.figure.Figure`
        A high-level figure instance.
    """

    # Set to one of {"qt", "gtk3", "gtk4", "wx", "tk", "macosx"} if an
    # interactive framework is required, or None otherwise.
    required_interactive_framework = None

    # The manager class instantiated by new_manager.
    # (This is defined as a classproperty because the manager class is
    # currently defined *after* the canvas class, but one could also assign
    # ``FigureCanvasBase.manager_class = FigureManagerBase``
    # after defining both classes.)
    manager_class = _api.classproperty(lambda cls: FigureManagerBase)

    # Names of the events that canvases can emit through ``callbacks``.
    events = [
        'resize_event',
        'draw_event',
        'key_press_event',
        'key_release_event',
        'button_press_event',
        'button_release_event',
        'scroll_event',
        'motion_notify_event',
        'pick_event',
        'figure_enter_event',
        'figure_leave_event',
        'axes_enter_event',
        'axes_leave_event',
        'close_event'
    ]

    # If not None, the dpi at which output is forced (subclass-dependent);
    # passed to _tight_bbox.adjust_bbox in print_figure.
    fixed_dpi = None

    # Mapping of supported file extensions to human-readable format names.
    filetypes = _default_filetypes

    @_api.classproperty
    def supports_blit(cls):
        """If this Canvas sub-class supports blitting."""
        # Blitting is considered supported iff the subclass provides both
        # halves of the copy/restore protocol.
        return (hasattr(cls, "copy_from_bbox")
                and hasattr(cls, "restore_region"))

    def __init__(self, figure=None):
        # Local import to avoid a circular import at module load time.
        from matplotlib.figure import Figure
        self._fix_ipython_backend2gui()
        self._is_idle_drawing = True
        self._is_saving = False
        if figure is None:
            figure = Figure()
        figure.set_canvas(self)
        self.figure = figure
        self.manager = None
        self.widgetlock = widgets.LockDraw()
        self._button = None  # the button pressed
        self._key = None  # the key pressed
        self.mouse_grabber = None  # the Axes currently grabbing mouse
        self.toolbar = None  # NavigationToolbar2 will set me
        self._is_idle_drawing = False
        # We don't want to scale up the figure DPI more than once.
        figure._original_dpi = figure.dpi
        self._device_pixel_ratio = 1
        super().__init__()  # Typically the GUI widget init (if any).

    # These delegate to per-figure state so that the callback registry and
    # pick-callback ids survive canvas switches (see print_figure).
    callbacks = property(lambda self: self.figure._canvas_callbacks)
    button_pick_id = property(lambda self: self.figure._button_pick_id)
    scroll_pick_id = property(lambda self: self.figure._scroll_pick_id)

    @classmethod
    @functools.cache
    def _fix_ipython_backend2gui(cls):
        # Fix hard-coded module -> toolkit mapping in IPython (used for
        # `ipython --auto`).  This cannot be done at import time due to
        # ordering issues, so we do it when creating a canvas, and should only
        # be done once per class (hence the `cache`).
        # This function will not be needed when Python 3.12, the latest version
        # supported by IPython < 8.24, reaches end-of-life in late 2028.
        # At that time this function can be made a no-op and deprecated.
        mod_ipython = sys.modules.get("IPython")
        if mod_ipython is None or mod_ipython.version_info[:2] >= (8, 24):
            # Use of backend2gui is not needed for IPython >= 8.24 as the
            # functionality has been moved to Matplotlib.
            return
        import IPython
        ip = IPython.get_ipython()
        if not ip:
            return
        from IPython.core import pylabtools as pt
        if (not hasattr(pt, "backend2gui")
                or not hasattr(ip, "enable_matplotlib")):
            # In case we ever move the patch to IPython and remove these APIs,
            # don't break on our side.
            return
        backend2gui_rif = {
            "qt": "qt",
            "gtk3": "gtk3",
            "gtk4": "gtk4",
            "wx": "wx",
            "macosx": "osx",
        }.get(cls.required_interactive_framework)
        if backend2gui_rif:
            if _is_non_interactive_terminal_ipython(ip):
                ip.enable_gui(backend2gui_rif)

    @classmethod
    def new_manager(cls, figure, num):
        """
        Create a new figure manager for *figure*, using this canvas class.

        Notes
        -----
        This method should not be reimplemented in subclasses.  If
        custom manager creation logic is needed, please reimplement
        ``FigureManager.create_with_canvas``.
        """
        return cls.manager_class.create_with_canvas(cls, figure, num)

    @contextmanager
    def _idle_draw_cntx(self):
        # Mark the canvas as busy drawing so re-entrant draw_idle calls
        # become no-ops for the duration of the context.
        self._is_idle_drawing = True
        try:
            yield
        finally:
            self._is_idle_drawing = False

    def is_saving(self):
        """
        Return whether the renderer is in the process of saving
        to a file, rather than rendering for an on-screen buffer.
        """
        return self._is_saving

    def blit(self, bbox=None):
        """Blit the canvas in bbox (default entire canvas)."""
        # No-op in the base class; blitting backends override this.

    def inaxes(self, xy):
        """
        Return the topmost visible `~.axes.Axes` containing the point *xy*.

        Parameters
        ----------
        xy : (float, float)
            (x, y) pixel positions from left/bottom of the canvas.

        Returns
        -------
        `~matplotlib.axes.Axes` or None
            The topmost visible Axes containing the point, or None if there
            is no Axes at the point.
        """
        axes_list = [a for a in self.figure.get_axes()
                     if a.patch.contains_point(xy) and a.get_visible()]
        if axes_list:
            axes = cbook._topmost_artist(axes_list)
        else:
            axes = None
        return axes

    def grab_mouse(self, ax):
        """
        Set the child `~.axes.Axes` which is grabbing the mouse events.

        Usually called by the widgets themselves.  It is an error to call this
        if the mouse is already grabbed by another Axes.
        """
        if self.mouse_grabber not in (None, ax):
            raise RuntimeError("Another Axes already grabs mouse input")
        self.mouse_grabber = ax

    def release_mouse(self, ax):
        """
        Release the mouse grab held by the `~.axes.Axes` *ax*.

        Usually called by the widgets.  It is ok to call this even if *ax*
        doesn't have the mouse grab currently.
        """
        if self.mouse_grabber is ax:
            self.mouse_grabber = None

    def set_cursor(self, cursor):
        """
        Set the current cursor.

        This may have no effect if the backend does not display anything.

        If required by the backend, this method should trigger an update in
        the backend event loop after the cursor is set, as this method may be
        called e.g. before a long-running task during which the GUI is not
        updated.

        Parameters
        ----------
        cursor : `.Cursors`
            The cursor to display over the canvas.  Note: some backends may
            change the cursor for the entire window.
        """

    def draw(self, *args, **kwargs):
        """
        Render the `.Figure`.

        This method must walk the artist tree, even if no output is produced,
        because it triggers deferred work that users may want to access
        before saving output to disk.  For example computing limits,
        auto-limits, and tick values.
        """

    def draw_idle(self, *args, **kwargs):
        """
        Request a widget redraw once control returns to the GUI event loop.

        Even if multiple calls to `draw_idle` occur before control returns
        to the GUI event loop, the figure will only be rendered once.

        Notes
        -----
        Backends may choose to override the method and implement their own
        strategy to prevent multiple renderings.
        """
        # _is_idle_drawing guards against re-entrant draws triggered from
        # within draw() itself.
        if not self._is_idle_drawing:
            with self._idle_draw_cntx():
                self.draw(*args, **kwargs)

    @property
    def device_pixel_ratio(self):
        """
        The ratio of physical to logical pixels used for the canvas on screen.

        By default, this is 1, meaning physical and logical pixels are the same
        size.  Subclasses that support High DPI screens may set this property to
        indicate that said ratio is different.  All Matplotlib interaction,
        unless working directly with the canvas, remains in logical pixels.
        """
        return self._device_pixel_ratio

    def _set_device_pixel_ratio(self, ratio):
        """
        Set the ratio of physical to logical pixels used for the canvas.

        Subclasses that support High DPI screens can set this property to
        indicate that said ratio is different.  The canvas itself will be
        created at the physical size, while the client side will use the
        logical size.  Thus the DPI of the Figure will change to be scaled by
        this ratio.  Implementations that support High DPI screens should use
        physical pixels for events so that transforms back to Axes space are
        correct.

        By default, this is 1, meaning physical and logical pixels are the same
        size.

        Parameters
        ----------
        ratio : float
            The ratio of logical to physical pixels used for the canvas.

        Returns
        -------
        bool
            Whether the ratio has changed.  Backends may interpret this as a
            signal to resize the window, repaint the canvas, or change any
            other relevant properties.
        """
        if self._device_pixel_ratio == ratio:
            return False
        # In cases with mixed resolution displays, we need to be careful if the
        # device pixel ratio changes - in this case we need to resize the
        # canvas accordingly.  Some backends provide events that indicate a
        # change in DPI, but those that don't will update this before drawing.
        dpi = ratio * self.figure._original_dpi
        self.figure._set_dpi(dpi, forward=False)
        self._device_pixel_ratio = ratio
        return True

    def get_width_height(self, *, physical=False):
        """
        Return the figure width and height in integral points or pixels.

        When the figure is used on High DPI screens (and the backend supports
        it), the truncation to integers occurs after scaling by the device
        pixel ratio.

        Parameters
        ----------
        physical : bool, default: False
            Whether to return true physical pixels or logical pixels.  Physical
            pixels may be used by backends that support HiDPI, but still
            configure the canvas using its actual size.

        Returns
        -------
        width, height : int
            The size of the figure, in points or pixels, depending on the
            backend.
        """
        return tuple(int(size / (1 if physical else self.device_pixel_ratio))
                     for size in self.figure.bbox.max)

    @classmethod
    def get_supported_filetypes(cls):
        """Return dict of savefig file formats supported by this backend."""
        return cls.filetypes

    @classmethod
    def get_supported_filetypes_grouped(cls):
        """
        Return a dict of savefig file formats supported by this backend,
        where the keys are a file type name, such as 'Joint Photographic
        Experts Group', and the values are a list of filename extensions used
        for that filetype, such as ['jpg', 'jpeg'].
        """
        groupings = {}
        for ext, name in cls.filetypes.items():
            groupings.setdefault(name, []).append(ext)
            groupings[name].sort()
        return groupings

    @contextmanager
    def _switch_canvas_and_return_print_method(self, fmt, backend=None):
        """
        Context manager temporarily setting the canvas for saving the figure::

            with (canvas._switch_canvas_and_return_print_method(fmt, backend)
                  as print_method):
                # ``print_method`` is a suitable ``print_{fmt}`` method, and
                # the figure's canvas is temporarily switched to the method's
                # canvas within the with... block.  ``print_method`` is also
                # wrapped to suppress extra kwargs passed by ``print_figure``.

        Parameters
        ----------
        fmt : str
            If *backend* is None, then determine a suitable canvas class for
            saving to format *fmt* -- either the current canvas class, if it
            supports *fmt*, or whatever `get_registered_canvas_class` returns;
            switch the figure canvas to that canvas class.
        backend : str or None, default: None
            If not None, switch the figure canvas to the ``FigureCanvas`` class
            of the given backend.
        """
        canvas = None
        if backend is not None:
            # Return a specific canvas class, if requested.
            from .backends.registry import backend_registry
            canvas_class = backend_registry.load_backend_module(backend).FigureCanvas
            if not hasattr(canvas_class, f"print_{fmt}"):
                raise ValueError(
                    f"The {backend!r} backend does not support {fmt} output")
            canvas = canvas_class(self.figure)
        elif hasattr(self, f"print_{fmt}"):
            # Return the current canvas if it supports the requested format.
            canvas = self
        else:
            # Return a default canvas for the requested format, if it exists.
            canvas_class = get_registered_canvas_class(fmt)
            if canvas_class is None:
                raise ValueError(
                    "Format {!r} is not supported (supported formats: {})".format(
                        fmt, ", ".join(sorted(self.get_supported_filetypes()))))
            canvas = canvas_class(self.figure)
        # Propagate the saving flag to the (possibly new) canvas.
        canvas._is_saving = self._is_saving
        meth = getattr(canvas, f"print_{fmt}")
        mod = (meth.func.__module__
               if hasattr(meth, "func")  # partialmethod, e.g. backend_wx.
               else meth.__module__)
        if mod.startswith(("matplotlib.", "mpl_toolkits.")):
            optional_kws = {  # Passed by print_figure for other renderers.
                "dpi", "facecolor", "edgecolor", "orientation",
                "bbox_inches_restore"}
            skip = optional_kws - {*inspect.signature(meth).parameters}
            print_method = functools.wraps(meth)(lambda *args, **kwargs: meth(
                *args, **{k: v for k, v in kwargs.items() if k not in skip}))
        else:  # Let third-parties do as they see fit.
            print_method = meth
        try:
            yield print_method
        finally:
            # Always restore the original canvas on the figure.
            self.figure.canvas = self

    def print_figure(
            self, filename, dpi=None, facecolor=None, edgecolor=None,
            orientation='portrait', format=None, *,
            bbox_inches=None, pad_inches=None, bbox_extra_artists=None,
            backend=None, **kwargs):
        """
        Render the figure to hardcopy.  Set the figure patch face and edge
        colors.  This is useful because some of the GUIs have a gray figure
        face color background and you'll probably want to override this on
        hardcopy.

        Parameters
        ----------
        filename : str or path-like or file-like
            The file where the figure is saved.
        dpi : float, default: :rc:`savefig.dpi`
            The dots per inch to save the figure in.
        facecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.facecolor`
            The facecolor of the figure.  If 'auto', use the current figure
            facecolor.
        edgecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.edgecolor`
            The edgecolor of the figure.  If 'auto', use the current figure
            edgecolor.
        orientation : {'landscape', 'portrait'}, default: 'portrait'
            Only currently applies to PostScript printing.
        format : str, optional
            Force a specific file format.  If not given, the format is inferred
            from the *filename* extension, and if that fails from
            :rc:`savefig.format`.
        bbox_inches : 'tight' or `.Bbox`, default: :rc:`savefig.bbox`
            Bounding box in inches: only the given portion of the figure is
            saved.  If 'tight', try to figure out the tight bbox of the figure.
        pad_inches : float or 'layout', default: :rc:`savefig.pad_inches`
            Amount of padding in inches around the figure when bbox_inches is
            'tight'.  If 'layout' use the padding from the constrained or
            compressed layout engine; ignored if one of those engines is not in
            use.
        bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
            A list of extra artists that will be considered when the
            tight bbox is calculated.
        backend : str, optional
            Use a non-default backend to render the file, e.g. to render a
            png file with the "cairo" backend rather than the default "agg",
            or a pdf file with the "pgf" backend rather than the default
            "pdf".  Note that the default backend is normally sufficient.  See
            :ref:`the-builtin-backends` for a list of valid backends for each
            file format.  Custom backends can be referenced as "module://...".
        """
        if format is None:
            # get format from filename, or from backend's default filetype
            if isinstance(filename, os.PathLike):
                filename = os.fspath(filename)
            if isinstance(filename, str):
                format = os.path.splitext(filename)[1][1:]
            if format is None or format == '':
                format = self.get_default_filetype()
                if isinstance(filename, str):
                    filename = filename.rstrip('.') + '.' + format
        format = format.lower()

        if dpi is None:
            dpi = rcParams['savefig.dpi']
        if dpi == 'figure':
            dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)

        # Remove the figure manager, if any, to avoid resizing the GUI widget.
        with (cbook._setattr_cm(self, manager=None),
              self._switch_canvas_and_return_print_method(format, backend)
              as print_method,
              cbook._setattr_cm(self.figure, dpi=dpi),
              cbook._setattr_cm(self.figure.canvas, _device_pixel_ratio=1),
              cbook._setattr_cm(self.figure.canvas, _is_saving=True),
              ExitStack() as stack):

            for prop in ["facecolor", "edgecolor"]:
                color = locals()[prop]
                if color is None:
                    color = rcParams[f"savefig.{prop}"]
                if not cbook._str_equal(color, "auto"):
                    stack.enter_context(self.figure._cm_set(**{prop: color}))

            if bbox_inches is None:
                bbox_inches = rcParams['savefig.bbox']

            layout_engine = self.figure.get_layout_engine()
            if layout_engine is not None or bbox_inches == "tight":
                # we need to trigger a draw before printing to make sure
                # CL works.  "tight" also needs a draw to get the right
                # locations:
                renderer = _get_renderer(
                    self.figure,
                    functools.partial(
                        print_method, orientation=orientation)
                )
                # we do this instead of `self.figure.draw_without_rendering`
                # so that we can inject the orientation
                with getattr(renderer, "_draw_disabled", nullcontext)():
                    self.figure.draw(renderer)

            if bbox_inches:
                if bbox_inches == "tight":
                    bbox_inches = self.figure.get_tightbbox(
                        renderer, bbox_extra_artists=bbox_extra_artists)
                    if (isinstance(layout_engine, ConstrainedLayoutEngine) and
                            pad_inches == "layout"):
                        h_pad = layout_engine.get()["h_pad"]
                        w_pad = layout_engine.get()["w_pad"]
                    else:
                        if pad_inches in [None, "layout"]:
                            pad_inches = rcParams['savefig.pad_inches']
                        h_pad = w_pad = pad_inches
                    bbox_inches = bbox_inches.padded(w_pad, h_pad)

                # call adjust_bbox to save only the given area
                restore_bbox = _tight_bbox.adjust_bbox(
                    self.figure, bbox_inches, self.figure.canvas.fixed_dpi)

                _bbox_inches_restore = (bbox_inches, restore_bbox)
            else:
                _bbox_inches_restore = None

            # we have already done layout above, so turn it off:
            stack.enter_context(self.figure._cm_set(layout_engine='none'))
            try:
                # _get_renderer may change the figure dpi (as vector formats
                # force the figure dpi to 72), so we need to set it again here.
                with cbook._setattr_cm(self.figure, dpi=dpi):
                    result = print_method(
                        filename,
                        facecolor=facecolor,
                        edgecolor=edgecolor,
                        orientation=orientation,
                        bbox_inches_restore=_bbox_inches_restore,
                        **kwargs)
            finally:
                if bbox_inches and restore_bbox:
                    restore_bbox()
            return result

    @classmethod
    def get_default_filetype(cls):
        """
        Return the default savefig file format as specified in
        :rc:`savefig.format`.

        The returned string does not include a period.  This method is
        overridden in backends that only support a single file type.
        """
        return rcParams['savefig.format']

    def get_default_filename(self):
        """
        Return a suitable default filename, including the extension.
        """
        default_basename = (
            self.manager.get_window_title()
            if self.manager is not None
            else ''
        )
        default_basename = default_basename or 'image'
        # Characters to be avoided in a NT path:
        # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#naming_conventions
        # plus ' '
        removed_chars = '<>:"/\\|?*\0 '
        default_basename = default_basename.translate(
            {ord(c): "_" for c in removed_chars})
        default_filetype = self.get_default_filetype()
        return f'{default_basename}.{default_filetype}'

    def mpl_connect(self, s, func):
        """
        Bind function *func* to event *s*.

        Parameters
        ----------
        s : str
            One of the following events ids:

            - 'button_press_event'
            - 'button_release_event'
            - 'draw_event'
            - 'key_press_event'
            - 'key_release_event'
            - 'motion_notify_event'
            - 'pick_event'
            - 'resize_event'
            - 'scroll_event'
            - 'figure_enter_event',
            - 'figure_leave_event',
            - 'axes_enter_event',
            - 'axes_leave_event'
            - 'close_event'.

        func : callable
            The callback function to be executed, which must have the
            signature::

                def func(event: Event) -> Any

            For the location events (button and key press/release), if the
            mouse is over the Axes, the ``inaxes`` attribute of the event will
            be set to the `~matplotlib.axes.Axes` the event occurs is over, and
            additionally, the variables ``xdata`` and ``ydata`` attributes will
            be set to the mouse location in data coordinates.  See `.KeyEvent`
            and `.MouseEvent` for more info.

            .. note::

                If func is a method, this only stores a weak reference to the
                method.  Thus, the figure does not influence the lifetime of
                the associated object.  Usually, you want to make sure that the
                object is kept alive throughout the lifetime of the figure by
                holding a reference to it.

        Returns
        -------
        cid
            A connection id that can be used with
            `.FigureCanvasBase.mpl_disconnect`.

        Examples
        --------
        ::

            def on_press(event):
                print('you pressed', event.button, event.xdata, event.ydata)

            cid = canvas.mpl_connect('button_press_event', on_press)
        """
        return self.callbacks.connect(s, func)

    def mpl_disconnect(self, cid):
        """
        Disconnect the callback with id *cid*.

        Examples
        --------
        ::

            cid = canvas.mpl_connect('button_press_event', on_press)
            # ... later
            canvas.mpl_disconnect(cid)
        """
        self.callbacks.disconnect(cid)

    # Internal subclasses can override _timer_cls instead of new_timer, though
    # this is not a public API for third-party subclasses.
    _timer_cls = TimerBase

    def new_timer(self, interval=None, callbacks=None):
        """
        Create a new backend-specific subclass of `.Timer`.

        This is useful for getting periodic events through the backend's native
        event loop.  Implemented only for backends with GUIs.

        Parameters
        ----------
        interval : int
            Timer interval in milliseconds.

        callbacks : list[tuple[callable, tuple, dict]]
            Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
            will be executed by the timer every *interval*.

            Callbacks which return ``False`` or ``0`` will be removed from the
            timer.

        Examples
        --------
        >>> timer = fig.canvas.new_timer(callbacks=[(f1, (1,), {'a': 3})])
        """
        return self._timer_cls(interval=interval, callbacks=callbacks)

    def flush_events(self):
        """
        Flush the GUI events for the figure.

        Interactive backends need to reimplement this method.
        """

    def start_event_loop(self, timeout=0):
        """
        Start a blocking event loop.

        Such an event loop is used by interactive functions, such as
        `~.Figure.ginput` and `~.Figure.waitforbuttonpress`, to wait for
        events.

        The event loop blocks until a callback function triggers
        `stop_event_loop`, or *timeout* is reached.

        If *timeout* is 0 or negative, never timeout.

        Only interactive backends need to reimplement this method and it relies
        on `flush_events` being properly implemented.

        Interactive backends should implement this in a more native way.
        """
        if timeout <= 0:
            timeout = np.inf
        timestep = 0.01
        counter = 0
        self._looping = True
        # Busy-wait in small steps, flushing GUI events each iteration.
        while self._looping and counter * timestep < timeout:
            self.flush_events()
            time.sleep(timestep)
            counter += 1

    def stop_event_loop(self):
        """
        Stop the current blocking event loop.

        Interactive backends need to reimplement this to match
        `start_event_loop`
        """
        self._looping = False
def key_press_handler(event, canvas=None, toolbar=None):
    """
    Implement the default Matplotlib key bindings for the canvas and toolbar
    described at :ref:`key-event-handling`.

    Parameters
    ----------
    event : `KeyEvent`
        A key press/release event.
    canvas : `FigureCanvasBase`, default: ``event.canvas``
        The backend-specific canvas instance.  This parameter is kept for
        back-compatibility, but, if set, should always be equal to
        ``event.canvas``.
    toolbar : `NavigationToolbar2`, default: ``event.canvas.toolbar``
        The navigation cursor toolbar.  This parameter is kept for
        back-compatibility, but, if set, should always be equal to
        ``event.canvas.toolbar``.
    """
    if event.key is None:
        return
    if canvas is None:
        canvas = event.canvas
    if toolbar is None:
        toolbar = canvas.toolbar

    # toggle fullscreen mode (default key 'f', 'ctrl + f')
    if event.key in rcParams['keymap.fullscreen']:
        try:
            canvas.manager.full_screen_toggle()
        except AttributeError:
            # No manager (or one without fullscreen support): ignore.
            pass

    # quit the figure (default key 'ctrl+w')
    if event.key in rcParams['keymap.quit']:
        Gcf.destroy_fig(canvas.figure)
    if event.key in rcParams['keymap.quit_all']:
        Gcf.destroy_all()

    if toolbar is not None:
        # home or reset mnemonic (default key 'h', 'home' and 'r')
        if event.key in rcParams['keymap.home']:
            toolbar.home()
        # forward / backward keys to enable left handed quick navigation
        # (default key for backward: 'left', 'backspace' and 'c')
        elif event.key in rcParams['keymap.back']:
            toolbar.back()
        # (default key for forward: 'right' and 'v')
        elif event.key in rcParams['keymap.forward']:
            toolbar.forward()
        # pan mnemonic (default key 'p')
        elif event.key in rcParams['keymap.pan']:
            toolbar.pan()
            toolbar._update_cursor(event)
        # zoom mnemonic (default key 'o')
        elif event.key in rcParams['keymap.zoom']:
            toolbar.zoom()
            toolbar._update_cursor(event)
        # saving current figure (default key 's')
        elif event.key in rcParams['keymap.save']:
            toolbar.save_figure()

    if event.inaxes is None:
        return

    # these bindings require the mouse to be over an Axes to trigger
    def _get_uniform_gridstate(ticks):
        # Return True/False if all grid lines are on or off, None if they are
        # not all in the same state.
        return (True if all(tick.gridline.get_visible() for tick in ticks) else
                False if not any(tick.gridline.get_visible() for tick in ticks) else
                None)

    ax = event.inaxes
    # toggle major grids in current Axes (default key 'g')
    # Both here and below (for 'G'), we do nothing if *any* grid (major or
    # minor, x or y) is not in a uniform state, to avoid messing up user
    # customization.
    if (event.key in rcParams['keymap.grid']
            # Exclude minor grids not in a uniform state.
            and None not in [_get_uniform_gridstate(ax.xaxis.minorTicks),
                             _get_uniform_gridstate(ax.yaxis.minorTicks)]):
        x_state = _get_uniform_gridstate(ax.xaxis.majorTicks)
        y_state = _get_uniform_gridstate(ax.yaxis.majorTicks)
        # Step the (x, y) grid visibility through a fixed 4-state cycle.
        cycle = [(False, False), (True, False), (True, True), (False, True)]
        try:
            x_state, y_state = (
                cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
        except ValueError:
            # Exclude major grids not in a uniform state.
            pass
        else:
            # If turning major grids off, also turn minor grids off.
            ax.grid(x_state, which="major" if x_state else "both", axis="x")
            ax.grid(y_state, which="major" if y_state else "both", axis="y")
            canvas.draw_idle()
    # toggle major and minor grids in current Axes (default key 'G')
    if (event.key in rcParams['keymap.grid_minor']
            # Exclude major grids not in a uniform state.
            and None not in [_get_uniform_gridstate(ax.xaxis.majorTicks),
                             _get_uniform_gridstate(ax.yaxis.majorTicks)]):
        x_state = _get_uniform_gridstate(ax.xaxis.minorTicks)
        y_state = _get_uniform_gridstate(ax.yaxis.minorTicks)
        cycle = [(False, False), (True, False), (True, True), (False, True)]
        try:
            x_state, y_state = (
                cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
        except ValueError:
            # Exclude minor grids not in a uniform state.
            pass
        else:
            ax.grid(x_state, which="both", axis="x")
            ax.grid(y_state, which="both", axis="y")
            canvas.draw_idle()
    # toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
    elif event.key in rcParams['keymap.yscale']:
        scale = ax.get_yscale()
        if scale == 'log':
            ax.set_yscale('linear')
            ax.get_figure(root=True).canvas.draw_idle()
        elif scale == 'linear':
            try:
                ax.set_yscale('log')
            except ValueError as exc:
                # Log scale may be invalid (e.g. nonpositive limits): warn
                # and fall back to linear.
                _log.warning(str(exc))
                ax.set_yscale('linear')
            ax.get_figure(root=True).canvas.draw_idle()
    # toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
    elif event.key in rcParams['keymap.xscale']:
        scalex = ax.get_xscale()
        if scalex == 'log':
            ax.set_xscale('linear')
            ax.get_figure(root=True).canvas.draw_idle()
        elif scalex == 'linear':
            try:
                ax.set_xscale('log')
            except ValueError as exc:
                _log.warning(str(exc))
                ax.set_xscale('linear')
            ax.get_figure(root=True).canvas.draw_idle()
def button_press_handler(event, canvas=None, toolbar=None):
    """
    The default Matplotlib button actions for extra mouse buttons.

    Parameters are as for `key_press_handler`, except that *event* is a
    `MouseEvent`.
    """
    canvas = event.canvas if canvas is None else canvas
    toolbar = canvas.toolbar if toolbar is None else toolbar
    if toolbar is None:
        return  # Without a navigation toolbar there is nothing to do.
    button_name = str(MouseButton(event.button))
    if button_name in rcParams['keymap.back']:
        toolbar.back()
    elif button_name in rcParams['keymap.forward']:
        toolbar.forward()
class NonGuiException(Exception):
    """Raised when trying to show a figure in a non-GUI backend."""
class FigureManagerBase:
    """
    A backend-independent abstraction of a figure container and controller.
    The figure manager is used by pyplot to interact with the window in a
    backend-independent way. It's an adapter for the real (GUI) framework that
    represents the visual figure on screen.
    The figure manager is connected to a specific canvas instance, which in turn
    is connected to a specific figure instance. To access a figure manager for
    a given figure in user code, you typically use ``fig.canvas.manager``.
    GUI backends derive from this class to translate common operations such
    as *show* or *resize* to the GUI-specific code. Non-GUI backends do not
    support these operations and can just use the base class.
    The following basic operations are accessible:
    **Window operations**
    - `~.FigureManagerBase.show`
    - `~.FigureManagerBase.destroy`
    - `~.FigureManagerBase.full_screen_toggle`
    - `~.FigureManagerBase.resize`
    - `~.FigureManagerBase.get_window_title`
    - `~.FigureManagerBase.set_window_title`
    **Key and mouse button press handling**
    The figure manager sets up default key and mouse button press handling by
    hooking up the `.key_press_handler` to the matplotlib event system. This
    ensures the same shortcuts and mouse actions across backends.
    **Other operations**
    Subclasses will have additional attributes and functions to access
    additional functionality. This is of course backend-specific. For example,
    most GUI backends have ``window`` and ``toolbar`` attributes that give
    access to the native GUI widgets of the respective framework.
    Attributes
    ----------
    canvas : `FigureCanvasBase`
        The backend-specific canvas instance.
    num : int or str
        The figure number.
    key_press_handler_id : int
        The default key handler cid, when using the toolmanager.
        To disable the default key press handling use::
            figure.canvas.mpl_disconnect(
                figure.canvas.manager.key_press_handler_id)
    button_press_handler_id : int
        The default mouse button handler cid, when using the toolmanager.
        To disable the default button press handling use::
            figure.canvas.mpl_disconnect(
                figure.canvas.manager.button_press_handler_id)
    """
    # Toolbar classes used for rcParams["toolbar"] == "toolbar2" /
    # "toolmanager"; GUI subclasses override these with real widget classes.
    _toolbar2_class = None
    _toolmanager_toolbar_class = None
    def __init__(self, canvas, num):
        self.canvas = canvas
        canvas.manager = self  # store a pointer to parent
        self.num = num
        self.set_window_title(f"Figure {num:d}")
        self.key_press_handler_id = None
        self.button_press_handler_id = None
        # The default key/button handlers are installed only when *not* using
        # the toolmanager (the toolmanager provides its own key handling).
        if rcParams['toolbar'] != 'toolmanager':
            self.key_press_handler_id = self.canvas.mpl_connect(
                'key_press_event', key_press_handler)
            self.button_press_handler_id = self.canvas.mpl_connect(
                'button_press_event', button_press_handler)
        self.toolmanager = (ToolManager(canvas.figure)
                            if mpl.rcParams['toolbar'] == 'toolmanager'
                            else None)
        # Instantiate the toolbar matching the configured style, if the
        # subclass provides an implementation for it; otherwise no toolbar.
        if (mpl.rcParams["toolbar"] == "toolbar2"
                and self._toolbar2_class):
            self.toolbar = self._toolbar2_class(self.canvas)
        elif (mpl.rcParams["toolbar"] == "toolmanager"
                and self._toolmanager_toolbar_class):
            self.toolbar = self._toolmanager_toolbar_class(self.toolmanager)
        else:
            self.toolbar = None
        if self.toolmanager:
            tools.add_tools_to_manager(self.toolmanager)
            if self.toolbar:
                tools.add_tools_to_container(self.toolbar)
        @self.canvas.figure.add_axobserver
        def notify_axes_change(fig):
            # Called whenever the current Axes is changed.
            if self.toolmanager is None and self.toolbar is not None:
                self.toolbar.update()
    @classmethod
    def create_with_canvas(cls, canvas_class, figure, num):
        """
        Create a manager for a given *figure* using a specific *canvas_class*.
        Backends should override this method if they have specific needs for
        setting up the canvas or the manager.
        """
        return cls(canvas_class(figure), num)
    @classmethod
    def start_main_loop(cls):
        """
        Start the main event loop.
        This method is called by `.FigureManagerBase.pyplot_show`, which is the
        implementation of `.pyplot.show`. To customize the behavior of
        `.pyplot.show`, interactive backends should usually override
        `~.FigureManagerBase.start_main_loop`; if more customized logic is
        necessary, `~.FigureManagerBase.pyplot_show` can also be overridden.
        """
    @classmethod
    def pyplot_show(cls, *, block=None):
        """
        Show all figures. This method is the implementation of `.pyplot.show`.
        To customize the behavior of `.pyplot.show`, interactive backends
        should usually override `~.FigureManagerBase.start_main_loop`; if more
        customized logic is necessary, `~.FigureManagerBase.pyplot_show` can
        also be overridden.
        Parameters
        ----------
        block : bool, optional
            Whether to block by calling ``start_main_loop``. The default,
            None, means to block if we are neither in IPython's ``%pylab`` mode
            nor in ``interactive`` mode.
        """
        managers = Gcf.get_all_fig_managers()
        if not managers:
            return
        for manager in managers:
            try:
                manager.show()  # Emits a warning for non-interactive backend.
            except NonGuiException as exc:
                _api.warn_external(str(exc))
        if block is None:
            # Hack: Are we in IPython's %pylab mode? In pylab mode, IPython
            # (>= 0.10) tacks a _needmain attribute onto pyplot.show (always
            # set to False).
            pyplot_show = getattr(sys.modules.get("matplotlib.pyplot"), "show", None)
            ipython_pylab = hasattr(pyplot_show, "_needmain")
            block = not ipython_pylab and not is_interactive()
        if block:
            cls.start_main_loop()
    def show(self):
        """
        For GUI backends, show the figure window and redraw.
        For non-GUI backends, raise an exception, unless running headless (i.e.
        on Linux with an unset DISPLAY); this exception is converted to a
        warning in `.Figure.show`.
        """
        # This should be overridden in GUI backends.
        if sys.platform == "linux" and not os.environ.get("DISPLAY"):
            # We cannot check _get_running_interactive_framework() ==
            # "headless" because that would also suppress the warning when
            # $DISPLAY exists but is invalid, which is more likely an error and
            # thus warrants a warning.
            return
        raise NonGuiException(
            f"{type(self.canvas).__name__} is non-interactive, and thus cannot be "
            f"shown")
    def destroy(self):
        """Destroy the figure window; no-op in the base (non-GUI) class."""
        pass
    def full_screen_toggle(self):
        """Toggle full-screen mode; no-op in the base (non-GUI) class."""
        pass
    def resize(self, w, h):
        """For GUI backends, resize the window (in physical pixels)."""
    def get_window_title(self):
        """Return the title text of the window containing the figure."""
        return self._window_title
    def set_window_title(self, title):
        """
        Set the title text of the window containing the figure.
        Examples
        --------
        >>> fig = plt.figure()
        >>> fig.canvas.manager.set_window_title('My figure')
        """
        # This attribute is not defined in __init__ (but __init__ calls this
        # setter), as derived classes (real GUI managers) will store this
        # information directly on the widget; only the base (non-GUI) manager
        # class needs a specific attribute for it (so that filename escaping
        # can be checked in the test suite).
        self._window_title = title
# Convenience alias: the cursor enumeration lives in the backend-tools module.
cursors = tools.cursors
class _Mode(str, Enum):
NONE = ""
PAN = "pan/zoom"
ZOOM = "zoom rect"
def __str__(self):
return self.value
@property
def _navigate_mode(self):
return self.name if self is not _Mode.NONE else None
class NavigationToolbar2:
    """
    Base class for the navigation cursor, version 2.
    Backends must implement a canvas that handles connections for
    'button_press_event' and 'button_release_event'. See
    :meth:`FigureCanvasBase.mpl_connect` for more information.
    They must also define
    :meth:`save_figure`
        Save the current figure.
    :meth:`draw_rubberband` (optional)
        Draw the zoom to rect "rubberband" rectangle.
    :meth:`set_message` (optional)
        Display message.
    :meth:`set_history_buttons` (optional)
        You can change the history back / forward buttons to indicate disabled / enabled
        state.
    and override ``__init__`` to set up the toolbar -- without forgetting to
    call the base-class init. Typically, ``__init__`` needs to set up toolbar
    buttons connected to the `home`, `back`, `forward`, `pan`, `zoom`, and
    `save_figure` methods and using standard icons in the "images" subdirectory
    of the data path.
    That's it, we'll do the rest!
    """
    # list of toolitems to add to the toolbar, format is:
    # (
    #   text, # the text of the button (often not visible to users)
    #   tooltip_text, # the tooltip shown on hover (where possible)
    #   image_file, # name of the image for the button (without the extension)
    #   name_of_method, # name of the method in NavigationToolbar2 to call
    # )
    toolitems = (
        ('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan',
         'Left button pans, Right button zooms\n'
         'x/y fixes axis, CTRL fixes aspect',
         'move', 'pan'),
        ('Zoom', 'Zoom to rectangle\nx/y fixes axis', 'zoom_to_rect', 'zoom'),
        ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        (None, None, None, None),
        ('Save', 'Save the figure', 'filesave', 'save_figure'),
    )
    # Sentinel returned by save_figure when the backend cannot report whether
    # (or where) a file was actually saved.
    UNKNOWN_SAVED_STATUS = object()
    def __init__(self, canvas):
        self.canvas = canvas
        canvas.toolbar = self
        self._nav_stack = cbook._Stack()
        # This cursor will be set after the initial draw.
        self._last_cursor = tools.Cursors.POINTER
        self._id_press = self.canvas.mpl_connect(
            'button_press_event', self._zoom_pan_handler)
        self._id_release = self.canvas.mpl_connect(
            'button_release_event', self._zoom_pan_handler)
        self._id_drag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)
        self._pan_info = None
        self._zoom_info = None
        self.mode = _Mode.NONE  # a mode string for the status bar
        self.set_history_buttons()
    def set_message(self, s):
        """Display a message on toolbar or in status bar."""
    def draw_rubberband(self, event, x0, y0, x1, y1):
        """
        Draw a rectangle rubberband to indicate zoom limits.
        Note that it is not guaranteed that ``x0 <= x1`` and ``y0 <= y1``.
        """
    def remove_rubberband(self):
        """Remove the rubberband."""
    def home(self, *args):
        """
        Restore the original view.
        For convenience of being directly connected as a GUI callback, which
        often get passed additional parameters, this method accepts arbitrary
        parameters, but does not use them.
        """
        self._nav_stack.home()
        self.set_history_buttons()
        self._update_view()
    def back(self, *args):
        """
        Move back up the view lim stack.
        For convenience of being directly connected as a GUI callback, which
        often get passed additional parameters, this method accepts arbitrary
        parameters, but does not use them.
        """
        self._nav_stack.back()
        self.set_history_buttons()
        self._update_view()
    def forward(self, *args):
        """
        Move forward in the view lim stack.
        For convenience of being directly connected as a GUI callback, which
        often get passed additional parameters, this method accepts arbitrary
        parameters, but does not use them.
        """
        self._nav_stack.forward()
        self.set_history_buttons()
        self._update_view()
    def _update_cursor(self, event):
        """
        Update the cursor after a mouse move event or a tool (de)activation.
        """
        if self.mode and event.inaxes and event.inaxes.get_navigate():
            if (self.mode == _Mode.ZOOM
                    and self._last_cursor != tools.Cursors.SELECT_REGION):
                self.canvas.set_cursor(tools.Cursors.SELECT_REGION)
                self._last_cursor = tools.Cursors.SELECT_REGION
            elif (self.mode == _Mode.PAN
                  and self._last_cursor != tools.Cursors.MOVE):
                self.canvas.set_cursor(tools.Cursors.MOVE)
                self._last_cursor = tools.Cursors.MOVE
        elif self._last_cursor != tools.Cursors.POINTER:
            self.canvas.set_cursor(tools.Cursors.POINTER)
            self._last_cursor = tools.Cursors.POINTER
    @contextmanager
    def _wait_cursor_for_draw_cm(self):
        """
        Set the cursor to a wait cursor when drawing the canvas.
        In order to avoid constantly changing the cursor when the canvas
        changes frequently, do nothing if this context was triggered during the
        last second. (Optimally we'd prefer only setting the wait cursor if
        the *current* draw takes too long, but the current draw blocks the GUI
        thread).
        """
        self._draw_time, last_draw_time = (
            time.time(), getattr(self, "_draw_time", -np.inf))
        if self._draw_time - last_draw_time > 1:
            try:
                self.canvas.set_cursor(tools.Cursors.WAIT)
                yield
            finally:
                self.canvas.set_cursor(self._last_cursor)
        else:
            yield
    @staticmethod
    def _mouse_event_to_message(event):
        # Build the status-bar message for *event*: the formatted coordinates,
        # plus cursor data of the topmost artist under the cursor, if any.
        if event.inaxes and event.inaxes.get_navigate():
            try:
                s = event.inaxes.format_coord(event.xdata, event.ydata)
            except (ValueError, OverflowError):
                pass
            else:
                s = s.rstrip()
                artists = [a for a in event.inaxes._mouseover_set
                           if a.contains(event)[0] and a.get_visible()]
                if artists:
                    a = cbook._topmost_artist(artists)
                    if a is not event.inaxes.patch:
                        data = a.get_cursor_data(event)
                        if data is not None:
                            data_str = a.format_cursor_data(data).rstrip()
                            if data_str:
                                s = s + '\n' + data_str
                return s
        return ""
    def mouse_move(self, event):
        self._update_cursor(event)
        self.set_message(self._mouse_event_to_message(event))
    def _zoom_pan_handler(self, event):
        # Dispatch press/release events to the handler of the active mode.
        if self.mode == _Mode.PAN:
            if event.name == "button_press_event":
                self.press_pan(event)
            elif event.name == "button_release_event":
                self.release_pan(event)
        if self.mode == _Mode.ZOOM:
            if event.name == "button_press_event":
                self.press_zoom(event)
            elif event.name == "button_release_event":
                self.release_zoom(event)
    def _start_event_axes_interaction(self, event, *, method):
        """
        Return the list of Axes that a press in *event* should affect.
        *method* is "pan" or "zoom".  Axes are filtered by navigability and
        their ``can_{method}`` capability, then walked by decreasing zorder
        until an event-capturing Axes stops further propagation.
        """
        def _ax_filter(ax):
            return (ax.in_axes(event) and
                    ax.get_navigate() and
                    getattr(ax, f"can_{method}")()
                    )
        def _capture_events(ax):
            f = ax.get_forward_navigation_events()
            if f == "auto":  # (capture = patch visibility)
                f = not ax.patch.get_visible()
            return not f
        # get all relevant axes for the event
        axes = list(filter(_ax_filter, self.canvas.figure.get_axes()))
        if len(axes) == 0:
            return []
        if self._nav_stack() is None:
            self.push_current()  # Set the home button to this view.
        # group axes by zorder (reverse to trigger later axes first)
        grps = dict()
        for ax in reversed(axes):
            grps.setdefault(ax.get_zorder(), []).append(ax)
        axes_to_trigger = []
        # go through zorders in reverse until we hit a capturing axes
        for zorder in sorted(grps, reverse=True):
            for ax in grps[zorder]:
                axes_to_trigger.append(ax)
                # NOTE: shared axes are automatically triggered, but twin-axes not!
                axes_to_trigger.extend(ax._twinned_axes.get_siblings(ax))
                if _capture_events(ax):
                    break  # break if we hit a capturing axes
            else:
                # If the inner loop finished without an explicit break,
                # (e.g. no capturing axes was found) continue the
                # outer loop to the next zorder.
                continue
            # If the inner loop was terminated with an explicit break,
            # terminate the outer loop as well.
            break
        # avoid duplicated triggers (but keep order of list)
        axes_to_trigger = list(dict.fromkeys(axes_to_trigger))
        return axes_to_trigger
    def pan(self, *args):
        """
        Toggle the pan/zoom tool.
        Pan with left button, zoom with right.
        """
        if not self.canvas.widgetlock.available(self):
            self.set_message("pan unavailable")
            return
        if self.mode == _Mode.PAN:
            self.mode = _Mode.NONE
            self.canvas.widgetlock.release(self)
        else:
            self.mode = _Mode.PAN
            self.canvas.widgetlock(self)
        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self.mode._navigate_mode)
    _PanInfo = namedtuple("_PanInfo", "button axes cid")
    def press_pan(self, event):
        """Callback for mouse button press in pan/zoom mode."""
        if (event.button not in [MouseButton.LEFT, MouseButton.RIGHT]
                or event.x is None or event.y is None):
            return
        axes = self._start_event_axes_interaction(event, method="pan")
        if not axes:
            return
        # call "ax.start_pan(..)" on all relevant axes of an event
        for ax in axes:
            ax.start_pan(event.x, event.y, event.button)
        self.canvas.mpl_disconnect(self._id_drag)
        id_drag = self.canvas.mpl_connect("motion_notify_event", self.drag_pan)
        self._pan_info = self._PanInfo(
            button=event.button, axes=axes, cid=id_drag)
    def drag_pan(self, event):
        """Callback for dragging in pan/zoom mode."""
        for ax in self._pan_info.axes:
            # Using the recorded button at the press is safer than the current
            # button, as multiple buttons can get pressed during motion.
            ax.drag_pan(self._pan_info.button, event.key, event.x, event.y)
        self.canvas.draw_idle()
    def release_pan(self, event):
        """Callback for mouse button release in pan/zoom mode."""
        if self._pan_info is None:
            return
        self.canvas.mpl_disconnect(self._pan_info.cid)
        self._id_drag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)
        for ax in self._pan_info.axes:
            ax.end_pan()
        self.canvas.draw_idle()
        self._pan_info = None
        self.push_current()
    def zoom(self, *args):
        """Toggle zoom to rect mode."""
        # BUGFIX: the docstring above used to appear *after* the widgetlock
        # check, where it was a no-op string expression and left
        # ``zoom.__doc__`` unset; it must be the first statement.
        if not self.canvas.widgetlock.available(self):
            self.set_message("zoom unavailable")
            return
        if self.mode == _Mode.ZOOM:
            self.mode = _Mode.NONE
            self.canvas.widgetlock.release(self)
        else:
            self.mode = _Mode.ZOOM
            self.canvas.widgetlock(self)
        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self.mode._navigate_mode)
    _ZoomInfo = namedtuple("_ZoomInfo", "direction start_xy axes cid cbar")
    def press_zoom(self, event):
        """Callback for mouse button press in zoom to rect mode."""
        if (event.button not in [MouseButton.LEFT, MouseButton.RIGHT]
                or event.x is None or event.y is None):
            return
        axes = self._start_event_axes_interaction(event, method="zoom")
        if not axes:
            return
        id_zoom = self.canvas.mpl_connect(
            "motion_notify_event", self.drag_zoom)
        # A colorbar is one-dimensional, so we extend the zoom rectangle out
        # to the edge of the Axes bbox in the other dimension. To do that we
        # store the orientation of the colorbar for later.
        parent_ax = axes[0]
        if hasattr(parent_ax, "_colorbar"):
            cbar = parent_ax._colorbar.orientation
        else:
            cbar = None
        self._zoom_info = self._ZoomInfo(
            direction="in" if event.button == 1 else "out",
            start_xy=(event.x, event.y), axes=axes, cid=id_zoom, cbar=cbar)
    def drag_zoom(self, event):
        """Callback for dragging in zoom mode."""
        start_xy = self._zoom_info.start_xy
        ax = self._zoom_info.axes[0]
        (x1, y1), (x2, y2) = np.clip(
            [start_xy, [event.x, event.y]], ax.bbox.min, ax.bbox.max)
        key = event.key
        # Force the key on colorbars to extend the short-axis bbox
        if self._zoom_info.cbar == "horizontal":
            key = "x"
        elif self._zoom_info.cbar == "vertical":
            key = "y"
        if key == "x":
            y1, y2 = ax.bbox.intervaly
        elif key == "y":
            x1, x2 = ax.bbox.intervalx
        self.draw_rubberband(event, x1, y1, x2, y2)
    def release_zoom(self, event):
        """Callback for mouse button release in zoom to rect mode."""
        if self._zoom_info is None:
            return
        # We don't check the event button here, so that zooms can be cancelled
        # by (pressing and) releasing another mouse button.
        self.canvas.mpl_disconnect(self._zoom_info.cid)
        self.remove_rubberband()
        start_x, start_y = self._zoom_info.start_xy
        key = event.key
        # Force the key on colorbars to ignore the zoom-cancel on the
        # short-axis side
        if self._zoom_info.cbar == "horizontal":
            key = "x"
        elif self._zoom_info.cbar == "vertical":
            key = "y"
        # Ignore single clicks: 5 pixels is a threshold that allows the user to
        # "cancel" a zoom action by zooming by less than 5 pixels.
        if ((abs(event.x - start_x) < 5 and key != "y") or
                (abs(event.y - start_y) < 5 and key != "x")):
            self.canvas.draw_idle()
            self._zoom_info = None
            return
        for i, ax in enumerate(self._zoom_info.axes):
            # Detect whether this Axes is twinned with an earlier Axes in the
            # list of zoomed Axes, to avoid double zooming.
            twinx = any(ax.get_shared_x_axes().joined(ax, prev)
                        for prev in self._zoom_info.axes[:i])
            twiny = any(ax.get_shared_y_axes().joined(ax, prev)
                        for prev in self._zoom_info.axes[:i])
            ax._set_view_from_bbox(
                (start_x, start_y, event.x, event.y),
                self._zoom_info.direction, key, twinx, twiny)
        self.canvas.draw_idle()
        self._zoom_info = None
        self.push_current()
    def push_current(self):
        """Push the current view limits and position onto the stack."""
        self._nav_stack.push(
            WeakKeyDictionary(
                {ax: (ax._get_view(),
                      # Store both the original and modified positions.
                      (ax.get_position(True).frozen(),
                       ax.get_position().frozen()))
                 for ax in self.canvas.figure.axes}))
        self.set_history_buttons()
    def _update_view(self):
        """
        Update the viewlim and position from the view and position stack for
        each Axes.
        """
        nav_info = self._nav_stack()
        if nav_info is None:
            return
        # Retrieve all items at once to avoid any risk of GC deleting an Axes
        # while in the middle of the loop below.
        items = list(nav_info.items())
        for ax, (view, (pos_orig, pos_active)) in items:
            ax._set_view(view)
            # Restore both the original and modified positions
            ax._set_position(pos_orig, 'original')
            ax._set_position(pos_active, 'active')
        self.canvas.draw_idle()
    def configure_subplots(self, *args):
        # Show the existing tool window, if any; otherwise build a new one.
        if hasattr(self, "subplot_tool"):
            self.subplot_tool.figure.canvas.manager.show()
            return
        # This import needs to happen here due to circular imports.
        from matplotlib.figure import Figure
        with mpl.rc_context({"toolbar": "none"}):  # No navbar for the toolfig.
            manager = type(self.canvas).new_manager(Figure(figsize=(6, 3)), -1)
        manager.set_window_title("Subplot configuration tool")
        tool_fig = manager.canvas.figure
        tool_fig.subplots_adjust(top=0.9)
        self.subplot_tool = widgets.SubplotTool(self.canvas.figure, tool_fig)
        cid = self.canvas.mpl_connect(
            "close_event", lambda e: manager.destroy())
        def on_tool_fig_close(e):
            self.canvas.mpl_disconnect(cid)
            del self.subplot_tool
        tool_fig.canvas.mpl_connect("close_event", on_tool_fig_close)
        manager.show()
        return self.subplot_tool
    def save_figure(self, *args):
        """
        Save the current figure.
        Backend implementations may choose to return
        the absolute path of the saved file, if any, as
        a string.
        If no file is created then `None` is returned.
        If the backend does not implement this functionality
        then `NavigationToolbar2.UNKNOWN_SAVED_STATUS` is returned.
        Returns
        -------
        str or `NavigationToolbar2.UNKNOWN_SAVED_STATUS` or `None`
            The filepath of the saved figure.
            Returns `None` if figure is not saved.
            Returns `NavigationToolbar2.UNKNOWN_SAVED_STATUS` when
            the backend does not provide the information.
        """
        raise NotImplementedError
    def update(self):
        """Reset the Axes stack."""
        self._nav_stack.clear()
        self.set_history_buttons()
    def set_history_buttons(self):
        """Enable or disable the back/forward button."""
class ToolContainerBase:
    """
    Base class for all tool containers, e.g. toolbars.
    Attributes
    ----------
    toolmanager : `.ToolManager`
        The tools with which this `ToolContainer` wants to communicate.
    """
    _icon_extension = '.png'
    """
    Toolcontainer button icon image format extension
    **String**: Image extension
    """
    def __init__(self, toolmanager):
        self.toolmanager = toolmanager
        # Mirror toolmanager events into this container: display messages,
        # and drop the toolitem when the corresponding tool is removed.
        toolmanager.toolmanager_connect(
            'tool_message_event',
            lambda event: self.set_message(event.message))
        toolmanager.toolmanager_connect(
            'tool_removed_event',
            lambda event: self.remove_toolitem(event.tool.name))
    def _tool_toggled_cbk(self, event):
        """
        Capture the 'tool_trigger_[name]'
        This only gets used for toggled tools.
        """
        self.toggle_toolitem(event.tool.name, event.tool.toggled)
    def add_tool(self, tool, group, position=-1):
        """
        Add a tool to this container.
        Parameters
        ----------
        tool : tool_like
            The tool to add, see `.ToolManager.get_tool`.
        group : str
            The name of the group to add this tool to.
        position : int, default: -1
            The position within the group to place this tool.
        """
        tool = self.toolmanager.get_tool(tool)
        image = self._get_image_filename(tool)
        # A tool is toggleable iff it exposes a (non-None-able) 'toggled' attr.
        toggle = getattr(tool, 'toggled', None) is not None
        self.add_toolitem(tool.name, group, position,
                          image, tool.description, toggle)
        if toggle:
            # Keep the button state in sync with the tool's toggled state.
            self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
                                                 self._tool_toggled_cbk)
            # If initially toggled
            if tool.toggled:
                self.toggle_toolitem(tool.name, True)
    def _get_image_filename(self, tool):
        """Resolve a tool icon's filename."""
        if not tool.image:
            return None
        if os.path.isabs(tool.image):
            filename = tool.image
        else:
            if "image" in getattr(tool, "__dict__", {}):
                raise ValueError("If 'tool.image' is an instance variable, "
                                 "it must be an absolute path")
            # A relative path is resolved against the source file of the
            # class (searching the tool's MRO) that defines 'image'.
            for cls in type(tool).__mro__:
                if "image" in vars(cls):
                    try:
                        src = inspect.getfile(cls)
                        break
                    except (OSError, TypeError):
                        raise ValueError("Failed to locate source file "
                                         "where 'tool.image' is defined") from None
            else:
                raise ValueError("Failed to find parent class defining 'tool.image'")
            filename = str(pathlib.Path(src).parent / tool.image)
        # Accept the path as given, or with the container's icon extension
        # appended.
        for filename in [filename, filename + self._icon_extension]:
            if os.path.isfile(filename):
                return os.path.abspath(filename)
        for fname in [  # Fallback; once deprecation elapses.
            tool.image,
            tool.image + self._icon_extension,
            cbook._get_data_path("images", tool.image),
            cbook._get_data_path("images", tool.image + self._icon_extension),
        ]:
            if os.path.isfile(fname):
                _api.warn_deprecated(
                    "3.9", message=f"Loading icon {tool.image!r} from the current "
                    "directory or from Matplotlib's image directory. This behavior "
                    "is deprecated since %(since)s and will be removed in %(removal)s; "
                    "Tool.image should be set to a path relative to the Tool's source "
                    "file, or to an absolute path.")
                return os.path.abspath(fname)
    def trigger_tool(self, name):
        """
        Trigger the tool.
        Parameters
        ----------
        name : str
            Name (id) of the tool triggered from within the container.
        """
        self.toolmanager.trigger_tool(name, sender=self)
    def add_toolitem(self, name, group, position, image, description, toggle):
        """
        A hook to add a toolitem to the container.
        This hook must be implemented in each backend and contains the
        backend-specific code to add an element to the toolbar.
        .. warning::
            This is part of the backend implementation and should
            not be called by end-users. They should instead call
            `.ToolContainerBase.add_tool`.
        The callback associated with the button click event
        must be *exactly* ``self.trigger_tool(name)``.
        Parameters
        ----------
        name : str
            Name of the tool to add, this gets used as the tool's ID and as the
            default label of the buttons.
        group : str
            Name of the group that this tool belongs to.
        position : int
            Position of the tool within its group, if -1 it goes at the end.
        image : str
            Filename of the image for the button or `None`.
        description : str
            Description of the tool, used for the tooltips.
        toggle : bool
            * `True` : The button is a toggle (change the pressed/unpressed
              state between consecutive clicks).
            * `False` : The button is a normal button (returns to unpressed
              state after release).
        """
        raise NotImplementedError
    def toggle_toolitem(self, name, toggled):
        """
        A hook to toggle a toolitem without firing an event.
        This hook must be implemented in each backend and contains the
        backend-specific code to silently toggle a toolbar element.
        .. warning::
            This is part of the backend implementation and should
            not be called by end-users. They should instead call
            `.ToolManager.trigger_tool` or `.ToolContainerBase.trigger_tool`
            (which are equivalent).
        Parameters
        ----------
        name : str
            Id of the tool to toggle.
        toggled : bool
            Whether to set this tool as toggled or not.
        """
        raise NotImplementedError
    def remove_toolitem(self, name):
        """
        A hook to remove a toolitem from the container.
        This hook must be implemented in each backend and contains the
        backend-specific code to remove an element from the toolbar; it is
        called when `.ToolManager` emits a ``tool_removed_event``.
        Because some tools are present only on the `.ToolManager` but not on
        the `ToolContainer`, this method must be a no-op when called on a tool
        absent from the container.
        .. warning::
            This is part of the backend implementation and should
            not be called by end-users. They should instead call
            `.ToolManager.remove_tool`.
        Parameters
        ----------
        name : str
            Name of the tool to remove.
        """
        raise NotImplementedError
    def set_message(self, s):
        """
        Display a message on the toolbar.
        Parameters
        ----------
        s : str
            Message text.
        """
        raise NotImplementedError
class _Backend:
    # A backend can be defined by using the following pattern:
    #
    # @_Backend.export
    # class FooBackend(_Backend):
    #     # override the attributes and methods documented below.
    # `backend_version` may be overridden by the subclass.
    backend_version = "unknown"
    # The `FigureCanvas` class must be defined.
    FigureCanvas = None
    # For interactive backends, the `FigureManager` class must be overridden.
    FigureManager = FigureManagerBase
    # For interactive backends, `mainloop` should be a function taking no
    # argument and starting the backend main loop. It should be left as None
    # for non-interactive backends.
    mainloop = None
    # The following methods will be automatically defined and exported, but
    # can be overridden.
    @classmethod
    def new_figure_manager(cls, num, *args, **kwargs):
        """Create a new figure manager instance."""
        # This import needs to happen here due to circular imports.
        from matplotlib.figure import Figure
        fig_cls = kwargs.pop('FigureClass', Figure)
        fig = fig_cls(*args, **kwargs)
        return cls.new_figure_manager_given_figure(num, fig)
    @classmethod
    def new_figure_manager_given_figure(cls, num, figure):
        """Create a new figure manager instance for the given figure."""
        return cls.FigureCanvas.new_manager(figure, num)
    @classmethod
    def draw_if_interactive(cls):
        """Redraw the active figure, but only for interactive backends."""
        manager_class = cls.FigureCanvas.manager_class
        # Interactive backends reimplement start_main_loop or pyplot_show.
        backend_is_interactive = (
            manager_class.start_main_loop != FigureManagerBase.start_main_loop
            or manager_class.pyplot_show != FigureManagerBase.pyplot_show)
        if backend_is_interactive and is_interactive():
            manager = Gcf.get_active()
            if manager:
                manager.canvas.draw_idle()
    @classmethod
    def show(cls, *, block=None):
        """
        Show all figures.
        `show` blocks by calling `mainloop` if *block* is ``True``, or if it is
        ``None`` and we are not in `interactive` mode and if IPython's
        ``%matplotlib`` integration has not been activated.
        """
        managers = Gcf.get_all_fig_managers()
        if not managers:
            return
        for manager in managers:
            try:
                manager.show()  # Emits a warning for non-interactive backend.
            except NonGuiException as exc:
                _api.warn_external(str(exc))
        if cls.mainloop is None:
            return
        if block is None:
            # Hack: Is IPython's %matplotlib integration activated? If so,
            # IPython's activate_matplotlib (>= 0.10) tacks a _needmain
            # attribute onto pyplot.show (always set to False).
            pyplot_show = getattr(sys.modules.get("matplotlib.pyplot"), "show", None)
            ipython_pylab = hasattr(pyplot_show, "_needmain")
            block = not ipython_pylab and not is_interactive()
        if block:
            cls.mainloop()
    # This method is the one actually exporting the required methods.
    @staticmethod
    def export(cls):
        """Class decorator: re-export the backend API at the module level."""
        for name in [
                "backend_version",
                "FigureCanvas",
                "FigureManager",
                "new_figure_manager",
                "new_figure_manager_given_figure",
                "draw_if_interactive",
                "show",
        ]:
            # Copy each attribute onto the module defining *cls*, so that the
            # module exposes the conventional backend-level API.
            setattr(sys.modules[cls.__module__], name, getattr(cls, name))
        # For back-compatibility, generate a shim `Show` class.
        class Show(ShowBase):
            def mainloop(self):
                return cls.mainloop()
        setattr(sys.modules[cls.__module__], "Show", Show)
        return cls
class ShowBase(_Backend):
    """
    Simple base class to generate a ``show()`` function in backends.
    Subclass must override ``mainloop()`` method.
    """
    def __call__(self, block=None):
        # Calling an instance simply delegates to the classmethod `show`
        # inherited from `_Backend`.
        return self.show(block=block)
from matplotlib import _api, backend_tools, cbook, widgets
class ToolEvent:
    """Event for tool manipulation (add/remove)."""
    def __init__(self, name, sender, tool, data=None):
        # Plain record of who did what to which tool, with optional payload.
        self.name, self.sender = name, sender
        self.tool, self.data = tool, data
class ToolTriggerEvent(ToolEvent):
    """Event to inform that a tool has been triggered."""
    def __init__(self, name, sender, tool, canvasevent=None, data=None):
        # Record the originating canvas event on top of the base fields.
        self.canvasevent = canvasevent
        super().__init__(name, sender, tool, data)
class ToolManagerMessageEvent:
    """
    Event carrying messages from toolmanager.
    Messages usually get displayed to the user by the toolbar.
    """
    def __init__(self, name, sender, message):
        # Simple record: the event name, the emitter, and the message text.
        self.name, self.sender, self.message = name, sender, message
class ToolManager:
    """
    Manager for actions triggered by user interactions (key press, toolbar
    clicks, ...) on a Figure.

    Attributes
    ----------
    figure : `.Figure`
    keypresslock : `~matplotlib.widgets.LockDraw`
        `.LockDraw` object to know if the `canvas` key_press_event is locked.
    messagelock : `~matplotlib.widgets.LockDraw`
        `.LockDraw` object to know if the message is available to write.
    """

    def __init__(self, figure=None):
        # cid of the canvas 'key_press_event' connection; None until a figure
        # is bound via set_figure.
        self._key_press_handler_id = None
        self._tools = {}    # tool name -> tool instance
        self._keys = {}     # key string -> name of the tool it triggers
        # radio_group -> name of the currently toggled tool; the None group
        # is not mutually exclusive and maps to a *set* of toggled names.
        self._toggled = {}
        self._callbacks = cbook.CallbackRegistry()
        # to process keypress event
        self.keypresslock = widgets.LockDraw()
        self.messagelock = widgets.LockDraw()
        self._figure = None
        self.set_figure(figure)

    @property
    def canvas(self):
        """Canvas managed by FigureManager."""
        if not self._figure:
            return None
        return self._figure.canvas

    @property
    def figure(self):
        """Figure that holds the canvas."""
        return self._figure

    @figure.setter
    def figure(self, figure):
        # Route through set_figure so the keypress hook gets rebound.
        self.set_figure(figure)

    def set_figure(self, figure, update_tools=True):
        """
        Bind the given figure to the tools.

        Parameters
        ----------
        figure : `.Figure`
        update_tools : bool, default: True
            Force tools to update figure.
        """
        # Drop the key-press hook on the previous canvas before rebinding.
        if self._key_press_handler_id:
            self.canvas.mpl_disconnect(self._key_press_handler_id)
        self._figure = figure
        if figure:
            self._key_press_handler_id = self.canvas.mpl_connect(
                'key_press_event', self._key_press)
        if update_tools:
            for tool in self._tools.values():
                tool.figure = figure

    def toolmanager_connect(self, s, func):
        """
        Connect event with string *s* to *func*.

        Parameters
        ----------
        s : str
            The name of the event. The following events are recognized:

            - 'tool_message_event'
            - 'tool_removed_event'
            - 'tool_added_event'

            For every tool added a new event is created

            - 'tool_trigger_TOOLNAME', where TOOLNAME is the id of the tool.

        func : callable
            Callback function for the toolmanager event with signature::

                def func(event: ToolEvent) -> Any

        Returns
        -------
        cid
            The callback id for the connection. This can be used in
            `.toolmanager_disconnect`.
        """
        return self._callbacks.connect(s, func)

    def toolmanager_disconnect(self, cid):
        """
        Disconnect callback id *cid*.

        Example usage::

            cid = toolmanager.toolmanager_connect('tool_trigger_zoom', onpress)
            #...later
            toolmanager.toolmanager_disconnect(cid)
        """
        return self._callbacks.disconnect(cid)

    def message_event(self, message, sender=None):
        """Emit a `ToolManagerMessageEvent`."""
        if sender is None:
            sender = self
        s = 'tool_message_event'
        event = ToolManagerMessageEvent(s, sender, message)
        self._callbacks.process(s, event)

    @property
    def active_toggle(self):
        """Currently toggled tools."""
        return self._toggled

    def get_tool_keymap(self, name):
        """
        Return the keymap associated with the specified tool.

        Parameters
        ----------
        name : str
            Name of the Tool.

        Returns
        -------
        list of str
            List of keys associated with the tool.
        """
        keys = [k for k, i in self._keys.items() if i == name]
        return keys

    def _remove_keys(self, name):
        # Unbind every key currently mapped to the tool *name*.
        for k in self.get_tool_keymap(name):
            del self._keys[k]

    def update_keymap(self, name, key):
        """
        Set the keymap to associate with the specified tool.

        Parameters
        ----------
        name : str
            Name of the Tool.
        key : str or list of str
            Keys to associate with the tool.

        Raises
        ------
        KeyError
            If *name* is not a managed tool.
        """
        if name not in self._tools:
            raise KeyError(f'{name!r} not in Tools')
        # Old bindings for this tool are replaced, not merged.
        self._remove_keys(name)
        if isinstance(key, str):
            key = [key]
        for k in key:
            # A key may only trigger one tool; warn when stealing it.
            if k in self._keys:
                _api.warn_external(
                    f'Key {k} changed from {self._keys[k]} to {name}')
            self._keys[k] = name

    def remove_tool(self, name):
        """
        Remove tool named *name*.

        Parameters
        ----------
        name : str
            Name of the tool.
        """
        tool = self.get_tool(name)
        if getattr(tool, 'toggled', False):  # If it's a toggled toggle tool, untoggle
            self.trigger_tool(tool, 'toolmanager')
        self._remove_keys(name)
        # Notify listeners before the tool disappears from the registry.
        event = ToolEvent('tool_removed_event', self, tool)
        self._callbacks.process(event.name, event)
        del self._tools[name]

    def add_tool(self, name, tool, *args, **kwargs):
        """
        Add *tool* to `ToolManager`.

        If successful, adds a new event ``tool_trigger_{name}`` where
        ``{name}`` is the *name* of the tool; the event is fired every time the
        tool is triggered.

        Parameters
        ----------
        name : str
            Name of the tool, treated as the ID, has to be unique.
        tool : type
            Class of the tool to be added. A subclass will be used
            instead if one was registered for the current canvas class.
        *args, **kwargs
            Passed to the *tool*'s constructor.

        See Also
        --------
        matplotlib.backend_tools.ToolBase : The base class for tools.
        """
        # Allow a backend-specific subclass registered for this canvas class
        # to override the requested tool class.
        tool_cls = backend_tools._find_tool_class(type(self.canvas), tool)
        if not tool_cls:
            raise ValueError('Impossible to find class for %s' % str(tool))
        if name in self._tools:
            _api.warn_external('A "Tool class" with the same name already '
                               'exists, not added')
            return self._tools[name]
        tool_obj = tool_cls(self, name, *args, **kwargs)
        self._tools[name] = tool_obj
        if tool_obj.default_keymap is not None:
            self.update_keymap(name, tool_obj.default_keymap)
        # For toggle tools init the radio_group in self._toggled
        if isinstance(tool_obj, backend_tools.ToolToggleBase):
            # None group is not mutually exclusive, a set is used to keep track
            # of all toggled tools in this group
            if tool_obj.radio_group is None:
                self._toggled.setdefault(None, set())
            else:
                self._toggled.setdefault(tool_obj.radio_group, None)
            # If initially toggled
            if tool_obj.toggled:
                self._handle_toggle(tool_obj, None, None)
        tool_obj.set_figure(self.figure)
        event = ToolEvent('tool_added_event', self, tool_obj)
        self._callbacks.process(event.name, event)
        return tool_obj

    def _handle_toggle(self, tool, canvasevent, data):
        """
        Toggle tools, need to untoggle prior to using other Toggle tool.
        Called from trigger_tool.

        Parameters
        ----------
        tool : `.ToolBase`
        canvasevent : Event
            Original Canvas event or None.
        data : object
            Extra data to pass to the tool when triggering.
        """
        radio_group = tool.radio_group
        # radio_group None is not mutually exclusive
        # just keep track of toggled tools in this group
        if radio_group is None:
            if tool.name in self._toggled[None]:
                self._toggled[None].remove(tool.name)
            else:
                self._toggled[None].add(tool.name)
            return
        # If the tool already has a toggled state, untoggle it
        if self._toggled[radio_group] == tool.name:
            toggled = None
        # If no tool was toggled in the radio_group
        # toggle it
        elif self._toggled[radio_group] is None:
            toggled = tool.name
        # Other tool in the radio_group is toggled
        else:
            # Untoggle previously toggled tool
            self.trigger_tool(self._toggled[radio_group],
                              self,
                              canvasevent,
                              data)
            toggled = tool.name
        # Keep track of the toggled tool in the radio_group
        self._toggled[radio_group] = toggled

    def trigger_tool(self, name, sender=None, canvasevent=None, data=None):
        """
        Trigger a tool and emit the ``tool_trigger_{name}`` event.

        Parameters
        ----------
        name : str
            Name of the tool.
        sender : object
            Object that wishes to trigger the tool.
        canvasevent : Event
            Original Canvas event or None.
        data : object
            Extra data to pass to the tool when triggering.
        """
        tool = self.get_tool(name)
        if tool is None:
            return
        if sender is None:
            sender = self
        # Maintain the radio-group bookkeeping before running the tool.
        if isinstance(tool, backend_tools.ToolToggleBase):
            self._handle_toggle(tool, canvasevent, data)
        tool.trigger(sender, canvasevent, data)  # Actually trigger Tool.
        s = 'tool_trigger_%s' % name
        event = ToolTriggerEvent(s, sender, tool, canvasevent, data)
        self._callbacks.process(s, event)

    def _key_press(self, event):
        # Canvas 'key_press_event' handler: map the key to a tool and fire it,
        # unless a widget has locked key handling.
        if event.key is None or self.keypresslock.locked():
            return
        name = self._keys.get(event.key, None)
        if name is None:
            return
        self.trigger_tool(name, canvasevent=event)

    @property
    def tools(self):
        """A dict mapping tool name -> controlled tool."""
        return self._tools

    def get_tool(self, name, warn=True):
        """
        Return the tool object with the given name.

        For convenience, this passes tool objects through.

        Parameters
        ----------
        name : str or `.ToolBase`
            Name of the tool, or the tool itself.
        warn : bool, default: True
            Whether a warning should be emitted if no tool with the given name
            exists.

        Returns
        -------
        `.ToolBase` or None
            The tool or None if no tool with the given name exists.
        """
        if (isinstance(name, backend_tools.ToolBase)
                and name.name in self._tools):
            return name
        if name not in self._tools:
            if warn:
                _api.warn_external(
                    f"ToolManager does not control tool {name!r}")
            return None
        return self._tools[name]
"""
Abstract base classes define the primitives for Tools.
These tools are used by `matplotlib.backend_managers.ToolManager`
:class:`ToolBase`
Simple stateless tool
:class:`ToolToggleBase`
Tool that has two states, only one Toggle tool can be
active at any given time for the same
`matplotlib.backend_managers.ToolManager`
"""
import enum
import functools
import re
import time
from types import SimpleNamespace
import uuid
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib._pylab_helpers import Gcf
from matplotlib import _api, cbook
class Cursors(enum.IntEnum):  # Must subclass int for the macOS backend.
    """Backend-independent cursor types."""
    # Values are small consecutive ints (enum.auto, starting at 1); backends
    # translate them to native cursor shapes.
    POINTER = enum.auto()
    HAND = enum.auto()
    SELECT_REGION = enum.auto()
    MOVE = enum.auto()
    WAIT = enum.auto()
    RESIZE_HORIZONTAL = enum.auto()
    RESIZE_VERTICAL = enum.auto()
cursors = Cursors  # Backcompat.
# _tool_registry, _register_tool_class, and _find_tool_class implement a
# mechanism through which ToolManager.add_tool can determine whether a subclass
# of the requested tool class has been registered (either for the current
# canvas class or for a parent class), in which case that tool subclass will be
# instantiated instead. This is the mechanism used e.g. to allow different
# GUI backends to implement different specializations for ConfigureSubplots.
_tool_registry = set()  # {(canvas_cls, tool_cls), ...}
def _register_tool_class(canvas_cls, tool_cls=None):
"""Decorator registering *tool_cls* as a tool class for *canvas_cls*."""
if tool_cls is None:
return functools.partial(_register_tool_class, canvas_cls)
_tool_registry.add((canvas_cls, tool_cls))
return tool_cls
def _find_tool_class(canvas_cls, tool_cls):
    """Find a subclass of *tool_cls* registered for *canvas_cls*."""
    # Walk the canvas MRO from most to least specific so a tool registered
    # for a canvas subclass wins over one registered for its base class.
    for parent in canvas_cls.__mro__:
        match = next(
            (child for child in _api.recursive_subclasses(tool_cls)
             if (parent, child) in _tool_registry),
            None)
        if match is not None:
            return match
    # Nothing registered: fall back to the requested class itself.
    return tool_cls
# Views positions tool
_views_positions = 'viewpos'  # Registry name of the ToolViewsPositions tool.
class ToolBase:
    """
    Base tool class.

    A base tool, only implements `trigger` method or no method at all.
    The tool is instantiated by `matplotlib.backend_managers.ToolManager`.
    """

    default_keymap = None
    """
    Keymap to associate with this tool.

    ``list[str]``: keys that trigger this tool when a keypress event is
    emitted on ``self.figure.canvas``.  Looked up on the instance, so it can
    be a property (the built-in tools use this to read rcParams at
    instantiation time).
    """

    description = None
    """
    Description of the Tool.

    `str`: Tooltip used if the Tool is included in a Toolbar.
    """

    image = None
    """
    Icon filename.

    ``str | None``: Filename of the Toolbar icon, either absolute or relative
    to the directory of the Python source file where the ``Tool.image`` class
    attribute is defined (in which case it cannot be an instance attribute).
    The extension is optional; leaving it off lets each backend pick its
    preferred icon format.  If None, the *name* labels the toolbar button.
    """

    def __init__(self, toolmanager, name):
        self._name = name
        self._toolmanager = toolmanager
        self._figure = None

    @property
    def name(self):
        """The tool id (str, must be unique among tools of a tool manager)."""
        return self._name

    @property
    def toolmanager(self):
        """The `.ToolManager` that controls this tool."""
        return self._toolmanager

    @property
    def canvas(self):
        """The canvas of the figure affected by this tool, or None."""
        if self._figure is None:
            return None
        return self._figure.canvas

    def set_figure(self, figure):
        self._figure = figure

    @property
    def figure(self):
        """The Figure affected by this tool, or None."""
        return self._figure

    @figure.setter
    def figure(self, figure):
        # Route through set_figure so that subclass overrides take effect.
        self.set_figure(figure)

    def _make_classic_style_pseudo_toolbar(self):
        """
        Return a placeholder object with a single `canvas` attribute.

        This is useful to reuse the implementations of tools already provided
        by the classic Toolbars.
        """
        return SimpleNamespace(canvas=self.canvas)

    def trigger(self, sender, event, data=None):
        """
        Called when this tool gets used.

        This method is called by `.ToolManager.trigger_tool`.

        Parameters
        ----------
        event : `.Event`
            The canvas event that caused this tool to be called.
        sender : object
            Object that requested the tool to be triggered.
        data : object
            Extra data.
        """
class ToolToggleBase(ToolBase):
    """
    Toggleable tool.

    Every time it is triggered, it switches between enable and disable.

    Parameters
    ----------
    ``*args``
        Variable length argument to be used by the Tool.
    ``**kwargs``
        `toggled` if present and True, sets the initial state of the Tool.
        Arbitrary keyword arguments to be consumed by the Tool.
    """

    radio_group = None
    """
    Attribute to group 'radio' like tools (mutually exclusive).

    `str` that identifies the group or **None** if not belonging to a group.
    """

    cursor = None
    """Cursor to use when the tool is active."""

    default_toggled = False
    """Default of toggled state."""

    def __init__(self, *args, **kwargs):
        # An explicit ``toggled`` kwarg overrides the class default.
        self._toggled = kwargs.pop('toggled', self.default_toggled)
        super().__init__(*args, **kwargs)

    def trigger(self, sender, event, data=None):
        """Calls `enable` or `disable` based on `toggled` value."""
        (self.disable if self._toggled else self.enable)(event)
        self._toggled = not self._toggled

    def enable(self, event=None):
        """
        Enable the toggle tool.

        `trigger` calls this method when `toggled` is False.
        """

    def disable(self, event=None):
        """
        Disable the toggle tool.

        `trigger` call this method when `toggled` is True.

        This can happen in different circumstances.

        * Click on the toolbar tool button.
        * Call to `matplotlib.backend_managers.ToolManager.trigger_tool`.
        * Another `ToolToggleBase` derived tool is triggered
          (from the same `.ToolManager`).
        """

    @property
    def toggled(self):
        """State of the toggled tool."""
        return self._toggled

    def set_figure(self, figure):
        was_toggled = self.toggled
        if was_toggled:
            if self.figure:
                # Run the disable path to detach from the old figure.
                self.trigger(self, None)
            else:
                # No old figure: only flip the flag so that the trigger
                # below flips it back while enabling on the new figure.
                self._toggled = False
        super().set_figure(figure)
        if was_toggled:
            if figure:
                # Re-enable on the new figure.
                self.trigger(self, None)
            else:
                # No new figure: trigger cannot run, restore the flag.
                self._toggled = True
class ToolSetCursor(ToolBase):
    """
    Change to the current cursor while inaxes.

    This tool keeps track of all `ToolToggleBase` derived tools, and updates
    the cursor when a tool gets triggered.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._id_drag = None
        self._current_tool = None
        self._default_cursor = cursors.POINTER
        self._last_cursor = self._default_cursor
        self.toolmanager.toolmanager_connect('tool_added_event',
                                             self._add_tool_cbk)
        # Tools registered before this one also need their cursor hooked up.
        for tool in self.toolmanager.tools.values():
            self._add_tool_cbk(mpl.backend_managers.ToolEvent(
                'tool_added_event', self.toolmanager, tool))

    def set_figure(self, figure):
        # Move the motion-notify subscription over to the new canvas.
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if figure:
            self._id_drag = self.canvas.mpl_connect(
                'motion_notify_event', self._set_cursor_cbk)

    def _add_tool_cbk(self, event):
        """Process every newly added tool."""
        if getattr(event.tool, 'cursor', None) is not None:
            self.toolmanager.toolmanager_connect(
                f'tool_trigger_{event.tool.name}', self._tool_trigger_cbk)

    def _tool_trigger_cbk(self, event):
        # Remember the tool only while it is toggled on.
        self._current_tool = event.tool if event.tool.toggled else None
        self._set_cursor_cbk(event.canvasevent)

    def _set_cursor_cbk(self, event):
        if not event or not self.canvas:
            return
        tool = self._current_tool
        inaxes = getattr(event, "inaxes", None)
        if tool and inaxes and inaxes.get_navigate():
            # Inside a navigable Axes: show the active tool's cursor.
            if self._last_cursor != tool.cursor:
                self.canvas.set_cursor(tool.cursor)
                self._last_cursor = tool.cursor
        elif self._last_cursor != self._default_cursor:
            # Otherwise fall back to the default pointer.
            self.canvas.set_cursor(self._default_cursor)
            self._last_cursor = self._default_cursor
class ToolCursorPosition(ToolBase):
    """
    Send message with the current pointer position.

    This tool runs in the background reporting the position of the cursor.
    """

    def __init__(self, *args, **kwargs):
        self._id_drag = None
        super().__init__(*args, **kwargs)

    def set_figure(self, figure):
        # Re-target the motion-notify subscription at the new canvas.
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if figure:
            self._id_drag = self.canvas.mpl_connect(
                'motion_notify_event', self.send_message)

    def send_message(self, event):
        """Call `matplotlib.backend_managers.ToolManager.message_event`."""
        # Respect tools (e.g. pan) that have locked the message area.
        if self.toolmanager.messagelock.locked():
            return
        from matplotlib.backend_bases import NavigationToolbar2
        self.toolmanager.message_event(
            NavigationToolbar2._mouse_event_to_message(event), self)
class RubberbandBase(ToolBase):
    """Draw and remove a rubberband."""

    def trigger(self, sender, event, data=None):
        """Call `draw_rubberband` or `remove_rubberband` based on data."""
        # Stay out of the way if another widget holds the canvas lock.
        if not self.figure.canvas.widgetlock.available(sender):
            return
        if data is None:
            self.remove_rubberband()
        else:
            self.draw_rubberband(*data)

    def draw_rubberband(self, *data):
        """
        Draw rubberband.

        This method must get implemented per backend.
        """
        raise NotImplementedError

    def remove_rubberband(self):
        """
        Remove rubberband.

        This method should get implemented per backend.
        """
class ToolQuit(ToolBase):
    """Tool to call the figure manager destroy method."""

    description = 'Quit the figure'
    # Keymap read lazily from rcParams via a property.
    default_keymap = property(lambda self: mpl.rcParams['keymap.quit'])

    def trigger(self, sender, event, data=None):
        # Destroy only the figure this tool belongs to.
        Gcf.destroy_fig(self.figure)
class ToolQuitAll(ToolBase):
    """Tool to call the figure manager destroy method."""

    description = 'Quit all figures'
    # Keymap read lazily from rcParams via a property.
    default_keymap = property(lambda self: mpl.rcParams['keymap.quit_all'])

    def trigger(self, sender, event, data=None):
        # Destroy every figure tracked by Gcf, not just this one.
        Gcf.destroy_all()
class ToolGrid(ToolBase):
    """Tool to toggle the major grids of the figure."""

    description = 'Toggle major grids'
    default_keymap = property(lambda self: mpl.rcParams['keymap.grid'])

    def trigger(self, sender, event, data=None):
        sentinel = str(uuid.uuid4())
        # Trigger grid switching by temporarily setting :rc:`keymap.grid`
        # to a unique key and sending an appropriate event.
        with (cbook._setattr_cm(event, key=sentinel),
              mpl.rc_context({'keymap.grid': sentinel})):
            # Delegate the actual grid toggling to key_press_handler.
            mpl.backend_bases.key_press_handler(event, self.figure.canvas)
class ToolMinorGrid(ToolBase):
    """Tool to toggle the major and minor grids of the figure."""

    description = 'Toggle major and minor grids'
    default_keymap = property(lambda self: mpl.rcParams['keymap.grid_minor'])

    def trigger(self, sender, event, data=None):
        sentinel = str(uuid.uuid4())
        # Trigger grid switching by temporarily setting :rc:`keymap.grid_minor`
        # to a unique key and sending an appropriate event.
        with (cbook._setattr_cm(event, key=sentinel),
              mpl.rc_context({'keymap.grid_minor': sentinel})):
            # Delegate the actual grid toggling to key_press_handler.
            mpl.backend_bases.key_press_handler(event, self.figure.canvas)
class ToolFullScreen(ToolBase):
    """Tool to toggle full screen."""

    description = 'Toggle fullscreen mode'
    default_keymap = property(lambda self: mpl.rcParams['keymap.fullscreen'])

    def trigger(self, sender, event, data=None):
        # The figure manager owns the window; let it do the toggling.
        self.figure.canvas.manager.full_screen_toggle()
class AxisScaleBase(ToolToggleBase):
    """Base Tool to toggle between linear and logarithmic."""

    def trigger(self, sender, event, data=None):
        # Only react when the event happened inside an Axes.
        if event.inaxes is None:
            return
        super().trigger(sender, event, data)

    def enable(self, event=None):
        # Toggled on -> log scale on the Axes under the cursor; subclasses
        # provide set_scale for the specific axis.
        self.set_scale(event.inaxes, 'log')
        self.figure.canvas.draw_idle()

    def disable(self, event=None):
        # Toggled off -> back to linear scale.
        self.set_scale(event.inaxes, 'linear')
        self.figure.canvas.draw_idle()
class ToolYScale(AxisScaleBase):
    """Tool to toggle between linear and logarithmic scales on the Y axis."""

    description = 'Toggle scale Y axis'
    default_keymap = property(lambda self: mpl.rcParams['keymap.yscale'])

    def set_scale(self, ax, scale):
        # Hook called by AxisScaleBase.enable/disable.
        ax.set_yscale(scale)
class ToolXScale(AxisScaleBase):
    """Tool to toggle between linear and logarithmic scales on the X axis."""

    description = 'Toggle scale X axis'
    default_keymap = property(lambda self: mpl.rcParams['keymap.xscale'])

    def set_scale(self, ax, scale):
        # Hook called by AxisScaleBase.enable/disable.
        ax.set_xscale(scale)
class ToolViewsPositions(ToolBase):
    """
    Auxiliary Tool to handle changes in views and positions.

    Runs in the background and should get used by all the tools that
    need to access the figure's history of views and positions, e.g.

    * `ToolZoom`
    * `ToolPan`
    * `ToolHome`
    * `ToolBack`
    * `ToolForward`
    """

    def __init__(self, *args, **kwargs):
        # Per-figure history; weak keys let closed figures be collected.
        self.views = WeakKeyDictionary()       # figure -> stack of view dicts
        self.positions = WeakKeyDictionary()   # figure -> stack of position dicts
        self.home_views = WeakKeyDictionary()  # figure -> {axes: home view}
        super().__init__(*args, **kwargs)

    def add_figure(self, figure):
        """Add the current figure to the stack of views and positions."""
        if figure not in self.views:
            self.views[figure] = cbook._Stack()
            self.positions[figure] = cbook._Stack()
            self.home_views[figure] = WeakKeyDictionary()
            # Define Home
            self.push_current(figure)
            # Make sure we add a home view for new Axes as they're added
            figure.add_axobserver(lambda fig: self.update_home_views(fig))

    def clear(self, figure):
        """Reset the Axes stack."""
        if figure in self.views:
            self.views[figure].clear()
            self.positions[figure].clear()
            self.home_views[figure].clear()
            self.update_home_views()

    def update_view(self):
        """
        Update the view limits and position for each Axes from the current
        stack position. If any Axes are present in the figure that aren't in
        the current stack position, use the home view limits for those Axes and
        don't update *any* positions.
        """
        views = self.views[self.figure]()
        if views is None:
            return
        pos = self.positions[self.figure]()
        if pos is None:
            return
        home_views = self.home_views[self.figure]
        all_axes = self.figure.get_axes()
        for a in all_axes:
            # Axes missing from the stack entry fall back to their home view.
            if a in views:
                cur_view = views[a]
            else:
                cur_view = home_views[a]
            a._set_view(cur_view)
        # Only restore positions when every Axes has a stored position.
        if set(all_axes).issubset(pos):
            for a in all_axes:
                # Restore both the original and modified positions
                a._set_position(pos[a][0], 'original')
                a._set_position(pos[a][1], 'active')
        self.figure.canvas.draw_idle()

    def push_current(self, figure=None):
        """
        Push the current view limits and position onto their respective stacks.
        """
        if not figure:
            figure = self.figure
        views = WeakKeyDictionary()
        pos = WeakKeyDictionary()
        for a in figure.get_axes():
            views[a] = a._get_view()
            pos[a] = self._axes_pos(a)
        self.views[figure].push(views)
        self.positions[figure].push(pos)

    def _axes_pos(self, ax):
        """
        Return the original and modified positions for the specified Axes.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            The `.Axes` to get the positions for.

        Returns
        -------
        original_position, modified_position
            A tuple of the original and modified positions.
        """
        return (ax.get_position(True).frozen(),
                ax.get_position().frozen())

    def update_home_views(self, figure=None):
        """
        Make sure that ``self.home_views`` has an entry for all Axes present
        in the figure.
        """
        if not figure:
            figure = self.figure
        for a in figure.get_axes():
            # Only record a home view the first time an Axes is seen.
            if a not in self.home_views[figure]:
                self.home_views[figure][a] = a._get_view()

    def home(self):
        """Recall the first view and position from the stack."""
        self.views[self.figure].home()
        self.positions[self.figure].home()

    def back(self):
        """Back one step in the stack of views and positions."""
        self.views[self.figure].back()
        self.positions[self.figure].back()

    def forward(self):
        """Forward one step in the stack of views and positions."""
        self.views[self.figure].forward()
        self.positions[self.figure].forward()
class ViewsPositionsBase(ToolBase):
    """Base class for `ToolHome`, `ToolBack` and `ToolForward`."""

    _on_trigger = None

    def trigger(self, sender, event, data=None):
        # Look the navigation-history tool up once, then dispatch to the
        # method named by the subclass via ``_on_trigger``.
        viewpos = self.toolmanager.get_tool(_views_positions)
        viewpos.add_figure(self.figure)
        getattr(viewpos, self._on_trigger)()
        viewpos.update_view()
class ToolHome(ViewsPositionsBase):
    """Restore the original view limits."""

    description = 'Reset original view'
    image = 'mpl-data/images/home'
    default_keymap = property(lambda self: mpl.rcParams['keymap.home'])
    _on_trigger = 'home'  # ToolViewsPositions method invoked on trigger.
class ToolBack(ViewsPositionsBase):
    """Move back up the view limits stack."""

    description = 'Back to previous view'
    image = 'mpl-data/images/back'
    default_keymap = property(lambda self: mpl.rcParams['keymap.back'])
    _on_trigger = 'back'  # ToolViewsPositions method invoked on trigger.
class ToolForward(ViewsPositionsBase):
    """Move forward in the view lim stack."""

    description = 'Forward to next view'
    image = 'mpl-data/images/forward'
    default_keymap = property(lambda self: mpl.rcParams['keymap.forward'])
    _on_trigger = 'forward'  # ToolViewsPositions method invoked on trigger.
class ConfigureSubplotsBase(ToolBase):
    """Base tool for the configuration of subplots."""

    # GUI backends register concrete subclasses via _register_tool_class.
    description = 'Configure subplots'
    image = 'mpl-data/images/subplots'
class SaveFigureBase(ToolBase):
    """Base tool for figure saving."""

    # GUI backends register concrete subclasses via _register_tool_class.
    description = 'Save the figure'
    image = 'mpl-data/images/filesave'
    default_keymap = property(lambda self: mpl.rcParams['keymap.save'])
class ZoomPanBase(ToolToggleBase):
    """Base class for `ToolZoom` and `ToolPan`."""

    def __init__(self, *args):
        super().__init__(*args)
        self._button_pressed = None
        self._xypress = None
        # Canvas connection ids, live only while the tool is enabled.
        self._idPress = None
        self._idRelease = None
        self._idScroll = None
        self.base_scale = 2.
        self.scrollthresh = .5  # .5 second scroll threshold
        # Pretend the last scroll happened long enough ago to count as "old".
        self.lastscroll = time.time() - self.scrollthresh

    def enable(self, event=None):
        """Connect press/release events and lock the canvas."""
        canvas = self.figure.canvas
        canvas.widgetlock(self)
        self._idPress = canvas.mpl_connect('button_press_event', self._press)
        self._idRelease = canvas.mpl_connect(
            'button_release_event', self._release)
        self._idScroll = canvas.mpl_connect('scroll_event', self.scroll_zoom)

    def disable(self, event=None):
        """Release the canvas and disconnect press/release events."""
        self._cancel_action()
        canvas = self.figure.canvas
        canvas.widgetlock.release(self)
        for cid in (self._idPress, self._idRelease, self._idScroll):
            canvas.mpl_disconnect(cid)

    def trigger(self, sender, event, data=None):
        self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
        super().trigger(sender, event, data)
        # Reflect the (possibly changed) toggle state on every Axes.
        new_navigate_mode = self.name.upper() if self.toggled else None
        for ax in self.figure.axes:
            ax.set_navigate_mode(new_navigate_mode)

    def scroll_zoom(self, event):
        # https://gist.github.com/tacaswell/3144287
        if event.inaxes is None:
            return
        if event.button == 'up':
            scl = self.base_scale      # zoom in
        elif event.button == 'down':
            scl = 1 / self.base_scale  # zoom out
        else:
            scl = 1                    # should never happen
        ax = event.inaxes
        ax._set_view_from_bbox([event.x, event.y, scl])
        # Scrolls within the timing threshold collapse into a single
        # view-stack entry: drop the entry pushed by the previous scroll.
        if (time.time() - self.lastscroll) < self.scrollthresh:
            self.toolmanager.get_tool(_views_positions).back()
        self.figure.canvas.draw_idle()  # force re-draw
        self.lastscroll = time.time()
        self.toolmanager.get_tool(_views_positions).push_current()
class ToolZoom(ZoomPanBase):
    """A Tool for zooming using a rectangle selector."""

    description = 'Zoom to rectangle'
    image = 'mpl-data/images/zoom_to_rect'
    default_keymap = property(lambda self: mpl.rcParams['keymap.zoom'])
    cursor = cursors.SELECT_REGION
    radio_group = 'default'  # mutually exclusive with ToolPan

    def __init__(self, *args):
        super().__init__(*args)
        self._ids_zoom = []  # cids of the temporary in-gesture callbacks

    def _cancel_action(self):
        # Abort the in-progress gesture: drop the temporary callbacks,
        # erase the rubberband, and reset the gesture state.
        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        self.toolmanager.trigger_tool('rubberband', self)
        self.figure.canvas.draw_idle()
        self._xypress = None
        self._button_pressed = None
        self._ids_zoom = []
        return

    def _press(self, event):
        """Callback for mouse button presses in zoom-to-rectangle mode."""
        # If we're already in the middle of a zoom, pressing another
        # button works to "cancel"
        if self._ids_zoom:
            self._cancel_action()
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return
        x, y = event.x, event.y
        # Record (start_x, start_y, axes, index, view) for every zoomable
        # Axes under the cursor.
        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_zoom()):
                self._xypress.append((x, y, a, i, a._get_view()))
        id1 = self.figure.canvas.mpl_connect(
            'motion_notify_event', self._mouse_move)
        id2 = self.figure.canvas.mpl_connect(
            'key_press_event', self._switch_on_zoom_mode)
        id3 = self.figure.canvas.mpl_connect(
            'key_release_event', self._switch_off_zoom_mode)
        self._ids_zoom = id1, id2, id3
        # Key held during the gesture restricts zooming to one axis
        # ("x" or "y").
        self._zoom_mode = event.key

    def _switch_on_zoom_mode(self, event):
        self._zoom_mode = event.key
        self._mouse_move(event)

    def _switch_off_zoom_mode(self, event):
        self._zoom_mode = None
        self._mouse_move(event)

    def _mouse_move(self, event):
        """Callback for mouse moves in zoom-to-rectangle mode."""
        if self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = self._xypress[0]
            # Clip the rubberband to the Axes bounding box.
            (x1, y1), (x2, y2) = np.clip(
                [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
            if self._zoom_mode == "x":
                y1, y2 = a.bbox.intervaly
            elif self._zoom_mode == "y":
                x1, x2 = a.bbox.intervalx
            self.toolmanager.trigger_tool(
                'rubberband', self, data=(x1, y1, x2, y2))

    def _release(self, event):
        """Callback for mouse button releases in zoom-to-rectangle mode."""
        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        self._ids_zoom = []
        if not self._xypress:
            self._cancel_action()
            return
        done_ax = []
        for cur_xypress in self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, _ind, view = cur_xypress
            # ignore singular clicks - 5 pixels is a threshold
            if abs(x - lastx) < 5 or abs(y - lasty) < 5:
                self._cancel_action()
                return
            # detect twinx, twiny Axes and avoid double zooming
            twinx = any(a.get_shared_x_axes().joined(a, a1) for a1 in done_ax)
            twiny = any(a.get_shared_y_axes().joined(a, a1) for a1 in done_ax)
            done_ax.append(a)
            if self._button_pressed == 1:
                direction = 'in'
            elif self._button_pressed == 3:
                direction = 'out'
            else:
                continue
            a._set_view_from_bbox((lastx, lasty, x, y), direction,
                                  self._zoom_mode, twinx, twiny)
        self._zoom_mode = None
        # Record the new view on the navigation stack, then reset state.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()
class ToolPan(ZoomPanBase):
    """Pan Axes with left mouse, zoom with right."""

    default_keymap = property(lambda self: mpl.rcParams['keymap.pan'])
    description = 'Pan axes with left mouse, zoom with right'
    image = 'mpl-data/images/move'
    cursor = cursors.MOVE
    radio_group = 'default'  # mutually exclusive with ToolZoom

    def __init__(self, *args):
        super().__init__(*args)
        self._id_drag = None  # cid of the in-gesture motion callback

    def _cancel_action(self):
        # Reset the gesture state and release the message area.
        self._button_pressed = None
        self._xypress = []
        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)
        self.figure.canvas.draw_idle()

    def _press(self, event):
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return
        x, y = event.x, event.y
        # Start panning every navigable Axes under the cursor.
        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_pan()):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                # Block cursor-position messages while dragging.
                self.toolmanager.messagelock(self)
                self._id_drag = self.figure.canvas.mpl_connect(
                    'motion_notify_event', self._mouse_move)

    def _release(self, event):
        if self._button_pressed is None:
            self._cancel_action()
            return
        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)
        for a, _ind in self._xypress:
            a.end_pan()
        if not self._xypress:
            self._cancel_action()
            return
        # Record the new view on the navigation stack, then reset state.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()

    def _mouse_move(self, event):
        for a, _ind in self._xypress:
            # safer to use the recorded button at the _press than current
            # button: # multiple button can get pressed during motion...
            a.drag_pan(self._button_pressed, event.key, event.x, event.y)
        self.toolmanager.canvas.draw_idle()
class ToolHelpBase(ToolBase):
    """Base tool listing all tools, their shortcuts and descriptions."""

    description = 'Print tool list, shortcuts and description'
    default_keymap = property(lambda self: mpl.rcParams['keymap.help'])
    image = 'mpl-data/images/help'

    @staticmethod
    def format_shortcut(key_sequence):
        """
        Convert a shortcut string from the notation used in rc config to the
        standard notation for displaying shortcuts, e.g. 'ctrl+a' -> 'Ctrl+A'.
        """
        return (key_sequence if len(key_sequence) == 1 else
                re.sub(r"\+[A-Z]", r"+Shift\g<0>", key_sequence).title())

    def _format_tool_keymap(self, name):
        # Join all keys bound to *name* into one display string.
        keymaps = self.toolmanager.get_tool_keymap(name)
        return ", ".join(self.format_shortcut(keymap) for keymap in keymaps)

    def _get_help_entries(self):
        # (name, shortcuts, description) rows for every documented tool.
        return [(name, self._format_tool_keymap(name), tool.description)
                for name, tool in sorted(self.toolmanager.tools.items())
                if tool.description]

    def _get_help_text(self):
        # Plain-text rendering, one "name: keys\n\tdescription" entry per tool.
        entries = self._get_help_entries()
        entries = ["{}: {}\n\t{}".format(*entry) for entry in entries]
        return "\n".join(entries)

    def _get_help_html(self):
        # The HTML literals of this method had been corrupted (markup
        # stripped, leaving unterminated strings); restored per the upstream
        # matplotlib implementation: render the entries as a 3-column table.
        fmt = "<tr><td>{}</td><td>{}</td><td>{}</td></tr>"
        rows = [fmt.format(
            "<b>Action</b>", "<b>Shortcuts</b>", "<b>Description</b>")]
        rows += [fmt.format(*row) for row in self._get_help_entries()]
        return ("<style>td {padding: 0px 4px}</style>"
                "<table><thead>" + rows[0] + "</thead>"
                "<tbody>" + "".join(rows[1:]) + "</tbody></table>")
class ToolCopyToClipboardBase(ToolBase):
    """Tool to copy the figure to the clipboard."""
    description = 'Copy the canvas figure to clipboard'
    default_keymap = property(lambda self: mpl.rcParams['keymap.copy'])

    def trigger(self, *args, **kwargs):
        # Backends with clipboard support override this; the base class can
        # only report that the feature is unavailable.
        self.toolmanager.message_event("Copy tool is not available", self)
# Default tool classes, keyed by the canonical name under which
# `add_tools_to_manager` registers them.  `_views_positions` is a
# module-level variable holding the name of the views/positions tool.
default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
                 'zoom': ToolZoom, 'pan': ToolPan,
                 'subplots': ConfigureSubplotsBase,
                 'save': SaveFigureBase,
                 'grid': ToolGrid,
                 'grid_minor': ToolMinorGrid,
                 'fullscreen': ToolFullScreen,
                 'quit': ToolQuit,
                 'quit_all': ToolQuitAll,
                 'xscale': ToolXScale,
                 'yscale': ToolYScale,
                 'position': ToolCursorPosition,
                 _views_positions: ToolViewsPositions,
                 'cursor': ToolSetCursor,
                 'rubberband': RubberbandBase,
                 'help': ToolHelpBase,
                 'copy': ToolCopyToClipboardBase,
                 }
# Default toolbar layout, in the ``[[group, [tool, ...]], ...]`` form
# consumed by `add_tools_to_container`.
default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
                         ['zoompan', ['pan', 'zoom', 'subplots']],
                         ['io', ['save', 'help']]]
def add_tools_to_manager(toolmanager, tools=default_tools):
    """
    Add multiple tools to a `.ToolManager`.

    Parameters
    ----------
    toolmanager : `.backend_managers.ToolManager`
        Manager to which the tools are added.
    tools : {str: class_like}, optional
        The tools to add in a {name: tool} dict, see
        `.backend_managers.ToolManager.add_tool` for more info.
    """
    for tool_name, tool_class in tools.items():
        toolmanager.add_tool(tool_name, tool_class)
def add_tools_to_container(container, tools=default_toolbar_tools):
    """
    Add multiple tools to the container.

    Parameters
    ----------
    container : Container
        `.backend_bases.ToolContainerBase` object that will get the tools
        added.
    tools : list, optional
        List in the form ``[[group1, [tool1, tool2 ...]], [group2, [...]]]``
        where the tools ``[tool1, tool2, ...]`` will display in group1.
        See `.backend_bases.ToolContainerBase.add_tool` for details.
    """
    for group_name, group_tools in tools:
        for position, tool_name in enumerate(group_tools):
            container.add_tool(tool_name, group_name, position)
venv\Lib\site-packages\matplotlib\bezier.py
"""
A module providing some utility functions regarding Bézier path manipulation.
"""
from functools import lru_cache
import math
import warnings
import numpy as np
from matplotlib import _api
# same algorithm as 3.8's math.comb
@np.vectorize
@lru_cache(maxsize=128)
def _comb(n, k):
if k > n:
return 0
k = min(k, n - k)
i = np.arange(1, k + 1)
return np.prod((n + 1 - i)/i).astype(int)
class NonIntersectingPathException(ValueError):
    """Raised when two paths that are required to intersect do not."""
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
                     cx2, cy2, cos_t2, sin_t2):
    """
    Return the intersection between the line through (*cx1*, *cy1*) at angle
    *t1* and the line through (*cx2*, *cy2*) at angle *t2*.
    """
    # Line i in normal form: sin_ti * x - cos_ti * y = sin_ti*cxi - cos_ti*cyi.
    rhs1 = sin_t1 * cx1 - cos_t1 * cy1
    rhs2 = sin_t2 * cx2 - cos_t2 * cy2
    # Determinant of the coefficient matrix [[sin_t1, -cos_t1],
    #                                        [sin_t2, -cos_t2]].
    det = sin_t1 * (-cos_t2) - (-cos_t1) * sin_t2
    if abs(det) < 1e-12:
        raise ValueError("Given lines do not intersect. Please verify that "
                         "the angles are not equal or differ by 180 degrees.")
    # Entries of the inverse matrix, via the 2x2 adjugate divided by det.
    inv_a = -cos_t2 / det
    inv_b = cos_t1 / det
    inv_c = -sin_t2 / det
    inv_d = sin_t1 / det
    return (inv_a * rhs1 + inv_b * rhs2,
            inv_c * rhs1 + inv_d * rhs2)
def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    the two points located on its perpendicular at distance *length*, one on
    each side.
    """
    if length == 0.:
        return cx, cy, cx, cy
    # Direction rotated by -90 and +90 degrees, respectively.
    left_cos, left_sin = sin_t, -cos_t
    right_cos, right_sin = -sin_t, cos_t
    x1 = length * left_cos + cx
    y1 = length * left_sin + cy
    x2 = length * right_cos + cx
    y2 = length * right_sin + cy
    return x1, y1, x2, y2
# BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1 - t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
    """
    Split a Bézier segment defined by its control points *beta* into two
    separate segments divided at *t* and return their control points.
    """
    current = np.asarray(beta)
    triangle = [current]  # successive rows of the de Casteljau triangle
    while True:
        # One reduction step (inlined _de_casteljau1).
        current = current[:-1] * (1 - t) + current[1:] * t
        triangle.append(current)
        if len(current) == 1:
            break
    # Left half: first point of every row; right half: last points, reversed.
    left = [row[0] for row in triangle]
    right = [row[-1] for row in reversed(triangle)]
    return left, right
def find_bezier_t_intersecting_with_closedpath(
        bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01):
    """
    Find the intersection of the Bézier curve with a closed path.

    The intersection point *t* is approximated by two parameters *t0*, *t1*
    such that *t0* <= *t* <= *t1*.

    Search starts from *t0* and *t1* and uses a simple bisecting algorithm
    therefore one of the end points must be inside the path while the other
    doesn't. The search stops when the distance of the points parametrized by
    *t0* and *t1* gets smaller than the given *tolerance*.

    Parameters
    ----------
    bezier_point_at_t : callable
        A function returning x, y coordinates of the Bézier at parameter *t*.
        It must have the signature::

            bezier_point_at_t(t: float) -> tuple[float, float]

    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. It must have the signature::

            inside_closedpath(point: tuple[float, float]) -> bool

    t0, t1 : float
        Start parameters for the search.
    tolerance : float
        Maximal allowed distance between the final points.

    Returns
    -------
    t0, t1 : float
        The Bézier path parameters.
    """
    p_lo = bezier_point_at_t(t0)
    p_hi = bezier_point_at_t(t1)
    lo_inside = inside_closedpath(p_lo)
    hi_inside = inside_closedpath(p_hi)
    if lo_inside == hi_inside and p_lo != p_hi:
        raise NonIntersectingPathException(
            "Both points are on the same side of the closed path")
    # Bisect until the bracketing points are closer than the tolerance.
    while np.hypot(p_lo[0] - p_hi[0], p_lo[1] - p_hi[1]) >= tolerance:
        t_mid = 0.5 * (t0 + t1)
        p_mid = bezier_point_at_t(t_mid)
        mid_inside = inside_closedpath(p_mid)
        if lo_inside ^ mid_inside:
            # The crossing lies in [t0, t_mid].
            t1 = t_mid
            if p_hi == p_mid:
                # No numeric progress possible (coordinates too large relative
                # to the tolerance); bail out to avoid an infinite loop.
                break
            p_hi = p_mid
        else:
            # The crossing lies in [t_mid, t1].
            t0 = t_mid
            if p_lo == p_mid:
                # Same infinite-loop guard as above.
                break
            p_lo = p_mid
            lo_inside = mid_inside
    return t0, t1
class BezierSegment:
    """
    A d-dimensional Bézier segment.

    Parameters
    ----------
    control_points : (N, d) array
        Location of the *N* control points.
    """

    def __init__(self, control_points):
        self._cpoints = np.asarray(control_points)
        self._N, self._d = self._cpoints.shape
        self._orders = np.arange(self._N)
        # Bernstein binomial prefactors, folded into the control points once.
        binom = [math.comb(self._N - 1, k) for k in range(self._N)]
        self._px = (self._cpoints.T * binom).T

    def __call__(self, t):
        """
        Evaluate the Bézier curve at point(s) *t* in [0, 1].

        Parameters
        ----------
        t : (k,) array-like
            Points at which to evaluate the curve.

        Returns
        -------
        (k, d) array
            Value of the curve for each point in *t*.
        """
        t = np.asarray(t)
        bernstein = (np.power.outer(1 - t, self._orders[::-1])
                     * np.power.outer(t, self._orders))
        return bernstein @ self._px

    def point_at_t(self, t):
        """Evaluate the curve at the single parameter *t*, as a d-tuple."""
        return tuple(self(t))

    @property
    def control_points(self):
        """The control points of the curve."""
        return self._cpoints

    @property
    def dimension(self):
        """The dimension of the curve."""
        return self._d

    @property
    def degree(self):
        """Degree of the polynomial. One less the number of control points."""
        return self._N - 1

    @property
    def polynomial_coefficients(self):
        r"""
        The polynomial coefficients of the Bézier curve.

        .. warning:: Follows opposite convention from `numpy.polyval`.

        Returns
        -------
        (n+1, d) array
            Coefficients after expanding in polynomial basis, where :math:`n`
            is the degree of the Bézier curve and :math:`d` its dimension.
            These are the numbers (:math:`C_j`) such that the curve can be
            written :math:`\sum_{j=0}^n C_j t^j`.

        Notes
        -----
        The coefficients are calculated as

        .. math::

            {n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i

        where :math:`P_i` are the control points of the curve.
        """
        n = self.degree
        # matplotlib uses n <= 4; the closed formula overflows around n = 15.
        if n > 10:
            warnings.warn("Polynomial coefficients formula unstable for high "
                          "order Bezier curves!", RuntimeWarning)
        P = self.control_points
        j = np.arange(n + 1)[:, None]
        i = np.arange(n + 1)[None, :]  # _comb is non-zero only for i <= j
        signed = (-1) ** (i + j) * _comb(j, i)  # j on axis 0, i on axis 1
        return _comb(n, j) * signed @ P  # j on axis 0, dimension on axis 1

    def axis_aligned_extrema(self):
        """
        Return the dimension and location of the curve's interior extrema.

        The extrema are the points along the curve where one of its partial
        derivatives is zero.

        Returns
        -------
        dims : array of int
            Index :math:`i` of the partial derivative which is zero at each
            interior extrema.
        dzeros : array of float
            Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) =
            0`
        """
        n = self.degree
        if n <= 1:
            # Linear (or degenerate) curves have no interior extrema.
            return np.array([]), np.array([])
        Cj = self.polynomial_coefficients
        # Coefficients of the derivative polynomial, per dimension.
        dCj = np.arange(1, n + 1)[:, None] * Cj[1:]
        dims = []
        roots = []
        for dim, coeffs in enumerate(dCj.T):
            r = np.roots(coeffs[::-1])
            roots.append(r)
            dims.append(np.full_like(r, dim))
        roots = np.concatenate(roots)
        dims = np.concatenate(dims)
        # Keep only real roots inside the parameter interval [0, 1].
        in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)
        return dims[in_range], np.real(roots)[in_range]
def split_bezier_intersecting_with_closedpath(
        bezier, inside_closedpath, tolerance=0.01):
    """
    Split a Bézier curve into two at the intersection with a closed path.

    Parameters
    ----------
    bezier : (N, 2) array-like
        Control points of the Bézier segment. See `.BezierSegment`.
    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. See also `.find_bezier_t_intersecting_with_closedpath`.
    tolerance : float
        The tolerance for the intersection. See also
        `.find_bezier_t_intersecting_with_closedpath`.

    Returns
    -------
    left, right
        Lists of control points for the two Bézier segments.
    """
    segment = BezierSegment(bezier)
    # Bracket the crossing parameter, then split at the bracket midpoint.
    t0, t1 = find_bezier_t_intersecting_with_closedpath(
        segment.point_at_t, inside_closedpath, tolerance=tolerance)
    return split_de_casteljau(bezier, (t0 + t1) / 2.)
# matplotlib specific
def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False):
    """
    Divide a path into two segments at the point where ``inside(x, y)`` becomes
    False.

    Parameters
    ----------
    path : `~matplotlib.path.Path`
        The path to split.
    inside : callable
        Predicate taking an (x, y) point; the split happens on the first
        segment whose end point gives a different predicate value than the
        path's start point.
    tolerance : float
        Tolerance forwarded to the Bézier-intersection bisection.
    reorder_inout : bool
        If True, always return the "inside" part first, even when the path
        starts outside.

    Returns
    -------
    path_in, path_out : `~matplotlib.path.Path`
        The two halves of *path*, split at the computed crossing segment.
    """
    from .path import Path
    path_iter = path.iter_segments()
    # First segment is the initial move; its end point fixes the start side.
    ctl_points, command = next(path_iter)
    begin_inside = inside(ctl_points[-2:])  # true if begin point is inside
    ctl_points_old = ctl_points
    # iold/i track vertex counts: iold is the index at the start of the
    # current segment, i the index just past it.
    iold = 0
    i = 1
    for ctl_points, command in path_iter:
        iold = i
        i += len(ctl_points) // 2  # each vertex is an (x, y) pair
        if inside(ctl_points[-2:]) != begin_inside:
            # Predicate flipped on this segment: assemble its full Bézier
            # control polygon (previous end point + this segment's points).
            bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points])
            break
        ctl_points_old = ctl_points
    else:
        raise ValueError("The path does not intersect with the patch")
    bp = bezier_path.reshape((-1, 2))
    left, right = split_bezier_intersecting_with_closedpath(
        bp, inside, tolerance)
    # Codes depend on the control-polygon length: 2 = straight line,
    # 3 = quadratic, 4 = cubic.
    if len(left) == 2:
        codes_left = [Path.LINETO]
        codes_right = [Path.MOVETO, Path.LINETO]
    elif len(left) == 3:
        codes_left = [Path.CURVE3, Path.CURVE3]
        codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
    elif len(left) == 4:
        codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
        codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
    else:
        raise AssertionError("This should never be reached")
    # left[1:] skips the first control point — presumably already present in
    # the preceding vertices being concatenated below; the right half keeps
    # its full control polygon (it starts with a MOVETO).
    verts_left = left[1:]
    verts_right = right[:]
    if path.codes is None:
        path_in = Path(np.concatenate([path.vertices[:i], verts_left]))
        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]))
    else:
        path_in = Path(np.concatenate([path.vertices[:iold], verts_left]),
                       np.concatenate([path.codes[:iold], codes_left]))
        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]),
                        np.concatenate([codes_right, path.codes[i:]]))
    if reorder_inout and not begin_inside:
        # Guarantee the returned order is (inside, outside).
        path_in, path_out = path_out, path_in
    return path_in, path_out
def inside_circle(cx, cy, r):
    """
    Return a function that checks whether a point is in a circle with center
    (*cx*, *cy*) and radius *r*.

    The returned function has the signature::

        f(xy: tuple[float, float]) -> bool
    """
    r2 = r ** 2  # hoisted: compare squared distances, no sqrt needed

    def _check(xy):
        px, py = xy
        return (px - cx) ** 2 + (py - cy) ** 2 < r2
    return _check
# quadratic Bezier lines
def get_cos_sin(x0, y0, x1, y1):
    """Return (cos, sin) of the direction from (x0, y0) to (x1, y1);
    (0, 0) when the two points coincide."""
    run = x1 - x0
    rise = y1 - y0
    norm = (run * run + rise * rise) ** .5
    if norm == 0:
        # Degenerate segment: no direction is defined.
        return 0.0, 0.0
    return run / norm, rise / norm
def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5):
    """
    Check if two lines are parallel.

    Parameters
    ----------
    dx1, dy1, dx2, dy2 : float
        The gradients *dy*/*dx* of the two lines.
    tolerance : float
        The angular tolerance in radians up to which the lines are considered
        parallel.

    Returns
    -------
    is_parallel
        - 1 if two lines are parallel in same direction.
        - -1 if two lines are parallel in opposite direction.
        - False otherwise.
    """
    angle_delta = abs(np.arctan2(dx1, dy1) - np.arctan2(dx2, dy2))
    if angle_delta < tolerance:
        return 1
    if abs(angle_delta - np.pi) < tolerance:
        return -1
    return False
def get_parallels(bezier2, width):
    """
    Given the quadratic Bézier control points *bezier2*, returns
    control points of quadratic Bézier lines roughly parallel to given
    one separated by *width*.

    Returns
    -------
    path_left, path_right : list of 3 (x, y) tuples
        Control points of the two offset quadratic Bézier curves.
    """
    # The parallel Bezier lines are constructed by following ways.
    # c1 and c2 are control points representing the start and end of the
    # Bezier line.
    # cm is the middle point
    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c2x, c2y = bezier2[2]
    parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
                                      cmx - c2x, cmy - c2y)
    if parallel_test == -1:
        # Tangents point in opposite directions (degenerate control polygon):
        # fall back to offsetting the straight c1->c2 chord.
        _api.warn_external(
            "Lines do not intersect. A straight line is used instead.")
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
        cos_t2, sin_t2 = cos_t1, sin_t1
    else:
        # t1 and t2 is the angle between c1 and cm, cm, c2. They are
        # also an angle of the tangential line of the path at c1 and c2
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
        cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
    # find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
    # Bezier path at a distance of width. Same thing for c2_left and
    # c2_right with respect to c2.
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
    )
    c2x_left, c2y_left, c2x_right, c2y_right = (
        get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    )
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
    # t2. Same with cm_right.
    try:
        cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
                                              sin_t1, c2x_left, c2y_left,
                                              cos_t2, sin_t2)
        cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
                                                sin_t1, c2x_right, c2y_right,
                                                cos_t2, sin_t2)
    except ValueError:
        # Special case straight lines, i.e., angle between two lines is
        # less than the threshold used by get_intersection (we don't use
        # check_if_parallel as the threshold is not the same).
        cmx_left, cmy_left = (
            0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
        )
        cmx_right, cmy_right = (
            0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
        )
    # the parallel Bezier lines are created with control points of
    # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
    path_left = [(c1x_left, c1y_left),
                 (cmx_left, cmy_left),
                 (c2x_left, c2y_left)]
    path_right = [(c1x_right, c1y_right),
                  (cmx_right, cmy_right),
                  (c2x_right, c2y_right)]
    return path_left, path_right
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
    """
    Find control points of the Bézier curve passing through (*c1x*, *c1y*),
    (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
    """
    def _mid_control(mm, a, b):
        # Invert B(0.5) = 0.25*a + 0.5*cm + 0.25*b for the middle point cm.
        return .5 * (4 * mm - (a + b))

    return [(c1x, c1y),
            (_mid_control(mmx, c1x, c2x), _mid_control(mmy, c1y, c2y)),
            (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
    """
    Being similar to `get_parallels`, returns control points of two quadratic
    Bézier lines having a width roughly parallel to given one separated by
    *width*.

    Parameters
    ----------
    bezier2 : sequence of three (x, y) pairs
        Control points of the quadratic Bézier to offset.
    width : float
        Base offset distance.
    w1, wm, w2 : float
        Width multipliers applied at the start point, at the curve's t=0.5
        point, and at the end point respectively, producing the wedge shape.
    """
    # c1, cm, c2
    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c3x, c3y = bezier2[2]
    # t1 and t2 is the angle between c1 and cm, cm, c3.
    # They are also an angle of the tangential line of the path at c1 and c3
    cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
    cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
    # find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
    # Bezier path at a distance of width. Same thing for c3_left and
    # c3_right with respect to c3.
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
    )
    c3x_left, c3y_left, c3x_right, c3y_right = (
        get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
    )
    # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
    # c12-c23
    c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
    c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
    # c123 is the de Casteljau midpoint, i.e. the on-curve point B(0.5).
    c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
    # tangential angle of c123 (angle between c12 and c23)
    cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
    c123x_left, c123y_left, c123x_right, c123y_right = (
        get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
    )
    # Fit a quadratic Bézier through the three offset points on each side.
    path_left = find_control_points(c1x_left, c1y_left,
                                    c123x_left, c123y_left,
                                    c3x_left, c3y_left)
    path_right = find_control_points(c1x_right, c1y_right,
                                     c123x_right, c123y_right,
                                     c3x_right, c3y_right)
    return path_left, path_right
venv\Lib\site-packages\matplotlib\category.py
"""
Plotting of string "category" data: ``plot(['d', 'f', 'a'], [1, 2, 3])`` will
plot three points with x-axis values of 'd', 'f', 'a'.
See :doc:`/gallery/lines_bars_and_markers/categorical_variables` for an
example.
The module uses Matplotlib's `matplotlib.units` mechanism to convert from
strings to integers and provides a tick locator, a tick formatter, and the
`.UnitData` class that creates and stores the string-to-integer mapping.
"""
from collections import OrderedDict
import dateutil.parser
import itertools
import logging
import numpy as np
from matplotlib import _api, cbook, ticker, units
_log = logging.getLogger(__name__)
class StrCategoryConverter(units.ConversionInterface):
    @staticmethod
    def convert(value, unit, axis):
        """
        Convert strings in *value* to floats using the mapping stored in the
        *unit* object.

        Parameters
        ----------
        value : str or iterable
            Value or list of values to be converted.
        unit : `.UnitData`
            An object mapping strings to integers.
        axis : `~matplotlib.axis.Axis`
            The axis on which the converted value is plotted.

            .. note:: *axis* is unused.

        Returns
        -------
        float or `~numpy.ndarray` of float
        """
        if unit is None:
            raise ValueError(
                'Missing category information for StrCategoryConverter; '
                'this might be caused by unintendedly mixing categorical and '
                'numeric data')
        StrCategoryConverter._validate_unit(unit)
        # dtype=object keeps purely numeric entries passing through intact.
        values = np.atleast_1d(np.array(value, dtype=object))
        # Updating first also performs the str/bytes type checking.
        unit.update(values)
        mapped = np.vectorize(unit._mapping.__getitem__,
                              otypes=[float])(values)
        if cbook.is_scalar_or_string(value):
            return mapped[0]
        return mapped

    @staticmethod
    def axisinfo(unit, axis):
        """
        Set the default axis ticks and labels.

        Parameters
        ----------
        unit : `.UnitData`
            object string unit information for value
        axis : `~matplotlib.axis.Axis`
            axis for which information is being set

            .. note:: *axis* is not used

        Returns
        -------
        `~matplotlib.units.AxisInfo`
            Information to support default tick labeling
        """
        StrCategoryConverter._validate_unit(unit)
        # Locator and formatter share the mapping dict itself (not a copy),
        # so later updates to the unit are reflected in the ticks.
        return units.AxisInfo(majloc=StrCategoryLocator(unit._mapping),
                              majfmt=StrCategoryFormatter(unit._mapping))

    @staticmethod
    def default_units(data, axis):
        """
        Set and update the `~matplotlib.axis.Axis` units.

        Parameters
        ----------
        data : str or iterable of str
        axis : `~matplotlib.axis.Axis`
            axis on which the data is plotted

        Returns
        -------
        `.UnitData`
            object storing string to integer mapping
        """
        # The conversion call stack is default_units -> axis_info -> convert.
        if axis.units is None:
            axis.set_units(UnitData(data))
        else:
            axis.units.update(data)
        return axis.units

    @staticmethod
    def _validate_unit(unit):
        # Anything exposing a _mapping attribute is accepted as a unit.
        if not hasattr(unit, '_mapping'):
            raise ValueError(
                f'Provided unit "{unit}" is not valid for a categorical '
                'converter, as it does not have a _mapping attribute.')
class StrCategoryLocator(ticker.Locator):
    """Tick at every integer mapping of the string data."""

    def __init__(self, units_mapping):
        """
        Parameters
        ----------
        units_mapping : dict
            Mapping of category names (str) to indices (int).
        """
        self._units = units_mapping

    def __call__(self):
        # docstring inherited
        return [*self._units.values()]

    def tick_values(self, vmin, vmax):
        # docstring inherited; the ticks are fixed by the mapping, so the
        # requested range is ignored.
        return self()
class StrCategoryFormatter(ticker.Formatter):
    """String representation of the data at every tick."""

    def __init__(self, units_mapping):
        """
        Parameters
        ----------
        units_mapping : dict
            Mapping of category names (str) to indices (int).
        """
        self._units = units_mapping

    def __call__(self, x, pos=None):
        # docstring inherited
        return self.format_ticks([x])[0]

    def format_ticks(self, values):
        # docstring inherited
        # Invert the mapping (index -> display text) once per call.
        inverse = {idx: self._text(name) for name, idx in self._units.items()}
        return [inverse.get(round(val), '') for val in values]

    @staticmethod
    def _text(value):
        """Convert text values into utf-8 or ascii strings."""
        if isinstance(value, bytes):
            return value.decode(encoding='utf-8')
        if not isinstance(value, str):
            return str(value)
        return value
class UnitData:
    def __init__(self, data=None):
        """
        Create mapping between unique categorical values and integer ids.

        Parameters
        ----------
        data : iterable
            sequence of string values
        """
        self._mapping = OrderedDict()
        self._counter = itertools.count()
        if data is not None:
            self.update(data)

    @staticmethod
    def _str_is_convertible(val):
        """
        Check whether a string can be parsed as a float or as a date.
        """
        try:
            float(val)
            return True
        except ValueError:
            pass
        try:
            dateutil.parser.parse(val)
            return True
        except (ValueError, TypeError):
            # TypeError if dateutil >= 2.8.1 else ValueError
            return False

    def update(self, data):
        """
        Map new values to integer identifiers.

        Parameters
        ----------
        data : iterable of str or bytes

        Raises
        ------
        TypeError
            If elements in *data* are neither str nor bytes.
        """
        data = np.atleast_1d(np.array(data, dtype=object))
        convertible = True
        # OrderedDict.fromkeys iterates over the unique values, in order.
        for val in OrderedDict.fromkeys(data):
            _api.check_isinstance((str, bytes), value=val)
            if convertible:
                # Only probed while every previous value was convertible.
                convertible = self._str_is_convertible(val)
            if val not in self._mapping:
                self._mapping[val] = next(self._counter)
        if data.size and convertible:
            # Warn users who plot number-like or date-like strings.
            _log.info('Using categorical units to plot a list of strings '
                      'that are all parsable as floats or dates. If these '
                      'strings should be plotted as numbers, cast to the '
                      'appropriate data type before plotting.')
# Register the converter with Matplotlib's unit framework
# Intentionally set to a single instance, shared by all four string/bytes
# types, so they all use the same category mapping machinery.
units.registry[str] = \
    units.registry[np.str_] = \
    units.registry[bytes] = \
    units.registry[np.bytes_] = StrCategoryConverter()
"""
Builtin colormaps, colormap handling utilities, and the `ScalarMappable` mixin.
.. seealso::
:doc:`/gallery/color/colormap_reference` for a list of builtin colormaps.
:ref:`colormap-manipulation` for examples of how to make
colormaps.
:ref:`colormaps` an in-depth discussion of choosing
colormaps.
:ref:`colormapnorms` for more details about data normalization.
"""
from collections.abc import Mapping
import matplotlib as mpl
from matplotlib import _api, colors
# TODO make this warn on access
from matplotlib.colorizer import _ScalarMappable as ScalarMappable # noqa
from matplotlib._cm import datad
from matplotlib._cm_listed import cmaps as cmaps_listed
from matplotlib._cm_multivar import cmap_families as multivar_cmaps
from matplotlib._cm_bivar import cmaps as bivar_cmaps
_LUTSIZE = mpl.rcParams['image.lut']
def _gen_cmap_registry():
    """
    Generate a dict mapping standard colormap names to standard colormaps, as
    well as the reversed colormaps.
    """
    cmap_d = {**cmaps_listed}
    for name, spec in datad.items():
        # Precache the cmaps at a fixed lutsize; the spec format determines
        # which Colormap class to build.
        if 'red' in spec:
            cmap = colors.LinearSegmentedColormap(name, spec, _LUTSIZE)
        elif 'listed' in spec:
            cmap = colors.ListedColormap(spec['listed'], name)
        else:
            cmap = colors.LinearSegmentedColormap.from_list(
                name, spec, _LUTSIZE)
        cmap_d[name] = cmap
    # Register colormap aliases for gray and grey.
    aliases = {
        # alias -> original name
        'grey': 'gray',
        'gist_grey': 'gist_gray',
        'gist_yerg': 'gist_yarg',
        'Grays': 'Greys',
    }
    for alias, original_name in aliases.items():
        aliased = cmap_d[original_name].copy()
        aliased.name = alias
        cmap_d[alias] = aliased
    # Generate reversed cmaps for everything registered so far.
    for cmap in list(cmap_d.values()):
        rmap = cmap.reversed()
        cmap_d[rmap.name] = rmap
    return cmap_d
class ColormapRegistry(Mapping):
    r"""
    Container for colormaps that are known to Matplotlib by name.

    The universal registry instance is `matplotlib.colormaps`. There should be
    no need for users to instantiate `.ColormapRegistry` themselves.

    Read access uses a dict-like interface mapping names to `.Colormap`\s::

        import matplotlib as mpl
        cmap = mpl.colormaps['viridis']

    Returned `.Colormap`\s are copies, so that their modification does not
    change the global definition of the colormap.

    Additional colormaps can be added via `.ColormapRegistry.register`::

        mpl.colormaps.register(my_colormap)

    To get a list of all registered colormaps, you can do::

        from matplotlib import colormaps
        list(colormaps)
    """

    def __init__(self, cmaps):
        self._cmaps = cmaps
        # Snapshot the initial names; these may never be overridden/removed.
        self._builtin_cmaps = tuple(cmaps)

    def __getitem__(self, item):
        try:
            cmap = self._cmaps[item]
        except KeyError:
            raise KeyError(f"{item!r} is not a known colormap name") from None
        # Hand out a copy so callers cannot mutate the registered colormap.
        return cmap.copy()

    def __iter__(self):
        return iter(self._cmaps)

    def __len__(self):
        return len(self._cmaps)

    def __str__(self):
        names = ', '.join(f"'{name}'" for name in self)
        return 'ColormapRegistry; available colormaps:\n' + names

    def __call__(self):
        """
        Return a list of the registered colormap names.

        This exists only for backward-compatibility in `.pyplot` which had a
        ``plt.colormaps()`` method. The recommended way to get this list is
        now ``list(colormaps)``.
        """
        return list(self)

    def register(self, cmap, *, name=None, force=False):
        """
        Register a new colormap.

        The colormap name can then be used as a string argument to any
        ``cmap`` parameter in Matplotlib. It is also available in
        ``pyplot.get_cmap``.

        The registry stores a copy of the given colormap, so later changes to
        the original instance do not affect the registered colormap.

        Parameters
        ----------
        cmap : matplotlib.colors.Colormap
            The colormap to register.
        name : str, optional
            The name for the colormap. If not given, ``cmap.name`` is used.
        force : bool, default: False
            If False, a ValueError is raised if trying to overwrite an
            already registered name. True supports overwriting registered
            colormaps other than the builtin colormaps.
        """
        _api.check_isinstance(colors.Colormap, cmap=cmap)
        name = name or cmap.name
        if name in self:
            if not force:
                # Refuse to overwrite unless explicitly asked to.
                raise ValueError(
                    f'A colormap named "{name}" is already registered.')
            if name in self._builtin_cmaps:
                # Builtins may never be overridden, even with force=True.
                raise ValueError("Re-registering the builtin cmap "
                                 f"{name!r} is not allowed.")
            # Overwriting a non-builtin is allowed, but warrants a warning.
            _api.warn_external(f"Overwriting the cmap {name!r} "
                               "that was already in the registry.")
        self._cmaps[name] = cmap.copy()
        # A colormap registered under a new name (e.g. a tweaked builtin)
        # should carry the name it was registered under.
        if self._cmaps[name].name != name:
            self._cmaps[name].name = name

    def unregister(self, name):
        """
        Remove a colormap from the registry.

        You cannot remove built-in colormaps.

        If the named colormap is not registered, returns with no error, raises
        if you try to de-register a default colormap.

        .. warning::

            Colormap names are currently a shared namespace that may be used
            by multiple packages. Use `unregister` only if you know you
            have registered that name before. In particular, do not
            unregister just in case to clean the name before registering a
            new colormap.

        Parameters
        ----------
        name : str
            The name of the colormap to be removed.

        Raises
        ------
        ValueError
            If you try to remove a default built-in colormap.
        """
        if name in self._builtin_cmaps:
            raise ValueError(f"cannot unregister {name!r} which is a builtin "
                             "colormap.")
        # Silently ignore unknown names.
        self._cmaps.pop(name, None)

    def get_cmap(self, cmap):
        """
        Return a color map specified through *cmap*.

        Parameters
        ----------
        cmap : str or `~matplotlib.colors.Colormap` or None

            - if a `.Colormap`, return it
            - if a string, look it up in ``mpl.colormaps``
            - if None, return the Colormap defined in :rc:`image.cmap`

        Returns
        -------
        Colormap
        """
        if cmap is None:
            # Fall back to the rc default.
            return self[mpl.rcParams["image.cmap"]]
        if isinstance(cmap, colors.Colormap):
            return cmap
        if isinstance(cmap, str):
            # check_in_list raises a helpful ValueError on unknown names.
            _api.check_in_list(sorted(_colormaps), cmap=cmap)
            return self[cmap]
        raise TypeError(
            'get_cmap expects None or an instance of a str or Colormap . ' +
            f'you passed {cmap!r} of type {type(cmap)}'
        )
# public access to the colormaps should be via `matplotlib.colormaps`. For now,
# we still create the registry here, but that should stay an implementation
# detail.
_colormaps = ColormapRegistry(_gen_cmap_registry())
# Also expose every registered colormap as a module-level attribute
# (e.g. ``cm.viridis``) by injecting the registry's entries into globals().
globals().update(_colormaps)
# Separate registries for the multivariate and bivariate colormap families.
_multivar_colormaps = ColormapRegistry(multivar_cmaps)
_bivar_colormaps = ColormapRegistry(bivar_cmaps)
# This is an exact copy of pyplot.get_cmap(). It was removed in 3.9, but apparently
# caused more user trouble than expected. Re-added for 3.9.1 and extended the
# deprecation period for two additional minor releases.
@_api.deprecated(
    '3.7',
    removal='3.11',
    alternative="``matplotlib.colormaps[name]`` or ``matplotlib.colormaps.get_cmap()``"
    " or ``pyplot.get_cmap()``"
)
def get_cmap(name=None, lut=None):
    """
    Get a colormap instance, defaulting to rc values if *name* is None.

    Parameters
    ----------
    name : `~matplotlib.colors.Colormap` or str or None, default: None
        If a `.Colormap` instance, it will be returned. Otherwise, the name of
        a colormap known to Matplotlib, which will be resampled by *lut*. The
        default, None, means :rc:`image.cmap`.
    lut : int or None, default: None
        If *name* is not already a Colormap instance and *lut* is not None, the
        colormap will be resampled to have *lut* entries in the lookup table.

    Returns
    -------
    Colormap
    """
    if name is None:
        name = mpl.rcParams['image.cmap']
    if isinstance(name, colors.Colormap):
        return name
    # Raises a helpful ValueError for unknown names.
    _api.check_in_list(sorted(_colormaps), name=name)
    cmap = _colormaps[name]
    return cmap if lut is None else cmap.resampled(lut)
def _ensure_cmap(cmap):
    """
    Coerce *cmap* into a `.Colormap` object.

    For internal use to preserve type stability of errors.

    Parameters
    ----------
    cmap : None, str, Colormap
        - if a `Colormap`, return it
        - if a string, look it up in mpl.colormaps
        - if None, look up the default color map in mpl.colormaps

    Returns
    -------
    Colormap
    """
    if isinstance(cmap, colors.Colormap):
        return cmap
    if cmap is None:
        name = mpl.rcParams["image.cmap"]
    else:
        name = cmap
    if name not in _colormaps:
        # check_in_list raises ValueError, which keeps the exception type
        # stable for internal callers (plain registry lookup would raise
        # KeyError instead).
        _api.check_in_list(sorted(_colormaps), cmap=name)
    return mpl.colormaps[name]
venv\Lib\site-packages\matplotlib\collections.py
"""
Classes for the efficient drawing of large collections of objects that
share most properties, e.g., a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g., you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g., a large set of solid
line segments).
"""
import itertools
import functools
import math
from numbers import Number, Real
import warnings
import numpy as np
import matplotlib as mpl
from . import (_api, _path, artist, cbook, colorizer as mcolorizer, colors as mcolors,
_docstring, hatch as mhatch, lines as mlines, path as mpath, transforms)
from ._enums import JoinStyle, CapStyle
# "color" is excluded; it is a compound setter, and its docstring differs
# in LineCollection.
@_api.define_aliases({
"antialiased": ["antialiaseds", "aa"],
"edgecolor": ["edgecolors", "ec"],
"facecolor": ["facecolors", "fc"],
"linestyle": ["linestyles", "dashes", "ls"],
"linewidth": ["linewidths", "lw"],
"offset_transform": ["transOffset"],
})
class Collection(mcolorizer.ColorizingArtist):
r"""
Base class for Collections. Must be subclassed to be usable.
A Collection represents a sequence of `.Patch`\es that can be drawn
more efficiently together than individually. For example, when a single
path is being drawn repeatedly at different offsets, the renderer can
typically execute a ``draw_marker()`` call much more efficiently than a
series of repeated calls to ``draw_path()`` with the offsets put in
one-by-one.
Most properties of a collection can be configured per-element. Therefore,
Collections have "plural" versions of many of the properties of a `.Patch`
(e.g. `.Collection.get_paths` instead of `.Patch.get_path`). Exceptions are
the *zorder*, *hatch*, *pickradius*, *capstyle* and *joinstyle* properties,
which can only be set globally for the whole collection.
Besides these exceptions, all properties can be specified as single values
(applying to all elements) or sequences of values. The property of the
``i``\th element of the collection is::
prop[i % len(prop)]
Each Collection can optionally be used as its own `.ScalarMappable` by
passing the *norm* and *cmap* parameters to its constructor. If the
Collection's `.ScalarMappable` matrix ``_A`` has been set (via a call
to `.Collection.set_array`), then at draw time this internal scalar
mappable will be used to set the ``facecolors`` and ``edgecolors``,
ignoring those that were manually passed in.
"""
#: Either a list of 3x3 arrays or an Nx3x3 array (representing N
#: transforms), suitable for the `all_transforms` argument to
#: `~matplotlib.backend_bases.RendererBase.draw_path_collection`;
#: each 3x3 array is used to initialize an
#: `~matplotlib.transforms.Affine2D` object.
#: Each kind of collection defines this based on its arguments.
_transforms = np.empty((0, 3, 3))
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
    @_docstring.interpd
    def __init__(self, *,
                 edgecolors=None,
                 facecolors=None,
                 linewidths=None,
                 linestyles='solid',
                 capstyle=None,
                 joinstyle=None,
                 antialiaseds=None,
                 offsets=None,
                 offset_transform=None,
                 norm=None,  # optional for ScalarMappable
                 cmap=None,  # ditto
                 colorizer=None,
                 pickradius=5.0,
                 hatch=None,
                 urls=None,
                 zorder=1,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        edgecolors : :mpltype:`color` or list of colors, default: :rc:`patch.edgecolor`
            Edge color for each patch making up the collection. The special
            value 'face' can be passed to make the edgecolor match the
            facecolor.
        facecolors : :mpltype:`color` or list of colors, default: :rc:`patch.facecolor`
            Face color for each patch making up the collection.
        linewidths : float or list of floats, default: :rc:`patch.linewidth`
            Line width for each patch making up the collection.
        linestyles : str or tuple or list thereof, default: 'solid'
            Valid strings are ['solid', 'dashed', 'dashdot', 'dotted', '-',
            '--', '-.', ':']. Dash tuples should be of the form::
                (offset, onoffseq),
            where *onoffseq* is an even length tuple of on and off ink lengths
            in points. For examples, see
            :doc:`/gallery/lines_bars_and_markers/linestyles`.
        capstyle : `.CapStyle`-like, default: 'butt'
            Style to use for capping lines for all paths in the collection.
            Allowed values are %(CapStyle)s.
        joinstyle : `.JoinStyle`-like, default: 'round'
            Style to use for joining lines for all paths in the collection.
            Allowed values are %(JoinStyle)s.
        antialiaseds : bool or list of bool, default: :rc:`patch.antialiased`
            Whether each patch in the collection should be drawn with
            antialiasing.
        offsets : (float, float) or list thereof, default: (0, 0)
            A vector by which to translate each patch after rendering (default
            is no translation). The translation is performed in screen (pixel)
            coordinates (i.e. after the Artist's transform is applied).
        offset_transform : `~.Transform`, default: `.IdentityTransform`
            A single transform which will be applied to each *offsets* vector
            before it is used.
        cmap, norm
            Data normalization and colormapping parameters. See
            `.ScalarMappable` for a detailed description.
        hatch : str, optional
            Hatching pattern to use in filled paths, if any. Valid strings are
            ['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']. See
            :doc:`/gallery/shapes_and_collections/hatch_style_reference` for
            the meaning of each hatch type.
        pickradius : float, default: 5.0
            If ``pickradius <= 0``, then `.Collection.contains` will return
            ``True`` whenever the test point is inside of one of the polygons
            formed by the control points of a Path in the Collection. On the
            other hand, if it is greater than 0, then we instead check if the
            test point is contained in a stroke of width ``2*pickradius``
            following any of the Paths in the Collection.
        urls : list of str, default: None
            A URL for each patch to link to once drawn. Currently only works
            for the SVG backend. See :doc:`/gallery/misc/hyperlinks_sgskip` for
            examples.
        zorder : float, default: 1
            The drawing order, shared by all Patches in the Collection. See
            :doc:`/gallery/misc/zorder_demo` for all defaults and examples.
        **kwargs
            Remaining keyword arguments will be used to set properties as
            ``Collection.set_{key}(val)`` for each key-value pair in *kwargs*.
        """
        super().__init__(self._get_colorizer(cmap, norm, colorizer))
        # list of un-scaled dash patterns
        # this is needed scaling the dash pattern by linewidth
        self._us_linestyles = [(0, None)]
        # list of dash patterns
        self._linestyles = [(0, None)]
        # list of unbroadcast/scaled linewidths
        self._us_lw = [0]
        self._linewidths = [0]
        self._gapcolor = None  # Currently only used by LineCollection.
        # Flags set by _set_mappable_flags: are colors from mapping an array?
        self._face_is_mapped = None
        self._edge_is_mapped = None
        self._mapped_colors = None  # calculated in update_scalarmappable
        self._hatch_color = mcolors.to_rgba(mpl.rcParams['hatch.color'])
        self._hatch_linewidth = mpl.rcParams['hatch.linewidth']
        # Route all initial values through the public setters so validation,
        # defaulting and broadcasting happen in exactly one place.
        self.set_facecolor(facecolors)
        self.set_edgecolor(edgecolors)
        self.set_linewidth(linewidths)
        self.set_linestyle(linestyles)
        self.set_antialiased(antialiaseds)
        self.set_pickradius(pickradius)
        self.set_urls(urls)
        self.set_hatch(hatch)
        self.set_zorder(zorder)
        # None means "not set"; the cap/join style is then left to the
        # backend default at draw time.
        if capstyle:
            self.set_capstyle(capstyle)
        else:
            self._capstyle = None
        if joinstyle:
            self.set_joinstyle(joinstyle)
        else:
            self._joinstyle = None
        if offsets is not None:
            offsets = np.asanyarray(offsets, float)
            # Broadcast (2,) -> (1, 2) but nothing else.
            if offsets.shape == (2,):
                offsets = offsets[None, :]
        self._offsets = offsets
        self._offset_transform = offset_transform
        self._path_effects = None
        self._internal_update(kwargs)
        self._paths = None
    def get_paths(self):
        """Return the list of `.Path` objects making up this collection."""
        return self._paths

    def set_paths(self, paths):
        """Set the list of `.Path` objects making up this collection."""
        self._paths = paths
        self.stale = True

    def get_transforms(self):
        """Return the per-element transforms (see ``_transforms``)."""
        return self._transforms

    def get_offset_transform(self):
        """Return the `.Transform` instance used by this artist offset."""
        # Lazily default to the identity transform ...
        if self._offset_transform is None:
            self._offset_transform = transforms.IdentityTransform()
        # ... and resolve objects that know how to convert themselves to a
        # matplotlib transform (e.g. projection helpers) on first access.
        elif (not isinstance(self._offset_transform, transforms.Transform)
              and hasattr(self._offset_transform, '_as_mpl_transform')):
            self._offset_transform = \
                self._offset_transform._as_mpl_transform(self.axes)
        return self._offset_transform

    def set_offset_transform(self, offset_transform):
        """
        Set the artist offset transform.
        Parameters
        ----------
        offset_transform : `.Transform`
        """
        self._offset_transform = offset_transform
    def get_datalim(self, transData):
        # Calculate the data limits and return them as a `.Bbox`.
        #
        # This operation depends on the transforms for the data in the
        # collection and whether the collection has offsets:
        #
        # 1. offsets = None, transform child of transData: use the paths for
        # the automatic limits (i.e. for LineCollection in streamline).
        # 2. offsets != None: offset_transform is child of transData:
        #
        # a. transform is child of transData: use the path + offset for
        # limits (i.e for bar).
        # b. transform is not a child of transData: just use the offsets
        # for the limits (i.e. for scatter)
        #
        # 3. otherwise return a null Bbox.
        transform = self.get_transform()
        offset_trf = self.get_offset_transform()
        if not (isinstance(offset_trf, transforms.IdentityTransform)
                or offset_trf.contains_branch(transData)):
            # if the offsets are in some coords other than data,
            # then don't use them for autoscaling.
            return transforms.Bbox.null()
        paths = self.get_paths()
        if not len(paths):
            # No paths to transform
            return transforms.Bbox.null()
        if not transform.is_affine:
            # Apply only the non-affine part here; the affine remainder is
            # handled below so get_path_collection_extents sees affine input.
            paths = [transform.transform_path_non_affine(p) for p in paths]
            # Don't convert transform to transform.get_affine() here because
            # we may have transform.contains_branch(transData) but not
            # transforms.get_affine().contains_branch(transData). But later,
            # be careful to only apply the affine part that remains.
        offsets = self.get_offsets()
        if any(transform.contains_branch_seperately(transData)):
            # collections that are just in data units (like quiver)
            # can properly have the axes limits set by their shape +
            # offset. LineCollections that have no offsets can
            # also use this algorithm (like streamplot).
            if isinstance(offsets, np.ma.MaskedArray):
                offsets = offsets.filled(np.nan)
                # get_path_collection_extents handles nan but not masked arrays
            return mpath.get_path_collection_extents(
                transform.get_affine() - transData, paths,
                self.get_transforms(),
                offset_trf.transform_non_affine(offsets),
                offset_trf.get_affine().frozen())
        # NOTE: None is the default case where no offsets were passed in
        if self._offsets is not None:
            # this is for collections that have their paths (shapes)
            # in physical, axes-relative, or figure-relative units
            # (i.e. like scatter). We can't uniquely set limits based on
            # those shapes, so we just set the limits based on their
            # location.
            offsets = (offset_trf - transData).transform(offsets)
            # note A-B means A B^{-1}
            offsets = np.ma.masked_invalid(offsets)
            if not offsets.mask.all():
                bbox = transforms.Bbox.null()
                bbox.update_from_data_xy(offsets)
                return bbox
        return transforms.Bbox.null()

    def get_window_extent(self, renderer=None):
        # TODO: check to ensure that this does not fail for
        # cases other than scatter plot legend
        # Identity transform: the offsets are already in display space here.
        return self.get_datalim(transforms.IdentityTransform())
    def _prepare_points(self):
        # Helper for drawing and hit testing.
        # Returns (affine transform, affine offset transform, nan-filled
        # offsets array, list of paths) with all unit conversion and
        # non-affine transformation already applied.
        transform = self.get_transform()
        offset_trf = self.get_offset_transform()
        offsets = self.get_offsets()
        paths = self.get_paths()
        if self.have_units():
            # Convert path vertices and offsets from data units (e.g. dates,
            # categoricals) to floats before transforming.
            paths = []
            for path in self.get_paths():
                vertices = path.vertices
                xs, ys = vertices[:, 0], vertices[:, 1]
                xs = self.convert_xunits(xs)
                ys = self.convert_yunits(ys)
                paths.append(mpath.Path(np.column_stack([xs, ys]), path.codes))
            xs = self.convert_xunits(offsets[:, 0])
            ys = self.convert_yunits(offsets[:, 1])
            offsets = np.ma.column_stack([xs, ys])
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(path)
                     for path in paths]
            transform = transform.get_affine()
        if not offset_trf.is_affine:
            offsets = offset_trf.transform_non_affine(offsets)
            # This might have changed an ndarray into a masked array.
            offset_trf = offset_trf.get_affine()
        if isinstance(offsets, np.ma.MaskedArray):
            offsets = offsets.filled(np.nan)
            # Changing from a masked array to nan-filled ndarray
            # is probably most efficient at this point.
        return transform, offset_trf, offsets, paths

    @artist.allow_rasterization
    def draw(self, renderer):
        """Render the collection using *renderer*."""
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        self.update_scalarmappable()
        transform, offset_trf, offsets, paths = self._prepare_points()
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_snap(self.get_snap())
        if self._hatch:
            gc.set_hatch(self._hatch)
            gc.set_hatch_color(self._hatch_color)
            gc.set_hatch_linewidth(self._hatch_linewidth)
        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())
        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)
        # If the collection is made up of a single shape/color/stroke,
        # it can be rendered once and blitted multiple times, using
        # `draw_markers` rather than `draw_path_collection`. This is
        # *much* faster for Agg, and results in smaller file sizes in
        # PDF/SVG/PS.
        trans = self.get_transforms()
        facecolors = self.get_facecolor()
        edgecolors = self.get_edgecolor()
        do_single_path_optimization = False
        if (len(paths) == 1 and len(trans) <= 1 and
                len(facecolors) == 1 and len(edgecolors) == 1 and
                len(self._linewidths) == 1 and
                all(ls[1] is None for ls in self._linestyles) and
                len(self._antialiaseds) == 1 and len(self._urls) == 1 and
                self.get_hatch() is None):
            if len(trans):
                combined_transform = transforms.Affine2D(trans[0]) + transform
            else:
                combined_transform = transform
            extents = paths[0].get_extents(combined_transform)
            # Only use the marker fast path for shapes smaller than the
            # figure; huge paths are cheaper to draw once than to blit.
            if (extents.width < self.get_figure(root=True).bbox.width
                    and extents.height < self.get_figure(root=True).bbox.height):
                do_single_path_optimization = True
        if self._joinstyle:
            gc.set_joinstyle(self._joinstyle)
        if self._capstyle:
            gc.set_capstyle(self._capstyle)
        if do_single_path_optimization:
            gc.set_foreground(tuple(edgecolors[0]))
            gc.set_linewidth(self._linewidths[0])
            gc.set_dashes(*self._linestyles[0])
            gc.set_antialiased(self._antialiaseds[0])
            gc.set_url(self._urls[0])
            renderer.draw_markers(
                gc, paths[0], combined_transform.frozen(),
                mpath.Path(offsets), offset_trf, tuple(facecolors[0]))
        else:
            if self._gapcolor is not None:
                # First draw paths within the gaps.
                ipaths, ilinestyles = self._get_inverse_paths_linestyles()
                renderer.draw_path_collection(
                    gc, transform.frozen(), ipaths,
                    self.get_transforms(), offsets, offset_trf,
                    [mcolors.to_rgba("none")], self._gapcolor,
                    self._linewidths, ilinestyles,
                    self._antialiaseds, self._urls,
                    "screen")
            renderer.draw_path_collection(
                gc, transform.frozen(), paths,
                self.get_transforms(), offsets, offset_trf,
                self.get_facecolor(), self.get_edgecolor(),
                self._linewidths, self._linestyles,
                self._antialiaseds, self._urls,
                "screen")  # offset_position, kept for backcompat.
        gc.restore()
        renderer.close_group(self.__class__.__name__)
        self.stale = False
def set_pickradius(self, pickradius):
"""
Set the pick radius used for containment tests.
Parameters
----------
pickradius : float
Pick radius, in points.
"""
if not isinstance(pickradius, Real):
raise ValueError(
f"pickradius must be a real-valued number, not {pickradius!r}")
self._pickradius = pickradius
def get_pickradius(self):
return self._pickradius
    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the collection.
        Returns ``bool, dict(ind=itemlist)``, where every item in itemlist
        contains the event.
        """
        if self._different_canvas(mouseevent) or not self.get_visible():
            return False, {}
        # A numeric picker overrides the default pick radius; the literal
        # True (bool) is excluded because it merely enables picking.
        pickradius = (
            float(self._picker)
            if isinstance(self._picker, Number) and
            self._picker is not True  # the bool, not just nonzero or 1
            else self._pickradius)
        if self.axes:
            # Make sure the view limits are up to date before transforming.
            self.axes._unstale_viewLim()
        transform, offset_trf, offsets, paths = self._prepare_points()
        # Tests if the point is contained on one of the polygons formed
        # by the control points of each of the paths. A point is considered
        # "on" a path if it would lie within a stroke of width 2*pickradius
        # following the path. If pickradius <= 0, then we instead simply check
        # if the point is *inside* of the path instead.
        ind = _path.point_in_path_collection(
            mouseevent.x, mouseevent.y, pickradius,
            transform.frozen(), paths, self.get_transforms(),
            offsets, offset_trf, pickradius <= 0)
        return len(ind) > 0, dict(ind=ind)
def set_urls(self, urls):
"""
Parameters
----------
urls : list of str or None
Notes
-----
URLs are currently only implemented by the SVG backend. They are
ignored by all other backends.
"""
self._urls = urls if urls is not None else [None]
self.stale = True
def get_urls(self):
"""
Return a list of URLs, one for each element of the collection.
The list contains *None* for elements without a URL. See
:doc:`/gallery/misc/hyperlinks_sgskip` for an example.
"""
return self._urls
    def set_hatch(self, hatch):
        r"""
        Set the hatching pattern
        *hatch* can be one of::
            / - diagonal hatching
            \ - back diagonal
            | - vertical
            - - horizontal
            + - crossed
            x - crossed diagonal
            o - small circle
            O - large circle
            . - dots
            * - stars
        Letters can be combined, in which case all the specified
        hatchings are done. If same letter repeats, it increases the
        density of hatching of that pattern.
        Unlike other properties such as linewidth and colors, hatching
        can only be specified for the collection as a whole, not separately
        for each member.
        Parameters
        ----------
        hatch : {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}
        """
        # Use validate_hatch(list) after deprecation.
        # Raises/warns on invalid hatch characters; does not modify *hatch*.
        mhatch._validate_hatch_pattern(hatch)
        self._hatch = hatch
        self.stale = True

    def get_hatch(self):
        """Return the current hatching pattern."""
        return self._hatch
def set_hatch_linewidth(self, lw):
"""Set the hatch linewidth."""
self._hatch_linewidth = lw
def get_hatch_linewidth(self):
"""Return the hatch linewidth."""
return self._hatch_linewidth
def set_offsets(self, offsets):
"""
Set the offsets for the collection.
Parameters
----------
offsets : (N, 2) or (2,) array-like
"""
offsets = np.asanyarray(offsets)
if offsets.shape == (2,): # Broadcast (2,) -> (1, 2) but nothing else.
offsets = offsets[None, :]
cstack = (np.ma.column_stack if isinstance(offsets, np.ma.MaskedArray)
else np.column_stack)
self._offsets = cstack(
(np.asanyarray(self.convert_xunits(offsets[:, 0]), float),
np.asanyarray(self.convert_yunits(offsets[:, 1]), float)))
self.stale = True
def get_offsets(self):
"""Return the offsets for the collection."""
# Default to zeros in the no-offset (None) case
return np.zeros((1, 2)) if self._offsets is None else self._offsets
    def _get_default_linewidth(self):
        # This may be overridden in a subclass.
        return mpl.rcParams['patch.linewidth']  # validated as float

    def set_linewidth(self, lw):
        """
        Set the linewidth(s) for the collection. *lw* can be a scalar
        or a sequence; if it is a sequence the patches will cycle
        through the sequence
        Parameters
        ----------
        lw : float or list of floats
        """
        if lw is None:
            lw = self._get_default_linewidth()
        # get the un-scaled/broadcast lw
        self._us_lw = np.atleast_1d(lw)
        # scale all of the dash patterns.
        # Dash patterns are stored pre-scaled by linewidth, so any linewidth
        # change must rebroadcast and rescale them.
        self._linewidths, self._linestyles = self._bcast_lwls(
            self._us_lw, self._us_linestyles)
        self.stale = True
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink in points.
Parameters
----------
ls : str or tuple or list thereof
Valid values for individual linestyles include {'-', '--', '-.',
':', '', (offset, on-off-seq)}. See `.Line2D.set_linestyle` for a
complete description.
"""
# get the list of raw 'unscaled' dash patterns
self._us_linestyles = mlines._get_dash_patterns(ls)
# broadcast and scale the lw and dash patterns
self._linewidths, self._linestyles = self._bcast_lwls(
self._us_lw, self._us_linestyles)
    @_docstring.interpd
    def set_capstyle(self, cs):
        """
        Set the `.CapStyle` for the collection (for all its elements).
        Parameters
        ----------
        cs : `.CapStyle` or %(CapStyle)s
        """
        # CapStyle(...) validates the value and raises on unknown names.
        self._capstyle = CapStyle(cs)

    @_docstring.interpd
    def get_capstyle(self):
        """
        Return the cap style for the collection (for all its elements).
        Returns
        -------
        %(CapStyle)s or None
        """
        return self._capstyle.name if self._capstyle else None

    @_docstring.interpd
    def set_joinstyle(self, js):
        """
        Set the `.JoinStyle` for the collection (for all its elements).
        Parameters
        ----------
        js : `.JoinStyle` or %(JoinStyle)s
        """
        # JoinStyle(...) validates the value and raises on unknown names.
        self._joinstyle = JoinStyle(js)

    @_docstring.interpd
    def get_joinstyle(self):
        """
        Return the join style for the collection (for all its elements).
        Returns
        -------
        %(JoinStyle)s or None
        """
        return self._joinstyle.name if self._joinstyle else None
@staticmethod
def _bcast_lwls(linewidths, dashes):
"""
Internal helper function to broadcast + scale ls/lw
In the collection drawing code, the linewidth and linestyle are cycled
through as circular buffers (via ``v[i % len(v)]``). Thus, if we are
going to scale the dash pattern at set time (not draw time) we need to
do the broadcasting now and expand both lists to be the same length.
Parameters
----------
linewidths : list
line widths of collection
dashes : list
dash specification (offset, (dash pattern tuple))
Returns
-------
linewidths, dashes : list
Will be the same length, dashes are scaled by paired linewidth
"""
if mpl.rcParams['_internal.classic_mode']:
return linewidths, dashes
# make sure they are the same length so we can zip them
if len(dashes) != len(linewidths):
l_dashes = len(dashes)
l_lw = len(linewidths)
gcd = math.gcd(l_dashes, l_lw)
dashes = list(dashes) * (l_lw // gcd)
linewidths = list(linewidths) * (l_dashes // gcd)
# scale the dash patterns
dashes = [mlines._scale_dashes(o, d, lw)
for (o, d), lw in zip(dashes, linewidths)]
return linewidths, dashes
    def get_antialiased(self):
        """
        Get the antialiasing state for rendering.
        Returns
        -------
        array of bools
        """
        return self._antialiaseds

    def set_antialiased(self, aa):
        """
        Set the antialiasing state for rendering.
        Parameters
        ----------
        aa : bool or list of bools
        """
        if aa is None:
            aa = self._get_default_antialiased()
        # Store as a 1-D bool array so draw() can cycle through it.
        self._antialiaseds = np.atleast_1d(np.asarray(aa, bool))
        self.stale = True

    def _get_default_antialiased(self):
        # This may be overridden in a subclass.
        return mpl.rcParams['patch.antialiased']
    def set_color(self, c):
        """
        Set both the edgecolor and the facecolor.
        Parameters
        ----------
        c : :mpltype:`color` or list of RGBA tuples
        See Also
        --------
        Collection.set_facecolor, Collection.set_edgecolor
            For setting the edge or face color individually.
        """
        # Compound setter: delegates to the two individual color setters.
        self.set_facecolor(c)
        self.set_edgecolor(c)
    def _get_default_facecolor(self):
        # This may be overridden in a subclass.
        return mpl.rcParams['patch.facecolor']

    def _set_facecolor(self, c):
        # Internal setter: resolves defaults and converts to an RGBA array,
        # applying the collection-wide alpha.
        if c is None:
            c = self._get_default_facecolor()
        self._facecolors = mcolors.to_rgba_array(c, self._alpha)
        self.stale = True

    def set_facecolor(self, c):
        """
        Set the facecolor(s) of the collection. *c* can be a color (all patches
        have same color), or a sequence of colors; if it is a sequence the
        patches will cycle through the sequence.
        If *c* is 'none', the patch will not be filled.
        Parameters
        ----------
        c : :mpltype:`color` or list of :mpltype:`color`
        """
        if isinstance(c, str) and c.lower() in ("none", "face"):
            c = c.lower()
        # Remember the user-supplied value so alpha changes and scalar
        # mapping can re-derive the resolved colors later.
        self._original_facecolor = c
        self._set_facecolor(c)

    def get_facecolor(self):
        """Return the resolved facecolors as an RGBA array."""
        return self._facecolors

    def get_edgecolor(self):
        """Return the resolved edgecolors ('face' resolves to facecolors)."""
        if cbook._str_equal(self._edgecolors, 'face'):
            return self.get_facecolor()
        else:
            return self._edgecolors
    def _get_default_edgecolor(self):
        # This may be overridden in a subclass.
        return mpl.rcParams['patch.edgecolor']

    def _set_edgecolor(self, c):
        # Internal setter: resolves defaults, handles the special 'face'
        # value, and keeps the hatch color in sync with the first edgecolor.
        set_hatch_color = True
        if c is None:
            # Only draw a visible default edge when forced by rcParams, by
            # the subclass (_edge_default), or when the face is unfilled.
            if (mpl.rcParams['patch.force_edgecolor']
                    or self._edge_default
                    or cbook._str_equal(self._original_facecolor, 'none')):
                c = self._get_default_edgecolor()
            else:
                c = 'none'
                set_hatch_color = False
        if cbook._str_lower_equal(c, 'face'):
            # Defer resolution to get_edgecolor(), which mirrors facecolors.
            self._edgecolors = 'face'
            self.stale = True
            return
        self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
        if set_hatch_color and len(self._edgecolors):
            self._hatch_color = tuple(self._edgecolors[0])
        self.stale = True

    def set_edgecolor(self, c):
        """
        Set the edgecolor(s) of the collection.
        Parameters
        ----------
        c : :mpltype:`color` or list of :mpltype:`color` or 'face'
            The collection edgecolor(s). If a sequence, the patches cycle
            through it. If 'face', match the facecolor.
        """
        # We pass through a default value for use in LineCollection.
        # This allows us to maintain None as the default indicator in
        # _original_edgecolor.
        if isinstance(c, str) and c.lower() in ("none", "face"):
            c = c.lower()
        self._original_edgecolor = c
        self._set_edgecolor(c)
    def set_alpha(self, alpha):
        """
        Set the transparency of the collection.
        Parameters
        ----------
        alpha : float or array of float or None
            If not None, *alpha* values must be between 0 and 1, inclusive.
            If an array is provided, its length must match the number of
            elements in the collection. Masked values and nans are not
            supported.
        """
        artist.Artist._set_alpha_for_array(self, alpha)
        # Re-resolve both color arrays from the originally supplied values
        # so the new alpha is baked into the RGBA arrays.
        self._set_facecolor(self._original_facecolor)
        self._set_edgecolor(self._original_edgecolor)
    # Replace the docstring above with the shared array-aware one so help()
    # matches the actual accepted values.
    set_alpha.__doc__ = artist.Artist._set_alpha_for_array.__doc__

    def get_linewidth(self):
        """Return the linewidths (as broadcast with the dash patterns)."""
        return self._linewidths

    def get_linestyle(self):
        """Return the dash patterns, pre-scaled by linewidth."""
        return self._linestyles
    def _set_mappable_flags(self):
        """
        Determine whether edges and/or faces are color-mapped.
        This is a helper for update_scalarmappable.
        It sets Boolean flags '_edge_is_mapped' and '_face_is_mapped'.
        Returns
        -------
        mapping_change : bool
            True if either flag is True, or if a flag has changed.
        """
        # The flags are initialized to None to ensure this returns True
        # the first time it is called.
        edge0 = self._edge_is_mapped
        face0 = self._face_is_mapped
        # After returning, the flags must be Booleans, not None.
        self._edge_is_mapped = False
        self._face_is_mapped = False
        if self._A is not None:
            # Mapping only applies when an array has been set; a facecolor
            # of 'none' explicitly opts the faces out.
            if not cbook._str_equal(self._original_facecolor, 'none'):
                self._face_is_mapped = True
                if cbook._str_equal(self._original_edgecolor, 'face'):
                    self._edge_is_mapped = True
            else:
                # Unfilled: edges follow the mapping when no explicit
                # edgecolor was given.
                if self._original_edgecolor is None:
                    self._edge_is_mapped = True
        mapped = self._face_is_mapped or self._edge_is_mapped
        changed = (edge0 is None or face0 is None
                   or self._edge_is_mapped != edge0
                   or self._face_is_mapped != face0)
        return mapped or changed
    def update_scalarmappable(self):
        """
        Update colors from the scalar mappable array, if any.
        Assign colors to edges and faces based on the array and/or
        colors that were directly set, as appropriate.
        """
        if not self._set_mappable_flags():
            # Nothing is mapped and nothing changed: keep current colors.
            return
        # Allow possibility to call 'self.set_array(None)'.
        if self._A is not None:
            # QuadMesh can map 2d arrays (but pcolormesh supplies 1d array)
            if self._A.ndim > 1 and not isinstance(self, _MeshData):
                raise ValueError('Collections can only map rank 1 arrays')
            if np.iterable(self._alpha):
                if self._alpha.size != self._A.size:
                    raise ValueError(
                        f'Data array shape, {self._A.shape} '
                        'is incompatible with alpha array shape, '
                        f'{self._alpha.shape}. '
                        'This can occur with the deprecated '
                        'behavior of the "flat" shading option, '
                        'in which a row and/or column of the data '
                        'array is dropped.')
                # pcolormesh, scatter, maybe others flatten their _A
                self._alpha = self._alpha.reshape(self._A.shape)
            self._mapped_colors = self.to_rgba(self._A, self._alpha)
        # For each of face/edge, either take the mapped colors or re-resolve
        # the originally supplied (non-mapped) colors.
        if self._face_is_mapped:
            self._facecolors = self._mapped_colors
        else:
            self._set_facecolor(self._original_facecolor)
        if self._edge_is_mapped:
            self._edgecolors = self._mapped_colors
        else:
            self._set_edgecolor(self._original_edgecolor)
        self.stale = True
    def get_fill(self):
        """Return whether face is colored."""
        return not cbook._str_lower_equal(self._original_facecolor, "none")

    def update_from(self, other):
        """Copy properties from other to self."""
        artist.Artist.update_from(self, other)
        self._antialiaseds = other._antialiaseds
        self._mapped_colors = other._mapped_colors
        self._edge_is_mapped = other._edge_is_mapped
        self._original_edgecolor = other._original_edgecolor
        self._edgecolors = other._edgecolors
        self._face_is_mapped = other._face_is_mapped
        self._original_facecolor = other._original_facecolor
        self._facecolors = other._facecolors
        self._linewidths = other._linewidths
        self._linestyles = other._linestyles
        self._us_linestyles = other._us_linestyles
        self._pickradius = other._pickradius
        self._hatch = other._hatch
        # update_from for scalarmappable
        self._A = other._A
        self.norm = other.norm
        self.cmap = other.cmap
        self.stale = True
class _CollectionWithSizes(Collection):
    """
    Base class for collections that have an array of sizes.
    """
    # Per-subclass multiplier applied to the size-derived scale factor.
    _factor = 1.0

    def get_sizes(self):
        """
        Return the sizes ('areas') of the elements in the collection.
        Returns
        -------
        array
            The 'area' of each element.
        """
        return self._sizes

    def set_sizes(self, sizes, dpi=72.0):
        """
        Set the sizes of each member of the collection.
        Parameters
        ----------
        sizes : `numpy.ndarray` or None
            The size to set for each element of the collection. The
            value is the 'area' of the element.
        dpi : float, default: 72
            The dpi of the canvas.
        """
        if sizes is None:
            self._sizes = np.array([])
            self._transforms = np.empty((0, 3, 3))
        else:
            self._sizes = np.asarray(sizes)
            self._transforms = np.zeros((len(self._sizes), 3, 3))
            # sizes are areas, so the linear scale is sqrt(size); convert
            # from points to pixels via dpi/72.
            scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
            # Build one scaling Affine2D matrix per element (diagonal only).
            self._transforms[:, 0, 0] = scale
            self._transforms[:, 1, 1] = scale
            self._transforms[:, 2, 2] = 1.0
        self.stale = True

    @artist.allow_rasterization
    def draw(self, renderer):
        # Re-derive the per-element transforms with the figure's actual dpi
        # before delegating to the base drawing code.
        self.set_sizes(self._sizes, self.get_figure(root=True).dpi)
        super().draw(renderer)
class PathCollection(_CollectionWithSizes):
    r"""
    A collection of `~.path.Path`\s, as created by e.g. `~.Axes.scatter`.
    """

    def __init__(self, paths, sizes=None, **kwargs):
        """
        Parameters
        ----------
        paths : list of `.path.Path`
            The paths that will make up the `.Collection`.
        sizes : array-like
            The factor by which to scale each drawn `~.path.Path`. One unit
            squared in the Path's data space is scaled to be ``sizes**2``
            points when rendered.
        **kwargs
            Forwarded to `.Collection`.
        """
        super().__init__(**kwargs)
        self.set_paths(paths)
        self.set_sizes(sizes)
        self.stale = True

    def get_paths(self):
        # NOTE(review): identical to the base Collection.get_paths; the
        # override appears redundant — confirm before removing.
        return self._paths
def legend_elements(self, prop="colors", num="auto",
                    fmt=None, func=lambda x: x, **kwargs):
    """
    Create legend handles and labels for a PathCollection.

    Each legend handle is a `.Line2D` representing the Path that was drawn,
    and each label is a string that represents the Path.

    This is useful for obtaining a legend for a `~.Axes.scatter` plot;
    e.g.::

        scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
        plt.legend(*scatter.legend_elements())

    creates three legend elements, one for each color with the numerical
    values passed to *c* as the labels.

    Also see the :ref:`automatedlegendcreation` example.

    Parameters
    ----------
    prop : {"colors", "sizes"}, default: "colors"
        If "colors", the legend handles will show the different colors of
        the collection. If "sizes", the legend will show the different
        sizes. To set both, use *kwargs* to directly edit the `.Line2D`
        properties.
    num : int, None, "auto" (default), array-like, or `~.ticker.Locator`
        Target number of elements to create.
        If None, use all unique elements of the mappable array. If an
        integer, target to use *num* elements in the normed range.
        If *"auto"*, try to determine which option better suits the nature
        of the data.
        The number of created elements may slightly deviate from *num* due
        to a `~.ticker.Locator` being used to find useful locations.
        If a list or array, use exactly those elements for the legend.
        Finally, a `~.ticker.Locator` can be provided.
    fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
        The format or formatter to use for the labels. If a string must be
        a valid input for a `.StrMethodFormatter`. If None (the default),
        use a `.ScalarFormatter`.
    func : function, default: ``lambda x: x``
        Function to calculate the labels. Often the size (or color)
        argument to `~.Axes.scatter` will have been pre-processed by the
        user using a function ``s = f(x)`` to make the markers visible;
        e.g. ``size = np.log10(x)``. Providing the inverse of this
        function here allows that pre-processing to be inverted, so that
        the legend labels have the correct values; e.g. ``func = lambda
        x: 10**x``.
    **kwargs
        Allowed keyword arguments are *color* and *size*. E.g. it may be
        useful to set the color of the markers if *prop="sizes"* is used;
        similarly to set the size of the markers if *prop="colors"* is
        used. Any further parameters are passed onto the `.Line2D`
        instance. This may be useful to e.g. specify a different
        *markeredgecolor* or *alpha* for the legend handles.

    Returns
    -------
    handles : list of `.Line2D`
        Visual representation of each element of the legend.
    labels : list of str
        The string labels for elements of the legend.
    """
    handles = []
    labels = []
    hasarray = self.get_array() is not None
    # Default label formatter: plain scalar text without offset notation.
    if fmt is None:
        fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True)
    elif isinstance(fmt, str):
        fmt = mpl.ticker.StrMethodFormatter(fmt)
    # Formatters normally hang off an Axis; give this one a stand-in axis
    # so the view/data intervals below can be set.
    fmt.create_dummy_axis()

    if prop == "colors":
        if not hasarray:
            # Nothing was colormapped, so there are no colors to enumerate.
            warnings.warn("Collection without array used. Make sure to "
                          "specify the values to be colormapped via the "
                          "`c` argument.")
            return handles, labels
        u = np.unique(self.get_array())
        size = kwargs.pop("size", mpl.rcParams["lines.markersize"])
    elif prop == "sizes":
        u = np.unique(self.get_sizes())
        color = kwargs.pop("color", "k")
    else:
        raise ValueError("Valid values for `prop` are 'colors' or "
                         f"'sizes'. You supplied '{prop}' instead.")

    fu = func(u)
    fmt.axis.set_view_interval(fu.min(), fu.max())
    fmt.axis.set_data_interval(fu.min(), fu.max())
    if num == "auto":
        num = 9
        # Few enough unique values: just show all of them.
        if len(u) <= num:
            num = None
    if num is None:
        values = u
        label_values = func(values)
    else:
        if prop == "colors":
            arr = self.get_array()
        elif prop == "sizes":
            arr = self.get_sizes()
        if isinstance(num, mpl.ticker.Locator):
            loc = num
        elif np.iterable(num):
            loc = mpl.ticker.FixedLocator(num)
        else:
            num = int(num)
            loc = mpl.ticker.MaxNLocator(nbins=num, min_n_ticks=num-1,
                                         steps=[1, 2, 2.5, 3, 5, 6, 8, 10])
        # Let the locator pick "nice" label positions, then drop any that
        # fall outside the data range.
        label_values = loc.tick_values(func(arr).min(), func(arr).max())
        cond = ((label_values >= func(arr).min()) &
                (label_values <= func(arr).max()))
        label_values = label_values[cond]
        # Numerically invert *func* (sampled on 256 points) to map the
        # label values back to data-space values.
        yarr = np.linspace(arr.min(), arr.max(), 256)
        xarr = func(yarr)
        ix = np.argsort(xarr)
        values = np.interp(label_values, xarr[ix], yarr[ix])

    kw = {"markeredgewidth": self.get_linewidths()[0],
          "alpha": self.get_alpha(),
          **kwargs}

    for val, lab in zip(values, label_values):
        if prop == "colors":
            color = self.cmap(self.norm(val))
        elif prop == "sizes":
            # Marker size is in points, sizes are areas in points^2.
            size = np.sqrt(val)
            if np.isclose(size, 0.0):
                continue
        h = mlines.Line2D([0], [0], ls="", color=color, ms=size,
                          marker=self.get_paths()[0], **kw)
        handles.append(h)
        if hasattr(fmt, "set_locs"):
            fmt.set_locs(label_values)
        l = fmt(lab)
        labels.append(l)
    return handles, labels
class PolyCollection(_CollectionWithSizes):
    # A collection of (optionally closed) polygons, each given by a list of
    # vertices; supports per-polygon area scaling via *sizes*.

    def __init__(self, verts, sizes=None, *, closed=True, **kwargs):
        """
        Parameters
        ----------
        verts : list of array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (M, 2).
        sizes : array-like, default: None
            Squared scaling factors for the polygons. The coordinates of each
            polygon *verts_i* are multiplied by the square-root of the
            corresponding entry in *sizes* (i.e., *sizes* specify the scaling
            of areas). The scaling is applied before the Artist master
            transform.
        closed : bool, default: True
            Whether the polygon should be closed by adding a CLOSEPOLY
            connection at the end.
        **kwargs
            Forwarded to `.Collection`.
        """
        super().__init__(**kwargs)
        self.set_sizes(sizes)
        self.set_verts(verts, closed)
        self.stale = True

    def set_verts(self, verts, closed=True):
        """
        Set the vertices of the polygons.

        Parameters
        ----------
        verts : list of array-like
            The sequence of polygons [*verts0*, *verts1*, ...] where each
            element *verts_i* defines the vertices of polygon *i* as a 2D
            array-like of shape (M, 2).
        closed : bool, default: True
            Whether the polygon should be closed by adding a CLOSEPOLY
            connection at the end.
        """
        self.stale = True
        # Paths cannot represent masks; encode masked vertices as NaNs.
        if isinstance(verts, np.ma.MaskedArray):
            verts = verts.astype(float).filled(np.nan)

        # No need to do anything fancy if the path isn't closed.
        if not closed:
            self._paths = [mpath.Path(xy) for xy in verts]
            return

        # Fast path for arrays
        if isinstance(verts, np.ndarray) and len(verts.shape) == 3:
            # Append the first vertex of each polygon to close it.
            verts_pad = np.concatenate((verts, verts[:, :1]), axis=1)
            # Creating the codes once is much faster than having Path do it
            # separately each time by passing closed=True.
            codes = np.empty(verts_pad.shape[1], dtype=mpath.Path.code_type)
            codes[:] = mpath.Path.LINETO
            codes[0] = mpath.Path.MOVETO
            codes[-1] = mpath.Path.CLOSEPOLY
            self._paths = [mpath.Path(xy, codes) for xy in verts_pad]
            return

        self._paths = []
        for xy in verts:
            if len(xy):
                self._paths.append(mpath.Path._create_closed(xy))
            else:
                # Keep an (empty) path so per-element properties stay
                # aligned with *verts*.
                self._paths.append(mpath.Path(xy))

    set_paths = set_verts  # Polygons *are* the paths of this collection.

    def set_verts_and_codes(self, verts, codes):
        """Initialize vertices with path codes."""
        if len(verts) != len(codes):
            raise ValueError("'codes' must be a 1D list or array "
                             "with the same length of 'verts'")
        self._paths = [mpath.Path(xy, cds) if len(xy) else mpath.Path(xy)
                       for xy, cds in zip(verts, codes)]
        self.stale = True
class FillBetweenPolyCollection(PolyCollection):
    """
    `.PolyCollection` that fills the area between two x- or y-curves.
    """

    def __init__(
            self, t_direction, t, f1, f2, *,
            where=None, interpolate=False, step=None, **kwargs):
        """
        Parameters
        ----------
        t_direction : {{'x', 'y'}}
            The axes on which the variable lies.

            - 'x': the curves are ``(t, f1)`` and ``(t, f2)``.
            - 'y': the curves are ``(f1, t)`` and ``(f2, t)``.

        t : array-like
            The ``t_direction`` coordinates of the nodes defining the curves.
        f1 : array-like or float
            The other coordinates of the nodes defining the first curve.
        f2 : array-like or float
            The other coordinates of the nodes defining the second curve.
        where : array-like of bool, optional
            Define *where* to exclude some {dir} regions from being filled.
            The filled regions are defined by the coordinates ``t[where]``.
            More precisely, fill between ``t[i]`` and ``t[i+1]`` if
            ``where[i] and where[i+1]``. Note that this definition implies
            that an isolated *True* value between two *False* values in *where*
            will not result in filling. Both sides of the *True* position
            remain unfilled due to the adjacent *False* values.
        interpolate : bool, default: False
            This option is only relevant if *where* is used and the two curves
            are crossing each other.

            Semantically, *where* is often used for *f1* > *f2* or
            similar. By default, the nodes of the polygon defining the filled
            region will only be placed at the positions in the *t* array.
            Such a polygon cannot describe the above semantics close to the
            intersection. The t-sections containing the intersection are
            simply clipped.

            Setting *interpolate* to *True* will calculate the actual
            intersection point and extend the filled region up to this point.
        step : {{'pre', 'post', 'mid'}}, optional
            Define *step* if the filling should be a step function,
            i.e. constant in between *t*. The value determines where the
            step will occur:

            - 'pre': The f value is continued constantly to the left from
              every *t* position, i.e. the interval ``(t[i-1], t[i]]`` has the
              value ``f[i]``.
            - 'post': The y value is continued constantly to the right from
              every *x* position, i.e. the interval ``[t[i], t[i+1])`` has the
              value ``f[i]``.
            - 'mid': Steps occur half-way between the *t* positions.

        **kwargs
            Forwarded to `.PolyCollection`.

        See Also
        --------
        .Axes.fill_between, .Axes.fill_betweenx
        """
        self.t_direction = t_direction
        self._interpolate = interpolate
        self._step = step
        # _make_verts also initializes self._bbox from the data.
        verts = self._make_verts(t, f1, f2, where)
        super().__init__(verts, **kwargs)

    @staticmethod
    def _f_dir_from_t(t_direction):
        """The direction that is other than `t_direction`."""
        if t_direction == "x":
            return "y"
        elif t_direction == "y":
            return "x"
        else:
            msg = f"t_direction must be 'x' or 'y', got {t_direction!r}"
            raise ValueError(msg)

    @property
    def _f_direction(self):
        """The direction that is other than `self.t_direction`."""
        return self._f_dir_from_t(self.t_direction)

    def set_data(self, t, f1, f2, *, where=None):
        """
        Set new values for the two bounding curves.

        Parameters
        ----------
        t : array-like
            The ``self.t_direction`` coordinates of the nodes defining the curves.
        f1 : array-like or float
            The other coordinates of the nodes defining the first curve.
        f2 : array-like or float
            The other coordinates of the nodes defining the second curve.
        where : array-like of bool, optional
            Define *where* to exclude some {dir} regions from being filled.
            The filled regions are defined by the coordinates ``t[where]``.
            More precisely, fill between ``t[i]`` and ``t[i+1]`` if
            ``where[i] and where[i+1]``. Note that this definition implies
            that an isolated *True* value between two *False* values in *where*
            will not result in filling. Both sides of the *True* position
            remain unfilled due to the adjacent *False* values.

        See Also
        --------
        .PolyCollection.set_verts, .Line2D.set_data
        """
        # Run the new data through the Axes' unit-conversion machinery first.
        t, f1, f2 = self.axes._fill_between_process_units(
            self.t_direction, self._f_direction, t, f2, f2) if False else \
            self.axes._fill_between_process_units(
                self.t_direction, self._f_direction, t, f1, f2)
        verts = self._make_verts(t, f1, f2, where)
        self.set_verts(verts)

    def get_datalim(self, transData):
        """Calculate the data limits and return them as a `.Bbox`."""
        datalim = transforms.Bbox.null()
        datalim.update_from_data_xy((self.get_transform() - transData).transform(
            np.concatenate([self._bbox, [self._bbox.minpos]])))
        return datalim

    def _make_verts(self, t, f1, f2, where):
        """
        Make verts that can be forwarded to `.PolyCollection`.

        Also updates ``self._bbox`` with the extent of the selected points.
        """
        self._validate_shapes(self.t_direction, self._f_direction, t, f1, f2)
        where = self._get_data_mask(t, f1, f2, where)
        t, f1, f2 = np.broadcast_arrays(np.atleast_1d(t), f1, f2, subok=True)
        self._bbox = transforms.Bbox.null()
        self._bbox.update_from_data_xy(self._fix_pts_xy_order(np.concatenate([
            np.stack((t[where], f[where]), axis=-1) for f in (f1, f2)])))
        # One polygon per contiguous run of True values in *where*.
        return [
            self._make_verts_for_region(t, f1, f2, idx0, idx1)
            for idx0, idx1 in cbook.contiguous_regions(where)
        ]

    def _get_data_mask(self, t, f1, f2, where):
        """
        Return a bool array, with True at all points that should eventually be rendered.

        The array is True at a point if none of the data inputs
        *t*, *f1*, *f2* is masked and if the input *where* is true at that point.
        """
        if where is None:
            where = True
        else:
            where = np.asarray(where, dtype=bool)
            if where.size != t.size:
                msg = "where size ({}) does not match {!r} size ({})".format(
                    where.size, self.t_direction, t.size)
                raise ValueError(msg)
        # Exclude any point where at least one of the inputs is masked.
        return where & ~functools.reduce(
            np.logical_or, map(np.ma.getmaskarray, [t, f1, f2]))

    @staticmethod
    def _validate_shapes(t_dir, f_dir, t, f1, f2):
        """Validate that t, f1 and f2 are 1-dimensional and have the same length."""
        # Build user-facing names like "x", "y1", "y2" for error messages.
        names = (d + s for d, s in zip((t_dir, f_dir, f_dir), ("", "1", "2")))
        for name, array in zip(names, [t, f1, f2]):
            if array.ndim > 1:
                raise ValueError(f"{name!r} is not 1-dimensional")
            if t.size > 1 and array.size > 1 and t.size != array.size:
                msg = "{!r} has size {}, but {!r} has an unequal size of {}".format(
                    t_dir, t.size, name, array.size)
                raise ValueError(msg)

    def _make_verts_for_region(self, t, f1, f2, idx0, idx1):
        """
        Make ``verts`` for a contiguous region between ``idx0`` and ``idx1``, taking
        into account ``step`` and ``interpolate``.
        """
        t_slice = t[idx0:idx1]
        f1_slice = f1[idx0:idx1]
        f2_slice = f2[idx0:idx1]
        if self._step is not None:
            step_func = cbook.STEP_LOOKUP_MAP["steps-" + self._step]
            t_slice, f1_slice, f2_slice = step_func(t_slice, f1_slice, f2_slice)

        if self._interpolate:
            start = self._get_interpolating_points(t, f1, f2, idx0)
            end = self._get_interpolating_points(t, f1, f2, idx1)
        else:
            # Handle scalar f2 (e.g. 0): the fill should go all
            # the way down to 0 even if none of the dep1 sample points do.
            start = t_slice[0], f2_slice[0]
            end = t_slice[-1], f2_slice[-1]

        # Trace f1 forward, then f2 backward, to form a closed polygon.
        pts = np.concatenate((
            np.asarray([start]),
            np.stack((t_slice, f1_slice), axis=-1),
            np.asarray([end]),
            np.stack((t_slice, f2_slice), axis=-1)[::-1]))

        return self._fix_pts_xy_order(pts)

    @classmethod
    def _get_interpolating_points(cls, t, f1, f2, idx):
        """Calculate interpolating points."""
        im1 = max(idx - 1, 0)
        t_values = t[im1:idx+1]
        diff_values = f1[im1:idx+1] - f2[im1:idx+1]
        f1_values = f1[im1:idx+1]

        if len(diff_values) == 2:
            # A masked neighbor means no crossing can be computed; fall back
            # to the unmasked endpoint.
            if np.ma.is_masked(diff_values[1]):
                return t[im1], f1[im1]
            elif np.ma.is_masked(diff_values[0]):
                return t[idx], f1[idx]

        # Root of f1 - f2 gives the t of the intersection; interpolate f1
        # there to get its other coordinate.
        diff_root_t = cls._get_diff_root(0, diff_values, t_values)
        diff_root_f = cls._get_diff_root(diff_root_t, t_values, f1_values)
        return diff_root_t, diff_root_f

    @staticmethod
    def _get_diff_root(x, xp, fp):
        """Calculate diff root."""
        # np.interp requires xp to be increasing.
        order = xp.argsort()
        return np.interp(x, xp[order], fp[order])

    def _fix_pts_xy_order(self, pts):
        """
        Fix pts calculation results with `self.t_direction`.

        In the workflow, it is assumed that `self.t_direction` is 'x'. If this
        is not true, we need to exchange the coordinates.
        """
        return pts[:, ::-1] if self.t_direction == "y" else pts
class RegularPolyCollection(_CollectionWithSizes):
    """A collection of n-sided regular polygons."""

    _path_generator = mpath.Path.unit_regular_polygon
    # Scale factor so that *sizes* is the area of the circumscribing circle
    # (in points^2), per the *sizes* documentation below.
    _factor = np.pi ** (-1/2)

    def __init__(self,
                 numsides,
                 *,
                 rotation=0,
                 sizes=(1,),
                 **kwargs):
        """
        Parameters
        ----------
        numsides : int
            The number of sides of the polygon.
        rotation : float
            The rotation of the polygon in radians.
        sizes : tuple of float
            The area of the circle circumscribing the polygon in points^2.
        **kwargs
            Forwarded to `.Collection`.

        Examples
        --------
        See :doc:`/gallery/event_handling/lasso_demo` for a complete example::

            offsets = np.random.rand(20, 2)
            facecolors = [cm.jet(x) for x in np.random.rand(20)]

            collection = RegularPolyCollection(
                numsides=5, # a pentagon
                rotation=0, sizes=(50,),
                facecolors=facecolors,
                edgecolors=("black",),
                linewidths=(1,),
                offsets=offsets,
                offset_transform=ax.transData,
                )
        """
        super().__init__(**kwargs)
        self.set_sizes(sizes)
        self._numsides = numsides
        # All polygons share a single unit path; per-element size/rotation is
        # applied via self._transforms at draw time.
        self._paths = [self._path_generator(numsides)]
        self._rotation = rotation
        self.set_transform(transforms.IdentityTransform())

    def get_numsides(self):
        """Return the number of sides of the polygons."""
        return self._numsides

    def get_rotation(self):
        """Return the rotation of the polygons in radians."""
        return self._rotation

    @artist.allow_rasterization
    def draw(self, renderer):
        # Recompute size transforms at the current figure dpi, then fold the
        # rotation into each per-element transform.
        self.set_sizes(self._sizes, self.get_figure(root=True).dpi)
        self._transforms = [
            transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
            for x in self._transforms
        ]
        # Explicitly not super().draw, because set_sizes must be called before
        # updating self._transforms.
        Collection.draw(self, renderer)
class StarPolygonCollection(RegularPolyCollection):
    """A collection of regular star-shaped polygons with *numsides* points."""

    _path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
    """A collection of regular asterisk shapes with *numsides* points."""

    _path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
    r"""
    Represents a sequence of `.Line2D`\s that should be drawn together.

    This class extends `.Collection` to represent a sequence of
    `.Line2D`\s instead of just a sequence of `.Patch`\s.

    Just as in `.Collection`, each property of a *LineCollection* may be either
    a single value or a list of values. This list is then used cyclically for
    each element of the LineCollection, so the property of the ``i``\th element
    of the collection is::

        prop[i % len(prop)]

    The properties of each member of a *LineCollection* default to their values
    in :rc:`lines.*` instead of :rc:`patch.*`, and the property *colors* is
    added in place of *edgecolors*.
    """

    _edge_default = True

    def __init__(self, segments,  # Can be None.
                 *,
                 zorder=2,        # Collection.zorder is 1
                 **kwargs
                 ):
        """
        Parameters
        ----------
        segments : list of (N, 2) array-like
            A sequence ``[line0, line1, ...]`` where each line is a (N, 2)-shape
            array-like containing points::

                line0 = [(x0, y0), (x1, y1), ...]

            Each line can contain a different number of points.
        linewidths : float or list of float, default: :rc:`lines.linewidth`
            The width of each line in points.
        colors : :mpltype:`color` or list of color, default: :rc:`lines.color`
            A sequence of RGBA tuples (e.g., arbitrary color strings, etc, not
            allowed).
        antialiaseds : bool or list of bool, default: :rc:`lines.antialiased`
            Whether to use antialiasing for each line.
        zorder : float, default: 2
            zorder of the lines once drawn.

        facecolors : :mpltype:`color` or list of :mpltype:`color`, default: 'none'
            When setting *facecolors*, each line is interpreted as a boundary
            for an area, implicitly closing the path from the last point to the
            first point. The enclosed area is filled with *facecolor*.
            In order to manually specify what should count as the "interior" of
            each line, please use `.PathCollection` instead, where the
            "interior" can be specified by appropriate usage of
            `~.path.Path.CLOSEPOLY`.

        **kwargs
            Forwarded to `.Collection`.
        """
        # Unfortunately, mplot3d needs this explicit setting of 'facecolors'.
        kwargs.setdefault('facecolors', 'none')
        super().__init__(
            zorder=zorder,
            **kwargs)
        self.set_segments(segments)

    def set_segments(self, segments):
        """Set the segments (list of (N, 2) vertex arrays) of this collection."""
        if segments is None:
            return
        # Masked arrays are handed to Path as-is (it understands masks);
        # everything else is coerced to a float ndarray first.
        self._paths = [mpath.Path(seg) if isinstance(seg, np.ma.MaskedArray)
                       else mpath.Path(np.asarray(seg, float))
                       for seg in segments]
        self.stale = True

    set_verts = set_segments  # for compatibility with PolyCollection
    set_paths = set_segments

    def get_segments(self):
        """
        Returns
        -------
        list
            List of segments in the LineCollection. Each list item contains an
            array of vertices.
        """
        segments = []

        for path in self._paths:
            vertices = [
                vertex
                for vertex, _
                # Never simplify here, we want to get the data-space values
                # back and there in no way to know the "right" simplification
                # threshold so never try.
                in path.iter_segments(simplify=False)
            ]
            vertices = np.asarray(vertices)
            segments.append(vertices)

        return segments

    def _get_default_linewidth(self):
        return mpl.rcParams['lines.linewidth']

    def _get_default_antialiased(self):
        return mpl.rcParams['lines.antialiased']

    def _get_default_edgecolor(self):
        return mpl.rcParams['lines.color']

    def _get_default_facecolor(self):
        return 'none'

    def set_alpha(self, alpha):
        # docstring inherited
        super().set_alpha(alpha)
        # Re-resolve the gapcolor against the new alpha.
        if self._gapcolor is not None:
            self.set_gapcolor(self._original_gapcolor)

    def set_color(self, c):
        """
        Set the edgecolor(s) of the LineCollection.

        Parameters
        ----------
        c : :mpltype:`color` or list of :mpltype:`color`
            Single color (all lines have same color), or a
            sequence of RGBA tuples; if it is a sequence the lines will
            cycle through the sequence.
        """
        self.set_edgecolor(c)

    set_colors = set_color

    def get_color(self):
        """Return the edgecolor(s) of the LineCollection."""
        return self._edgecolors

    get_colors = get_color  # for compatibility with old versions

    def set_gapcolor(self, gapcolor):
        """
        Set a color to fill the gaps in the dashed line style.

        .. note::

            Striped lines are created by drawing two interleaved dashed lines.
            There can be overlaps between those two, which may result in
            artifacts when using transparency.

            This functionality is experimental and may change.

        Parameters
        ----------
        gapcolor : :mpltype:`color` or list of :mpltype:`color` or None
            The color with which to fill the gaps. If None, the gaps are
            unfilled.
        """
        # Keep the user-supplied value so set_alpha can re-resolve it.
        self._original_gapcolor = gapcolor
        self._set_gapcolor(gapcolor)

    def _set_gapcolor(self, gapcolor):
        if gapcolor is not None:
            gapcolor = mcolors.to_rgba_array(gapcolor, self._alpha)
        self._gapcolor = gapcolor
        self.stale = True

    def get_gapcolor(self):
        """Return the gapcolor, as set by `.set_gapcolor`, or None."""
        return self._gapcolor

    def _get_inverse_paths_linestyles(self):
        """
        Returns the path and pattern for the gaps in the non-solid lines.

        This path and pattern is the inverse of the path and pattern used to
        construct the non-solid lines. For solid lines, we set the inverse path
        to nans to prevent drawing an inverse line.
        """
        path_patterns = [
            (mpath.Path(np.full((1, 2), np.nan)), ls)
            if ls == (0, None) else
            (path, mlines._get_inverse_dash_pattern(*ls))
            for (path, ls) in
            zip(self._paths, itertools.cycle(self._linestyles))]

        return zip(*path_patterns)
class EventCollection(LineCollection):
    """
    A collection of locations along a single axis at which an "event" occurred.

    The events are given by a 1-dimensional array. They do not have an
    amplitude and are displayed as parallel lines.
    """

    _edge_default = True

    def __init__(self,
                 positions,  # Cannot be None.
                 orientation='horizontal',
                 *,
                 lineoffset=0,
                 linelength=1,
                 linewidth=None,
                 color=None,
                 linestyle='solid',
                 antialiased=None,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        positions : 1D array-like
            Each value is an event.
        orientation : {'horizontal', 'vertical'}, default: 'horizontal'
            The sequence of events is plotted along this direction.
            The marker lines of the single events are along the orthogonal
            direction.
        lineoffset : float, default: 0
            The offset of the center of the markers from the origin, in the
            direction orthogonal to *orientation*.
        linelength : float, default: 1
            The total height of the marker (i.e. the marker stretches from
            ``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
        linewidth : float or list thereof, default: :rc:`lines.linewidth`
            The line width of the event lines, in points.
        color : :mpltype:`color` or list of :mpltype:`color`, default: :rc:`lines.color`
            The color of the event lines.
        linestyle : str or tuple or list thereof, default: 'solid'
            Valid strings are ['solid', 'dashed', 'dashdot', 'dotted',
            '-', '--', '-.', ':']. Dash tuples should be of the form::

                (offset, onoffseq),

            where *onoffseq* is an even length tuple of on and off ink
            in points.
        antialiased : bool or list thereof, default: :rc:`lines.antialiased`
            Whether to use antialiasing for drawing the lines.
        **kwargs
            Forwarded to `.LineCollection`.

        Examples
        --------
        .. plot:: gallery/lines_bars_and_markers/eventcollection_demo.py
        """
        super().__init__([],
                         linewidths=linewidth, linestyles=linestyle,
                         colors=color, antialiaseds=antialiased,
                         **kwargs)
        self._is_horizontal = True  # Initial value, may be switched below.
        self._linelength = linelength
        self._lineoffset = lineoffset
        self.set_orientation(orientation)
        self.set_positions(positions)

    def get_positions(self):
        """
        Return an array containing the floating-point values of the positions.
        """
        pos = 0 if self.is_horizontal() else 1
        return [segment[0, pos] for segment in self.get_segments()]

    def set_positions(self, positions):
        """Set the positions of the events."""
        if positions is None:
            positions = []
        if np.ndim(positions) != 1:
            raise ValueError('positions must be one-dimensional')
        lineoffset = self.get_lineoffset()
        linelength = self.get_linelength()
        pos_idx = 0 if self.is_horizontal() else 1
        # Each event becomes one 2-point segment orthogonal to the
        # event axis, centered at *lineoffset*.
        segments = np.empty((len(positions), 2, 2))
        segments[:, :, pos_idx] = np.sort(positions)[:, None]
        segments[:, 0, 1 - pos_idx] = lineoffset + linelength / 2
        segments[:, 1, 1 - pos_idx] = lineoffset - linelength / 2
        self.set_segments(segments)

    def add_positions(self, position):
        """Add one or more events at the specified positions."""
        # Bug fix: this previously tested ``hasattr(position, 'len')`` (no
        # dunder underscores), which is essentially never true, so empty
        # sequences were not short-circuited as intended.
        if position is None or (hasattr(position, '__len__') and
                                len(position) == 0):
            return
        positions = self.get_positions()
        positions = np.hstack([positions, np.asanyarray(position)])
        self.set_positions(positions)

    extend_positions = append_positions = add_positions

    def is_horizontal(self):
        """True if the eventcollection is horizontal, False if vertical."""
        return self._is_horizontal

    def get_orientation(self):
        """
        Return the orientation of the event line ('horizontal' or 'vertical').
        """
        return 'horizontal' if self.is_horizontal() else 'vertical'

    def switch_orientation(self):
        """
        Switch the orientation of the event line, either from vertical to
        horizontal or vice versus.
        """
        segments = self.get_segments()
        for i, segment in enumerate(segments):
            # Swapping the two columns exchanges the x and y roles.
            segments[i] = np.fliplr(segment)
        self.set_segments(segments)
        self._is_horizontal = not self.is_horizontal()
        self.stale = True

    def set_orientation(self, orientation):
        """
        Set the orientation of the event line.

        Parameters
        ----------
        orientation : {'horizontal', 'vertical'}
        """
        is_horizontal = _api.check_getitem(
            {"horizontal": True, "vertical": False},
            orientation=orientation)
        if is_horizontal == self.is_horizontal():
            return
        self.switch_orientation()

    def get_linelength(self):
        """Return the length of the lines used to mark each event."""
        return self._linelength

    def set_linelength(self, linelength):
        """Set the length of the lines used to mark each event."""
        if linelength == self.get_linelength():
            return
        lineoffset = self.get_lineoffset()
        segments = self.get_segments()
        pos = 1 if self.is_horizontal() else 0
        for segment in segments:
            segment[0, pos] = lineoffset + linelength / 2.
            segment[1, pos] = lineoffset - linelength / 2.
        self.set_segments(segments)
        self._linelength = linelength

    def get_lineoffset(self):
        """Return the offset of the lines used to mark each event."""
        return self._lineoffset

    def set_lineoffset(self, lineoffset):
        """Set the offset of the lines used to mark each event."""
        if lineoffset == self.get_lineoffset():
            return
        linelength = self.get_linelength()
        segments = self.get_segments()
        pos = 1 if self.is_horizontal() else 0
        for segment in segments:
            segment[0, pos] = lineoffset + linelength / 2.
            segment[1, pos] = lineoffset - linelength / 2.
        self.set_segments(segments)
        self._lineoffset = lineoffset

    def get_linewidth(self):
        """Get the width of the lines used to mark each event."""
        return super().get_linewidth()[0]

    def get_linewidths(self):
        """Return the widths of the lines used to mark each event."""
        return super().get_linewidth()

    def get_color(self):
        """Return the color of the lines used to mark each event."""
        return self.get_colors()[0]
class CircleCollection(_CollectionWithSizes):
    """A collection of circles, drawn using splines."""

    # *sizes* is the circle area in points^2; the unit circle has area pi,
    # so scale radii by 1/sqrt(pi).
    _factor = np.pi ** (-1/2)

    def __init__(self, sizes, **kwargs):
        """
        Parameters
        ----------
        sizes : float or array-like
            The area of each circle in points^2.
        **kwargs
            Forwarded to `.Collection`.
        """
        super().__init__(**kwargs)
        # Every element shares the single unit-circle path; per-element
        # scaling comes from *sizes*.
        self._paths = [mpath.Path.unit_circle()]
        self.set_transform(transforms.IdentityTransform())
        self.set_sizes(sizes)
class EllipseCollection(Collection):
    """A collection of ellipses, drawn using splines."""

    def __init__(self, widths, heights, angles, *, units='points', **kwargs):
        """
        Parameters
        ----------
        widths : array-like
            The lengths of the first axes (e.g., major axis lengths).
        heights : array-like
            The lengths of second axes.
        angles : array-like
            The angles of the first axes, degrees CCW from the x-axis.
        units : {'points', 'inches', 'dots', 'width', 'height', 'x', 'y', 'xy'}
            The units in which majors and minors are given; 'width' and
            'height' refer to the dimensions of the axes, while 'x' and 'y'
            refer to the *offsets* data units. 'xy' differs from all others in
            that the angle as plotted varies with the aspect ratio, and equals
            the specified angle only when the aspect ratio is unity. Hence
            it behaves the same as the `~.patches.Ellipse` with
            ``axes.transData`` as its transform.
        **kwargs
            Forwarded to `Collection`.
        """
        super().__init__(**kwargs)
        self.set_widths(widths)
        self.set_heights(heights)
        self.set_angles(angles)
        self._units = units
        self.set_transform(transforms.IdentityTransform())
        self._transforms = np.empty((0, 3, 3))
        # All ellipses share the unit-circle path; size/angle come from the
        # per-element affine transforms computed at draw time.
        self._paths = [mpath.Path.unit_circle()]

    def _set_transforms(self):
        """Calculate transforms immediately before drawing."""
        ax = self.axes
        fig = self.get_figure(root=False)

        # Resolve the scale factor from axes/figure geometry units to pixels.
        if self._units == 'xy':
            sc = 1
        elif self._units == 'x':
            sc = ax.bbox.width / ax.viewLim.width
        elif self._units == 'y':
            sc = ax.bbox.height / ax.viewLim.height
        elif self._units == 'inches':
            sc = fig.dpi
        elif self._units == 'points':
            sc = fig.dpi / 72.0
        elif self._units == 'width':
            sc = ax.bbox.width
        elif self._units == 'height':
            sc = ax.bbox.height
        elif self._units == 'dots':
            sc = 1.0
        else:
            raise ValueError(f'Unrecognized units: {self._units!r}')

        self._transforms = np.zeros((len(self._widths), 3, 3))
        widths = self._widths * sc
        heights = self._heights * sc
        sin_angle = np.sin(self._angles)
        cos_angle = np.cos(self._angles)
        # Per-element scale-and-rotate matrices (homogeneous 3x3).
        self._transforms[:, 0, 0] = widths * cos_angle
        self._transforms[:, 0, 1] = heights * -sin_angle
        self._transforms[:, 1, 0] = widths * sin_angle
        self._transforms[:, 1, 1] = heights * cos_angle
        self._transforms[:, 2, 2] = 1.0

        _affine = transforms.Affine2D
        if self._units == 'xy':
            # Use the data-space scaling/rotation but drop the translation,
            # which is already handled by the offsets.
            m = ax.transData.get_affine().get_matrix().copy()
            m[:2, 2:] = 0
            self.set_transform(_affine(m))

    def set_widths(self, widths):
        """Set the lengths of the first axes (e.g., major axis)."""
        # Stored as semi-axes; converted back in get_widths.
        self._widths = 0.5 * np.asarray(widths).ravel()
        self.stale = True

    def set_heights(self, heights):
        """Set the lengths of second axes (e.g., minor axes)."""
        self._heights = 0.5 * np.asarray(heights).ravel()
        self.stale = True

    def set_angles(self, angles):
        """Set the angles of the first axes, degrees CCW from the x-axis."""
        self._angles = np.deg2rad(angles).ravel()
        self.stale = True

    def get_widths(self):
        """Get the lengths of the first axes (e.g., major axis)."""
        return self._widths * 2

    def get_heights(self):
        """Get the lengths of second axes (e.g., minor axes)."""
        return self._heights * 2

    def get_angles(self):
        """Get the angles of the first axes, degrees CCW from the x-axis."""
        return np.rad2deg(self._angles)

    @artist.allow_rasterization
    def draw(self, renderer):
        self._set_transforms()
        super().draw(renderer)
class PatchCollection(Collection):
    """
    A generic collection of patches.

    PatchCollection draws faster than a large number of equivalent individual
    Patches. It also makes it easier to assign a colormap to a heterogeneous
    collection of patches.
    """

    def __init__(self, patches, *, match_original=False, **kwargs):
        """
        Parameters
        ----------
        patches : list of `.Patch`
            A sequence of Patch objects. This list may include
            a heterogeneous assortment of different patch types.
        match_original : bool, default: False
            If True, use the colors and linewidths of the original
            patches. If False, new colors may be assigned by
            providing the standard collection arguments, facecolor,
            edgecolor, linewidths, norm or cmap.
        **kwargs
            All other parameters are forwarded to `.Collection`.

            If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
            are None, they default to their `.rcParams` patch setting, in
            sequence form.

        Notes
        -----
        The use of `~matplotlib.cm.ScalarMappable` functionality is optional.
        If the `~matplotlib.cm.ScalarMappable` matrix ``_A`` has been set (via
        a call to `~.ScalarMappable.set_array`), at draw time a call to scalar
        mappable will be made to set the face colors.
        """
        if match_original:
            # Copy the visual properties off the individual patches so the
            # collection reproduces them exactly. Unfilled patches get a
            # fully-transparent facecolor.
            kwargs['facecolors'] = [
                p.get_facecolor() if p.get_fill() else [0, 0, 0, 0]
                for p in patches]
            for key, getter in [('edgecolors', 'get_edgecolor'),
                                ('linewidths', 'get_linewidth'),
                                ('linestyles', 'get_linestyle'),
                                ('antialiaseds', 'get_antialiased')]:
                kwargs[key] = [getattr(p, getter)() for p in patches]

        super().__init__(**kwargs)

        self.set_paths(patches)

    def set_paths(self, patches):
        """Set the paths of this collection from a sequence of patches."""
        # Bake each patch's own transform into its path so the collection
        # can render them with a single shared transform.
        self._paths = [p.get_transform().transform_path(p.get_path())
                       for p in patches]
class TriMesh(Collection):
    """
    Class for the efficient drawing of a triangular mesh using Gouraud shading.

    A triangular mesh is a `~matplotlib.tri.Triangulation` object.
    """

    def __init__(self, triangulation, **kwargs):
        super().__init__(**kwargs)
        self._triangulation = triangulation
        self._shading = 'gouraud'

        self._bbox = transforms.Bbox.unit()

        # Unfortunately this requires a copy, unless Triangulation
        # was rewritten.
        xy = np.hstack((triangulation.x.reshape(-1, 1),
                        triangulation.y.reshape(-1, 1)))
        self._bbox.update_from_data_xy(xy)

    def get_paths(self):
        # Paths are built lazily on first access.
        if self._paths is None:
            self.set_paths()
        return self._paths

    def set_paths(self):
        """(Re)build the triangle paths from the stored triangulation."""
        self._paths = self.convert_mesh_to_paths(self._triangulation)

    @staticmethod
    def convert_mesh_to_paths(tri):
        """
        Convert a given mesh into a sequence of `.Path` objects.

        This function is primarily of use to implementers of backends that do
        not directly support meshes.
        """
        triangles = tri.get_masked_triangles()
        verts = np.stack((tri.x[triangles], tri.y[triangles]), axis=-1)
        return [mpath.Path(x) for x in verts]

    @artist.allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, gid=self.get_gid())
        transform = self.get_transform()

        # Get a list of triangles and the color at each vertex.
        tri = self._triangulation
        triangles = tri.get_masked_triangles()

        verts = np.stack((tri.x[triangles], tri.y[triangles]), axis=-1)

        # Resolve the mapped array into per-vertex RGBA colors.
        self.update_scalarmappable()
        colors = self._facecolors[triangles]

        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])
        renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
        gc.restore()
        renderer.close_group(self.__class__.__name__)
class _MeshData:
    r"""
    Class for managing the two dimensional coordinates of Quadrilateral meshes
    and the associated data with them. This class is a mixin and is intended to
    be used with another collection that will implement the draw separately.

    A quadrilateral mesh is a grid of M by N adjacent quadrilaterals that are
    defined via a (M+1, N+1) grid of vertices. The quadrilateral (m, n) is
    defined by the vertices ::

               (m+1, n) ----------- (m+1, n+1)
                  /                   /
                 /                 /
                /               /
            (m, n) -------- (m, n+1)

    The mesh need not be regular and the polygons need not be convex.

    Parameters
    ----------
    coordinates : (M+1, N+1, 2) array-like
        The vertices. ``coordinates[m, n]`` specifies the (x, y) coordinates
        of vertex (m, n).

    shading : {'flat', 'gouraud'}, default: 'flat'
    """

    def __init__(self, coordinates, *, shading='flat'):
        _api.check_shape((None, None, 2), coordinates=coordinates)
        self._coordinates = coordinates
        self._shading = shading

    def set_array(self, A):
        """
        Set the data values.

        Parameters
        ----------
        A : array-like
            The mesh data. Supported array shapes are:

            - (M, N) or (M*N,): a mesh with scalar data. The values are mapped
              to colors using normalization and a colormap. See parameters
              *norm*, *cmap*, *vmin*, *vmax*.
            - (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
            - (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
              i.e. including transparency.

            If the values are provided as a 2D grid, the shape must match the
            coordinates grid. If the values are 1D, they are reshaped to 2D.
            M, N follow from the coordinates grid, where the coordinates grid
            shape is (M, N) for 'gouraud' *shading* and (M+1, N+1) for 'flat'
            shading.

        Raises
        ------
        ValueError
            If the shape of *A* is incompatible with the coordinates grid.
        """
        height, width = self._coordinates.shape[0:-1]
        if self._shading == 'flat':
            # 'flat' shading colors the quads, of which there is one fewer
            # per dimension than there are vertices.
            h, w = height - 1, width - 1
        else:
            h, w = height, width
        ok_shapes = [(h, w, 3), (h, w, 4), (h, w), (h * w,)]
        if A is not None:
            shape = np.shape(A)
            if shape not in ok_shapes:
                # Report the already-computed *shape* (np.shape(A)) rather
                # than A.shape: the latter raised AttributeError for inputs
                # without a .shape attribute (e.g. plain lists).  For ndarrays
                # the message is unchanged.
                raise ValueError(
                    f"For X ({width}) and Y ({height}) with {self._shading} "
                    f"shading, A should have shape "
                    f"{' or '.join(map(str, ok_shapes))}, not {shape}")
        return super().set_array(A)

    def get_coordinates(self):
        """
        Return the vertices of the mesh as an (M+1, N+1, 2) array.

        M, N are the number of quadrilaterals in the rows / columns of the
        mesh, corresponding to (M+1, N+1) vertices.
        The last dimension specifies the components (x, y).
        """
        return self._coordinates

    def get_edgecolor(self):
        # docstring inherited
        # Note that we want to return an array of shape (N*M, 4),
        # a flattened RGBA collection.
        return super().get_edgecolor().reshape(-1, 4)

    def get_facecolor(self):
        # docstring inherited
        # Note that we want to return an array of shape (N*M, 4),
        # a flattened RGBA collection.
        return super().get_facecolor().reshape(-1, 4)

    @staticmethod
    def _convert_mesh_to_paths(coordinates):
        """
        Convert a given mesh into a sequence of `.Path` objects.

        This function is primarily of use to implementers of backends that do
        not directly support quadmeshes.
        """
        if isinstance(coordinates, np.ma.MaskedArray):
            c = coordinates.data
        else:
            c = coordinates
        # Concatenate the four corners of each quad, plus the first corner
        # again to close the polygon, giving an (n_quads, 5, 2) vertex array.
        points = np.concatenate([
            c[:-1, :-1],
            c[:-1, 1:],
            c[1:, 1:],
            c[1:, :-1],
            c[:-1, :-1]
        ], axis=2).reshape((-1, 5, 2))
        return [mpath.Path(x) for x in points]

    def _convert_mesh_to_triangles(self, coordinates):
        """
        Convert a given mesh into a sequence of triangles, each point
        with its own color. The result can be used to construct a call to
        `~.RendererBase.draw_gouraud_triangles`.
        """
        if isinstance(coordinates, np.ma.MaskedArray):
            p = coordinates.data
        else:
            p = coordinates
        # Corners of each quad, and the quad's center point.
        p_a = p[:-1, :-1]
        p_b = p[:-1, 1:]
        p_c = p[1:, 1:]
        p_d = p[1:, :-1]
        p_center = (p_a + p_b + p_c + p_d) / 4.0
        # Split each quad into four triangles sharing the center vertex.
        triangles = np.concatenate([
            p_a, p_b, p_center,
            p_b, p_c, p_center,
            p_c, p_d, p_center,
            p_d, p_a, p_center,
        ], axis=2).reshape((-1, 3, 2))
        c = self.get_facecolor().reshape((*coordinates.shape[:2], 4))
        z = self.get_array()
        mask = z.mask if np.ma.is_masked(z) else None
        if mask is not None:
            # Flag masked cells with a NaN alpha so they can be dropped below.
            c[mask, 3] = np.nan
        c_a = c[:-1, :-1]
        c_b = c[:-1, 1:]
        c_c = c[1:, 1:]
        c_d = c[1:, :-1]
        c_center = (c_a + c_b + c_c + c_d) / 4.0
        colors = np.concatenate([
            c_a, c_b, c_center,
            c_b, c_c, c_center,
            c_c, c_d, c_center,
            c_d, c_a, c_center,
        ], axis=2).reshape((-1, 3, 4))
        # Drop every triangle whose center-vertex alpha is NaN; averaging
        # propagates the NaN from any masked corner of the quad.
        tmask = np.isnan(colors[..., 2, 3])
        return triangles[~tmask], colors[~tmask]
class QuadMesh(_MeshData, Collection):
    r"""
    Class for the efficient drawing of a quadrilateral mesh.

    A quadrilateral mesh is a grid of M by N adjacent quadrilaterals that are
    defined via a (M+1, N+1) grid of vertices. The quadrilateral (m, n) is
    defined by the vertices ::

               (m+1, n) ----------- (m+1, n+1)
                  /                   /
                 /                 /
                /               /
            (m, n) -------- (m, n+1)

    The mesh need not be regular and the polygons need not be convex.

    Parameters
    ----------
    coordinates : (M+1, N+1, 2) array-like
        The vertices. ``coordinates[m, n]`` specifies the (x, y) coordinates
        of vertex (m, n).

    antialiased : bool, default: True

    shading : {'flat', 'gouraud'}, default: 'flat'

    Notes
    -----
    Unlike other `.Collection`\s, the default *pickradius* of `.QuadMesh` is 0,
    i.e. `~.Artist.contains` checks whether the test point is within any of the
    mesh quadrilaterals.
    """

    def __init__(self, coordinates, *, antialiased=True, shading='flat',
                 **kwargs):
        # pickradius 0 -> contains() tests containment in the quads rather
        # than proximity to their edges (see class Notes).
        kwargs.setdefault("pickradius", 0)
        # super().__init__ reaches _MeshData (which does not chain further),
        # so Collection.__init__ must be invoked explicitly.
        super().__init__(coordinates=coordinates, shading=shading)
        Collection.__init__(self, **kwargs)
        self._antialiased = antialiased
        # Cache a bbox of the mesh extent for get_datalim().
        self._bbox = transforms.Bbox.unit()
        self._bbox.update_from_data_xy(self._coordinates.reshape(-1, 2))
        self.set_mouseover(False)

    def get_paths(self):
        # Paths are built lazily on first access.
        if self._paths is None:
            self.set_paths()
        return self._paths

    def set_paths(self):
        self._paths = self._convert_mesh_to_paths(self._coordinates)
        self.stale = True

    def get_datalim(self, transData):
        # Map the cached bbox from this artist's transform into data space.
        return (self.get_transform() - transData).transform_bbox(self._bbox)

    @artist.allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        transform = self.get_transform()
        offset_trf = self.get_offset_transform()
        offsets = self.get_offsets()

        # Convert offset values through the unit framework, if in use.
        if self.have_units():
            xs = self.convert_xunits(offsets[:, 0])
            ys = self.convert_yunits(offsets[:, 1])
            offsets = np.column_stack([xs, ys])

        self.update_scalarmappable()

        # Pre-apply a non-affine transform to the coordinates so the renderer
        # only ever sees an affine (here: identity) transform.
        if not transform.is_affine:
            coordinates = self._coordinates.reshape((-1, 2))
            coordinates = transform.transform(coordinates)
            coordinates = coordinates.reshape(self._coordinates.shape)
            transform = transforms.IdentityTransform()
        else:
            coordinates = self._coordinates

        # Likewise reduce the offset transform to its affine part.
        if not offset_trf.is_affine:
            offsets = offset_trf.transform_non_affine(offsets)
            offset_trf = offset_trf.get_affine()

        gc = renderer.new_gc()
        gc.set_snap(self.get_snap())
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])

        if self._shading == 'gouraud':
            # Gouraud shading draws per-vertex-colored triangles instead of
            # a flat-colored quad mesh.
            triangles, colors = self._convert_mesh_to_triangles(coordinates)
            renderer.draw_gouraud_triangles(
                gc, triangles, colors, transform.frozen())
        else:
            renderer.draw_quad_mesh(
                gc, transform.frozen(),
                coordinates.shape[1] - 1, coordinates.shape[0] - 1,
                coordinates, offsets, offset_trf,
                # Backends expect flattened rgba arrays (n*m, 4) for fc and ec
                self.get_facecolor().reshape((-1, 4)),
                self._antialiased, self.get_edgecolors().reshape((-1, 4)))
        gc.restore()
        renderer.close_group(self.__class__.__name__)
        self.stale = False

    def get_cursor_data(self, event):
        # Report the data value of the quad under the cursor, if any.
        contained, info = self.contains(event)
        if contained and self.get_array() is not None:
            return self.get_array().ravel()[info["ind"]]
        return None
class PolyQuadMesh(_MeshData, PolyCollection):
    """
    Class for drawing a quadrilateral mesh as individual Polygons.

    A quadrilateral mesh is a grid of M by N adjacent quadrilaterals that are
    defined via a (M+1, N+1) grid of vertices. The quadrilateral (m, n) is
    defined by the vertices ::

               (m+1, n) ----------- (m+1, n+1)
                  /                   /
                 /                 /
                /               /
            (m, n) -------- (m, n+1)

    The mesh need not be regular and the polygons need not be convex.

    Parameters
    ----------
    coordinates : (M+1, N+1, 2) array-like
        The vertices. ``coordinates[m, n]`` specifies the (x, y) coordinates
        of vertex (m, n).

    Notes
    -----
    Unlike `.QuadMesh`, this class will draw each cell as an individual Polygon.
    This is significantly slower, but allows for more flexibility when wanting
    to add additional properties to the cells, such as hatching.

    Another difference from `.QuadMesh` is that if any of the vertices or data
    of a cell are masked, that Polygon will **not** be drawn and it won't be in
    the list of paths returned.
    """

    def __init__(self, coordinates, **kwargs):
        # super().__init__ reaches _MeshData, which validates and stores the
        # coordinates; PolyCollection must be initialized explicitly.
        super().__init__(coordinates=coordinates)
        PolyCollection.__init__(self, verts=[], **kwargs)
        # Setting the verts updates the paths of the PolyCollection
        # This is called after the initializers to make sure the kwargs
        # have all been processed and available for the masking calculations
        self._set_unmasked_verts()

    def _get_unmasked_polys(self):
        """Get the unmasked regions using the coordinates and array"""
        # mask(X) | mask(Y)
        mask = np.any(np.ma.getmaskarray(self._coordinates), axis=-1)

        # A cell is masked if *any* of its four corner vertices is masked.
        mask = (mask[0:-1, 0:-1] | mask[1:, 1:] | mask[0:-1, 1:] | mask[1:, 0:-1])

        arr = self.get_array()
        if arr is not None:
            arr = np.ma.getmaskarray(arr)
            if arr.ndim == 3:
                # RGB(A) case
                mask |= np.any(arr, axis=-1)
            elif arr.ndim == 2:
                mask |= arr
            else:
                # 1D data: reshape to the (M, N) grid of cells before or-ing.
                mask |= arr.reshape(self._coordinates[:-1, :-1, :].shape[:2])
        return ~mask

    def _set_unmasked_verts(self):
        """Rebuild the PolyCollection verts from the unmasked cells only."""
        X = self._coordinates[..., 0]
        Y = self._coordinates[..., 1]

        unmask = self._get_unmasked_polys()
        # The four corners of every unmasked cell.
        X1 = np.ma.filled(X[:-1, :-1])[unmask]
        Y1 = np.ma.filled(Y[:-1, :-1])[unmask]
        X2 = np.ma.filled(X[1:, :-1])[unmask]
        Y2 = np.ma.filled(Y[1:, :-1])[unmask]
        X3 = np.ma.filled(X[1:, 1:])[unmask]
        Y3 = np.ma.filled(Y[1:, 1:])[unmask]
        X4 = np.ma.filled(X[:-1, 1:])[unmask]
        Y4 = np.ma.filled(Y[:-1, 1:])[unmask]
        npoly = len(X1)

        # Repeat the first corner at the end to close each polygon.
        xy = np.ma.stack([X1, Y1, X2, Y2, X3, Y3, X4, Y4, X1, Y1], axis=-1)
        verts = xy.reshape((npoly, 5, 2))
        self.set_verts(verts)

    def get_edgecolor(self):
        # docstring inherited
        # We only want to return the edgecolors of the polygons
        # that were drawn.
        ec = super().get_edgecolor()
        unmasked_polys = self._get_unmasked_polys().ravel()
        if len(ec) != len(unmasked_polys):
            # Mapping is off
            return ec
        return ec[unmasked_polys, :]

    def get_facecolor(self):
        # docstring inherited
        # We only want to return the facecolors of the polygons
        # that were drawn.
        fc = super().get_facecolor()
        unmasked_polys = self._get_unmasked_polys().ravel()
        if len(fc) != len(unmasked_polys):
            # Mapping is off
            return fc
        return fc[unmasked_polys, :]

    def set_array(self, A):
        # docstring inherited
        prev_unmask = self._get_unmasked_polys()
        super().set_array(A)
        # If the mask has changed at all we need to update
        # the set of Polys that we are drawing
        if not np.array_equal(prev_unmask, self._get_unmasked_polys()):
            self._set_unmasked_verts()
venv\Lib\site-packages\matplotlib\colorbar.py
"""
Colorbars are a visualization of the mapping from scalar values to colors.
In Matplotlib they are drawn into a dedicated `~.axes.Axes`.
.. note::
Colorbars are typically created through `.Figure.colorbar` or its pyplot
wrapper `.pyplot.colorbar`, which internally use `.Colorbar` together with
`.make_axes_gridspec` (for `.GridSpec`-positioned Axes) or `.make_axes` (for
non-`.GridSpec`-positioned Axes).
End-users most likely won't need to directly use this module's API.
"""
import logging
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, collections, cm, colors, contour, ticker
import matplotlib.artist as martist
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.spines as mspines
import matplotlib.transforms as mtransforms
from matplotlib import _docstring
# Module-level logger (used e.g. by Colorbar.update_normal for debug output).
_log = logging.getLogger(__name__)

# Register shared docstring fragments; they are interpolated into other
# docstrings in this module via %(_make_axes_kw_doc)s / %(_colormap_kw_doc)s.
_docstring.interpd.register(
    _make_axes_kw_doc="""
location : None or {'left', 'right', 'top', 'bottom'}
The location, relative to the parent Axes, where the colorbar Axes
is created. It also determines the *orientation* of the colorbar
(colorbars on the left and right are vertical, colorbars at the top
and bottom are horizontal). If None, the location will come from the
*orientation* if it is set (vertical colorbars on the right, horizontal
ones at the bottom), or default to 'right' if *orientation* is unset.
orientation : None or {'vertical', 'horizontal'}
The orientation of the colorbar. It is preferable to set the *location*
of the colorbar, as that also determines the *orientation*; passing
incompatible values for *location* and *orientation* raises an exception.
fraction : float, default: 0.15
Fraction of original Axes to use for colorbar.
shrink : float, default: 1.0
Fraction by which to multiply the size of the colorbar.
aspect : float, default: 20
Ratio of long to short dimensions.
pad : float, default: 0.05 if vertical, 0.15 if horizontal
Fraction of original Axes between colorbar and new image Axes.
anchor : (float, float), optional
The anchor point of the colorbar Axes.
Defaults to (0.0, 0.5) if vertical; (0.5, 1.0) if horizontal.
panchor : (float, float), or *False*, optional
The anchor point of the colorbar parent Axes. If *False*, the parent
axes' anchor will be unchanged.
Defaults to (1.0, 0.5) if vertical; (0.5, 0.0) if horizontal.""",
    _colormap_kw_doc="""
extend : {'neither', 'both', 'min', 'max'}
Make pointed end(s) for out-of-range values (unless 'neither'). These are
set for a given colormap using the colormap set_under and set_over methods.
extendfrac : {*None*, 'auto', length, lengths}
If set to *None*, both the minimum and maximum triangular colorbar
extensions will have a length of 5% of the interior colorbar length (this
is the default setting).
If set to 'auto', makes the triangular colorbar extensions the same lengths
as the interior boxes (when *spacing* is set to 'uniform') or the same
lengths as the respective adjacent interior boxes (when *spacing* is set to
'proportional').
If a scalar, indicates the length of both the minimum and maximum
triangular colorbar extensions as a fraction of the interior colorbar
length. A two-element sequence of fractions may also be given, indicating
the lengths of the minimum and maximum colorbar extensions respectively as
a fraction of the interior colorbar length.
extendrect : bool
If *False* the minimum and maximum colorbar extensions will be triangular
(the default). If *True* the extensions will be rectangular.
ticks : None or list of ticks or Locator
If None, ticks are determined automatically from the input.
format : None or str or Formatter
If None, `~.ticker.ScalarFormatter` is used.
Format strings, e.g., ``"%4.2e"`` or ``"{x:.2e}"``, are supported.
An alternative `~.ticker.Formatter` may be given instead.
drawedges : bool
Whether to draw lines at color boundaries.
label : str
The label on the colorbar's long axis.
boundaries, values : None or a sequence
If unset, the colormap will be displayed on a 0-1 scale.
If sequences, *values* must have a length 1 less than *boundaries*. For
each region delimited by adjacent entries in *boundaries*, the color mapped
to the corresponding value in *values* will be used. The size of each
region is determined by the *spacing* parameter.
Normally only useful for indexed colors (i.e. ``norm=NoNorm()``) or other
unusual circumstances.
spacing : {'uniform', 'proportional'}
For discrete colorbars (`.BoundaryNorm` or contours), 'uniform' gives each
color the same space; 'proportional' makes the space proportional to the
data interval.""")
def _set_ticks_on_axis_warn(*args, **kwargs):
    """
    Warn that ticks should be set through the colorbar API.

    A top-level function that Colorbar.__init__ installs in place of the
    axes' ``set_xticks`` and ``set_yticks`` methods; it ignores all
    arguments and only emits a warning.
    """
    _api.warn_external("Use the colorbar set_ticks() method instead.")
class _ColorbarSpine(mspines.Spine):
    """A `.Spine` with no associated Axis, used as the colorbar outline."""

    def __init__(self, axes):
        self._ax = axes
        # Start with an empty path; the actual outline is supplied later via
        # set_xy().
        super().__init__(axes, 'colorbar', mpath.Path(np.empty((0, 2))))
        # Call Patch.set_transform directly (bypassing the Spine
        # implementation) so the outline lives in axes coordinates.
        mpatches.Patch.set_transform(self, axes.transAxes)

    def get_window_extent(self, renderer=None):
        # This Spine has no Axis associated with it, and doesn't need to adjust
        # its location, so we can directly get the window extent from the
        # super-super-class.
        return mpatches.Patch.get_window_extent(self, renderer=renderer)

    def set_xy(self, xy):
        # Replace the outline with a closed polygon through the points *xy*.
        self._path = mpath.Path(xy, closed=True)
        self._xy = xy
        self.stale = True

    def draw(self, renderer):
        # Draw as a plain Patch, skipping Spine's axis-dependent positioning.
        ret = mpatches.Patch.draw(self, renderer)
        self.stale = False
        return ret
class _ColorbarAxesLocator:
    """
    Axes locator for colorbar Axes.

    Shrink the Axes if there are triangular or rectangular extends, wrapping
    (and deferring to) any locator that was previously set on the Axes.
    """

    def __init__(self, cbar):
        self._cbar = cbar
        self._orig_locator = cbar.ax._axes_locator

    def __call__(self, ax, renderer):
        # Start from the position given by the wrapped locator, or the Axes'
        # own original position.
        if self._orig_locator is not None:
            pos = self._orig_locator(ax, renderer)
        else:
            pos = ax.get_position(original=True)
        if self._cbar.extend == 'neither':
            return pos

        y, extendlen = self._cbar._proportional_y()
        if not self._cbar._extend_lower():
            extendlen[0] = 0
        if not self._cbar._extend_upper():
            extendlen[1] = 0
        # Total length in units of the interior colorbar length.  (Renamed
        # from ``len`` to stop shadowing the builtin.)
        total_len = sum(extendlen) + 1
        shrink = 1 / total_len
        offset = extendlen[0] / total_len
        # we need to reset the aspect ratio of the axes to account
        # of the extends...
        if hasattr(ax, '_colorbar_info'):
            aspect = ax._colorbar_info['aspect']
        else:
            aspect = False
        # now shrink and/or offset to take into account the
        # extend tri/rectangles.
        if self._cbar.orientation == 'vertical':
            if aspect:
                self._cbar.ax.set_box_aspect(aspect*shrink)
            pos = pos.shrunk(1, shrink).translated(0, offset * pos.height)
        else:
            if aspect:
                self._cbar.ax.set_box_aspect(1/(aspect * shrink))
            pos = pos.shrunk(shrink, 1).translated(offset * pos.width, 0)
        return pos

    def get_subplotspec(self):
        # make tight_layout happy..
        return (
            self._cbar.ax.get_subplotspec()
            or getattr(self._orig_locator, "get_subplotspec", lambda: None)())
@_docstring.interpd
class Colorbar:
r"""
Draw a colorbar in an existing Axes.
Typically, colorbars are created using `.Figure.colorbar` or
`.pyplot.colorbar` and associated with `.ScalarMappable`\s (such as an
`.AxesImage` generated via `~.axes.Axes.imshow`).
In order to draw a colorbar not associated with other elements in the
figure, e.g. when showing a colormap by itself, one can create an empty
`.ScalarMappable`, or directly pass *cmap* and *norm* instead of *mappable*
to `Colorbar`.
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
Attributes
----------
ax : `~matplotlib.axes.Axes`
The `~.axes.Axes` instance in which the colorbar is drawn.
lines : list
A list of `.LineCollection` (empty if no lines were drawn).
dividers : `.LineCollection`
A LineCollection (empty if *drawedges* is ``False``).
"""
n_rasterize = 50 # rasterize solids if number of colors >= n_rasterize
def __init__(
        self, ax, mappable=None, *,
        alpha=None,
        location=None,
        extend=None,
        extendfrac=None,
        extendrect=False,
        ticks=None,
        format=None,
        values=None,
        boundaries=None,
        spacing='uniform',
        drawedges=False,
        label='',
        cmap=None, norm=None,  # redundant with *mappable*
        orientation=None, ticklocation='auto',  # redundant with *location*
        ):
    """
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The `~.axes.Axes` instance in which the colorbar is drawn.

    mappable : `.ScalarMappable`
        The mappable whose colormap and norm will be used.

        To show the colors versus index instead of on a 0-1 scale, set the
        mappable's norm to ``colors.NoNorm()``.

    alpha : float
        The colorbar transparency between 0 (transparent) and 1 (opaque).

    location : None or {'left', 'right', 'top', 'bottom'}
        Set the colorbar's *orientation* and *ticklocation*. Colorbars on
        the left and right are vertical, colorbars at the top and bottom
        are horizontal. The *ticklocation* is the same as *location*, so if
        *location* is 'top', the ticks are on the top. *orientation* and/or
        *ticklocation* can be provided as well and overrides the value set by
        *location*, but there will be an error for incompatible combinations.

        .. versionadded:: 3.7

    %(_colormap_kw_doc)s

    Other Parameters
    ----------------
    cmap : `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        The colormap to use. This parameter is ignored, unless *mappable* is
        None.

    norm : `~matplotlib.colors.Normalize`
        The normalization to use. This parameter is ignored, unless *mappable*
        is None.

    orientation : None or {'vertical', 'horizontal'}
        If None, use the value determined by *location*. If both
        *orientation* and *location* are None then defaults to 'vertical'.

    ticklocation : {'auto', 'left', 'right', 'top', 'bottom'}
        The location of the colorbar ticks. The *ticklocation* must match
        *orientation*. For example, a horizontal colorbar can only have ticks
        at the top or the bottom. If 'auto', the ticks will be the same as
        *location*, so a colorbar to the left will have ticks to the left. If
        *location* is None, the ticks will be at the bottom for a horizontal
        colorbar and at the right for a vertical.
    """
    if mappable is None:
        # Allow drawing a standalone colorbar from just *cmap*/*norm*.
        mappable = cm.ScalarMappable(norm=norm, cmap=cmap)

    self.mappable = mappable
    cmap = mappable.cmap
    norm = mappable.norm

    filled = True
    if isinstance(mappable, contour.ContourSet):
        # A ContourSet carries its own levels/values/extend information,
        # which overrides the corresponding keyword arguments.
        cs = mappable
        alpha = cs.get_alpha()
        boundaries = cs._levels
        values = cs.cvalues
        extend = cs.extend
        filled = cs.filled
        if ticks is None:
            ticks = ticker.FixedLocator(cs.levels, nbins=10)
    elif isinstance(mappable, martist.Artist):
        alpha = mappable.get_alpha()

    # Link the mappable back to this colorbar and listen for norm changes.
    mappable.colorbar = self
    mappable.colorbar_cid = mappable.callbacks.connect(
        'changed', self.update_normal)

    location_orientation = _get_orientation_from_location(location)

    _api.check_in_list(
        [None, 'vertical', 'horizontal'], orientation=orientation)
    _api.check_in_list(
        ['auto', 'left', 'right', 'top', 'bottom'],
        ticklocation=ticklocation)
    _api.check_in_list(
        ['uniform', 'proportional'], spacing=spacing)

    if location_orientation is not None and orientation is not None:
        if location_orientation != orientation:
            raise TypeError(
                "location and orientation are mutually exclusive")
    else:
        orientation = orientation or location_orientation or "vertical"

    self.ax = ax
    self.ax._axes_locator = _ColorbarAxesLocator(self)

    if extend is None:
        # Precedence: colormap's own colorbar_extend, then the norm's
        # extend attribute, then 'neither'.
        if (not isinstance(mappable, contour.ContourSet)
                and getattr(cmap, 'colorbar_extend', False) is not False):
            extend = cmap.colorbar_extend
        elif hasattr(norm, 'extend'):
            extend = norm.extend
        else:
            extend = 'neither'
    self.alpha = None
    # Call set_alpha to handle array-like alphas properly
    self.set_alpha(alpha)
    self.cmap = cmap
    self.norm = norm
    self.values = values
    self.boundaries = boundaries
    self.extend = extend
    # Slice selecting the interior (non-extension) boundaries.
    self._inside = _api.check_getitem(
        {'neither': slice(0, None), 'both': slice(1, -1),
         'min': slice(1, None), 'max': slice(0, -1)},
        extend=extend)
    self.spacing = spacing
    self.orientation = orientation
    self.drawedges = drawedges
    self._filled = filled
    self.extendfrac = extendfrac
    self.extendrect = extendrect
    self._extend_patches = []
    self.solids = None
    self.solids_patches = []
    self.lines = []

    # Hide the regular spines; the outline is drawn by a dedicated
    # _ColorbarSpine instead.
    for spine in self.ax.spines.values():
        spine.set_visible(False)
    self.outline = self.ax.spines['outline'] = _ColorbarSpine(self.ax)

    self.dividers = collections.LineCollection(
        [],
        colors=[mpl.rcParams['axes.edgecolor']],
        linewidths=[0.5 * mpl.rcParams['axes.linewidth']],
        clip_on=False)
    self.ax.add_collection(self.dividers)

    self._locator = None
    self._minorlocator = None
    self._formatter = None
    self._minorformatter = None

    if ticklocation == 'auto':
        ticklocation = _get_ticklocation_from_orientation(
            orientation) if location is None else location
    self.ticklocation = ticklocation

    self.set_label(label)
    self._reset_locator_formatter_scale()

    if np.iterable(ticks):
        self._locator = ticker.FixedLocator(ticks, nbins=len(ticks))
    else:
        self._locator = ticks

    if isinstance(format, str):
        # Check format between FormatStrFormatter and StrMethodFormatter
        try:
            self._formatter = ticker.FormatStrFormatter(format)
            # Probe with a dummy value; a {}-style string raises here.
            _ = self._formatter(0)
        except (TypeError, ValueError):
            self._formatter = ticker.StrMethodFormatter(format)
    else:
        self._formatter = format  # Assume it is a Formatter or None

    self._draw_all()

    if isinstance(mappable, contour.ContourSet) and not mappable.filled:
        self.add_lines(mappable)

    # Link the Axes and Colorbar for interactive use
    self.ax._colorbar = self
    # Don't navigate on any of these types of mappables
    if (isinstance(self.norm, (colors.BoundaryNorm, colors.NoNorm)) or
            isinstance(self.mappable, contour.ContourSet)):
        self.ax.set_navigate(False)

    # These are the functions that set up interactivity on this colorbar
    self._interactive_funcs = ["_get_view", "_set_view",
                               "_set_view_from_bbox", "drag_pan"]
    for x in self._interactive_funcs:
        setattr(self.ax, x, getattr(self, x))
    # Set the cla function to the cbar's method to override it
    self.ax.cla = self._cbar_cla
    # Callbacks for the extend calculations to handle inverting the axis
    self._extend_cid1 = self.ax.callbacks.connect(
        "xlim_changed", self._do_extends)
    self._extend_cid2 = self.ax.callbacks.connect(
        "ylim_changed", self._do_extends)
@property
def long_axis(self):
    """Axis that has decorations (ticks, etc) on it."""
    if self.orientation == 'vertical':
        return self.ax.yaxis
    return self.ax.xaxis

@property
def locator(self):
    """Major tick `.Locator` for the colorbar."""
    return self.long_axis.get_major_locator()

@locator.setter
def locator(self, loc):
    self.long_axis.set_major_locator(loc)
    # Also keep a private reference to the locator.
    self._locator = loc

@property
def minorlocator(self):
    """Minor tick `.Locator` for the colorbar."""
    return self.long_axis.get_minor_locator()

@minorlocator.setter
def minorlocator(self, loc):
    self.long_axis.set_minor_locator(loc)
    # Also keep a private reference to the locator.
    self._minorlocator = loc

@property
def formatter(self):
    """Major tick label `.Formatter` for the colorbar."""
    return self.long_axis.get_major_formatter()

@formatter.setter
def formatter(self, fmt):
    self.long_axis.set_major_formatter(fmt)
    # Also keep a private reference to the formatter.
    self._formatter = fmt

@property
def minorformatter(self):
    """Minor tick `.Formatter` for the colorbar."""
    return self.long_axis.get_minor_formatter()

@minorformatter.setter
def minorformatter(self, fmt):
    self.long_axis.set_minor_formatter(fmt)
    # Also keep a private reference to the formatter.
    self._minorformatter = fmt
def _cbar_cla(self):
    """Function to clear the interactive colorbar state."""
    # Remove the interactivity functions that __init__ installed on the Axes.
    for x in self._interactive_funcs:
        delattr(self.ax, x)
    # We now restore the old cla() back and can call it directly
    # (deleting the instance attribute re-exposes the class-level cla).
    del self.ax.cla
    self.ax.cla()
def update_normal(self, mappable=None):
    """
    Update solid patches, lines, etc.

    This is meant to be called when the norm of the image or contour plot
    to which this colorbar belongs changes.

    If the norm on the mappable is different than before, this resets the
    locator and formatter for the axis, so if these have been customized,
    they will need to be customized again. However, if the norm only
    changes values of *vmin*, *vmax* or *cmap* then the old formatter
    and locator will be preserved.
    """
    if mappable:
        # The mappable keyword argument exists because
        # ScalarMappable.changed() emits self.callbacks.process('changed', self)
        # in contrast, ColorizingArtist (and Colorizer) does not use this keyword.
        # [ColorizingArtist.changed() emits self.callbacks.process('changed')]
        # Also, there is no test where self.mappable == mappable is not True
        # and possibly no use case.
        # Therefore, the mappable keyword can be deprecated if cm.ScalarMappable
        # is removed.
        self.mappable = mappable
    _log.debug('colorbar update normal %r %r', self.mappable.norm, self.norm)
    self.set_alpha(self.mappable.get_alpha())
    self.cmap = self.mappable.cmap
    if self.mappable.norm != self.norm:
        # Norm changed: adopt it and rebuild locator/formatter/scale.
        self.norm = self.mappable.norm
        self._reset_locator_formatter_scale()

    self._draw_all()
    if isinstance(self.mappable, contour.ContourSet):
        CS = self.mappable
        if not CS.filled:
            self.add_lines(CS)
    self.stale = True
def _draw_all(self):
    """
    Calculate any free parameters based on the current cmap and norm,
    and do all the drawing.
    """
    # Honor the rcParams default for minor-tick visibility on the long axis.
    if self.orientation == 'vertical':
        if mpl.rcParams['ytick.minor.visible']:
            self.minorticks_on()
    else:
        if mpl.rcParams['xtick.minor.visible']:
            self.minorticks_on()
    self.long_axis.set(label_position=self.ticklocation,
                       ticks_position=self.ticklocation)
    # The short axis carries no ticks at all.
    self._short_axis().set_ticks([])
    self._short_axis().set_ticks([], minor=True)

    # Set self._boundaries and self._values, including extensions.
    # self._boundaries are the edges of each square of color, and
    # self._values are the value to map into the norm to get the
    # color:
    self._process_values()
    # Set self.vmin and self.vmax to first and last boundary, excluding
    # extensions:
    self.vmin, self.vmax = self._boundaries[self._inside][[0, -1]]
    # Compute the X/Y mesh.
    X, Y = self._mesh()
    # draw the extend triangles, and shrink the inner Axes to accommodate.
    # also adds the outline path to self.outline spine:
    self._do_extends()
    lower, upper = self.vmin, self.vmax
    if self.long_axis.get_inverted():
        # If the axis is inverted, we need to swap the vmin/vmax
        lower, upper = upper, lower
    if self.orientation == 'vertical':
        self.ax.set_xlim(0, 1)
        self.ax.set_ylim(lower, upper)
    else:
        self.ax.set_ylim(0, 1)
        self.ax.set_xlim(lower, upper)

    # set up the tick locators and formatters. A bit complicated because
    # boundary norms + uniform spacing requires a manual locator.
    self.update_ticks()

    if self._filled:
        ind = np.arange(len(self._values))
        # Values belonging to extension patches are drawn by _do_extends,
        # not as part of the solid interior mesh.
        if self._extend_lower():
            ind = ind[1:]
        if self._extend_upper():
            ind = ind[:-1]
        self._add_solids(X, Y, self._values[ind, np.newaxis])
def _add_solids(self, X, Y, C):
    """Draw the colors; optionally add separators."""
    # Cleanup previously set artists.
    if self.solids is not None:
        self.solids.remove()
    for solid in self.solids_patches:
        solid.remove()
    # Add new artist(s), based on mappable type.  Use individual patches if
    # hatching is needed, pcolormesh otherwise.
    mappable = getattr(self, 'mappable', None)
    if (isinstance(mappable, contour.ContourSet)
            and any(hatch is not None for hatch in mappable.hatches)):
        self._add_solids_patches(X, Y, C, mappable)
    else:
        self.solids = self.ax.pcolormesh(
            X, Y, C, cmap=self.cmap, norm=self.norm, alpha=self.alpha,
            edgecolors='none', shading='flat')
        if not self.drawedges:
            # Rasterize large colorbars to keep vector output small.
            if len(self._y) >= self.n_rasterize:
                self.solids.set_rasterized(True)
    self._update_dividers()
def _update_dividers(self):
    """Recompute the segments of the *dividers* LineCollection."""
    if not self.drawedges:
        # Edges are turned off: clear any previously drawn dividers.
        self.dividers.set_segments([])
        return
    # Keep only the *internal* boundaries, i.e. those strictly inside the
    # current view limits of the long axis.
    if self.orientation == 'vertical':
        lims = self.ax.get_ylim()
    else:
        lims = self.ax.get_xlim()
    inside = (lims[0] < self._y) & (self._y < lims[1])
    y = self._y[inside]
    # Add outer dividers at the view limits when extensions are drawn.
    if self._extend_lower():
        y = np.insert(y, 0, lims[0])
    if self._extend_upper():
        y = np.append(y, lims[1])
    X, Y = np.meshgrid([0, 1], y)
    segments = (np.dstack([X, Y]) if self.orientation == 'vertical'
                else np.dstack([Y, X]))
    self.dividers.set_segments(segments)
def _add_solids_patches(self, X, Y, C, mappable):
    """Draw the colors as one hatched PathPatch per color band."""
    hatches = mappable.hatches * (len(C) + 1)  # Have enough hatches.
    if self._extend_lower():
        # remove first hatch that goes into the extend patch
        hatches = hatches[1:]
    patches = []
    for i in range(len(X) - 1):
        # NOTE(review): the mixed [i, 0]/[i, 1] column indexing appears to
        # build each quad's corners from staggered mesh columns — confirm
        # against the layout produced by self._mesh().
        xy = np.array([[X[i, 0], Y[i, 1]],
                       [X[i, 1], Y[i, 0]],
                       [X[i + 1, 1], Y[i + 1, 0]],
                       [X[i + 1, 0], Y[i + 1, 1]]])
        patch = mpatches.PathPatch(mpath.Path(xy),
                                   facecolor=self.cmap(self.norm(C[i][0])),
                                   hatch=hatches[i], linewidth=0,
                                   antialiased=False, alpha=self.alpha)
        self.ax.add_patch(patch)
        patches.append(patch)
    self.solids_patches = patches
def _do_extends(self, ax=None):
    """
    Add the extend tri/rectangles on the outside of the Axes.

    ax is unused, but required due to the callbacks on xlim/ylim changed
    """
    # Clean up any previous extend patches
    for patch in self._extend_patches:
        patch.remove()
    self._extend_patches = []
    # extend lengths are fraction of the *inner* part of colorbar,
    # not the total colorbar:
    _, extendlen = self._proportional_y()
    bot = 0 - (extendlen[0] if self._extend_lower() else 0)
    top = 1 + (extendlen[1] if self._extend_upper() else 0)
    # xyout is the outline of the colorbar including the extend patches:
    if not self.extendrect:
        # triangle:
        xyout = np.array([[0, 0], [0.5, bot], [1, 0],
                          [1, 1], [0.5, top], [0, 1], [0, 0]])
    else:
        # rectangle:
        xyout = np.array([[0, 0], [0, bot], [1, bot], [1, 0],
                          [1, 1], [1, top], [0, top], [0, 1],
                          [0, 0]])
    if self.orientation == 'horizontal':
        # Coordinates above are built for a vertical bar; swap x/y.
        xyout = xyout[:, ::-1]
    # xyout is the path for the spine:
    self.outline.set_xy(xyout)
    if not self._filled:
        return
    # Make extend triangles or rectangles filled patches.  These are
    # defined in the outer parent axes' coordinates:
    mappable = getattr(self, 'mappable', None)
    if (isinstance(mappable, contour.ContourSet)
            and any(hatch is not None for hatch in mappable.hatches)):
        hatches = mappable.hatches * (len(self._y) + 1)
    else:
        hatches = [None] * (len(self._y) + 1)
    if self._extend_lower():
        if not self.extendrect:
            # triangle
            xy = np.array([[0, 0], [0.5, bot], [1, 0]])
        else:
            # rectangle
            xy = np.array([[0, 0], [0, bot], [1., bot], [1, 0]])
        if self.orientation == 'horizontal':
            xy = xy[:, ::-1]
        # add the patch; an inverted long axis shows the *last* value at
        # the lower end, hence the index flip.
        val = -1 if self.long_axis.get_inverted() else 0
        color = self.cmap(self.norm(self._values[val]))
        patch = mpatches.PathPatch(
            mpath.Path(xy), facecolor=color, alpha=self.alpha,
            linewidth=0, antialiased=False,
            transform=self.ax.transAxes,
            hatch=hatches[0], clip_on=False,
            # Place it right behind the standard patches, which is
            # needed if we updated the extends
            zorder=np.nextafter(self.ax.patch.zorder, -np.inf))
        self.ax.add_patch(patch)
        self._extend_patches.append(patch)
        # remove first hatch that goes into the extend patch
        hatches = hatches[1:]
    if self._extend_upper():
        if not self.extendrect:
            # triangle
            xy = np.array([[0, 1], [0.5, top], [1, 1]])
        else:
            # rectangle
            xy = np.array([[0, 1], [0, top], [1, top], [1, 1]])
        if self.orientation == 'horizontal':
            xy = xy[:, ::-1]
        # add the patch
        val = 0 if self.long_axis.get_inverted() else -1
        color = self.cmap(self.norm(self._values[val]))
        hatch_idx = len(self._y) - 1
        patch = mpatches.PathPatch(
            mpath.Path(xy), facecolor=color, alpha=self.alpha,
            linewidth=0, antialiased=False,
            transform=self.ax.transAxes, hatch=hatches[hatch_idx],
            clip_on=False,
            # Place it right behind the standard patches, which is
            # needed if we updated the extends
            zorder=np.nextafter(self.ax.patch.zorder, -np.inf))
        self.ax.add_patch(patch)
        self._extend_patches.append(patch)
    self._update_dividers()
def add_lines(self, *args, **kwargs):
    """
    Draw lines on the colorbar.

    The lines are appended to the list :attr:`lines`.

    Parameters
    ----------
    levels : array-like
        The positions of the lines.
    colors : :mpltype:`color` or list of :mpltype:`color`
        Either a single color applying to all lines or one color value for
        each line.
    linewidths : float or array-like
        Either a single linewidth applying to all lines or one linewidth
        for each line.
    erase : bool, default: True
        Whether to remove any previously added lines.

    Notes
    -----
    Alternatively, this method can also be called with the signature
    ``colorbar.add_lines(contour_set, erase=True)``, in which case
    *levels*, *colors*, and *linewidths* are taken from *contour_set*.
    """
    # Resolve which of the two documented call signatures was used.
    params = _api.select_matching_signature(
        [lambda self, CS, erase=True: locals(),
         lambda self, levels, colors, linewidths, erase=True: locals()],
        self, *args, **kwargs)
    if "CS" in params:
        self, cs, erase = params.values()
        if not isinstance(cs, contour.ContourSet) or cs.filled:
            raise ValueError("If a single artist is passed to add_lines, "
                             "it must be a ContourSet of lines")
        # TODO: Make colorbar lines auto-follow changes in contour lines.
        return self.add_lines(
            cs.levels,
            cs.to_rgba(cs.cvalues, cs.alpha),
            cs.get_linewidths(),
            erase=erase)
    else:
        self, levels, colors, linewidths, erase = params.values()
    y = self._locate(levels)
    # Keep only levels that fall inside the colorbar range (with a small
    # relative tolerance), and filter the per-line properties to match.
    rtol = (self._y[-1] - self._y[0]) * 1e-10
    igood = (y < self._y[-1] + rtol) & (y > self._y[0] - rtol)
    y = y[igood]
    if np.iterable(colors):
        colors = np.asarray(colors)[igood]
    if np.iterable(linewidths):
        linewidths = np.asarray(linewidths)[igood]
    X, Y = np.meshgrid([0, 1], y)
    if self.orientation == 'vertical':
        xy = np.stack([X, Y], axis=-1)
    else:
        xy = np.stack([Y, X], axis=-1)
    col = collections.LineCollection(xy, linewidths=linewidths,
                                     colors=colors)
    if erase and self.lines:
        for lc in self.lines:
            lc.remove()
        self.lines = []
    self.lines.append(col)
    # make a clip path that is just a linewidth bigger than the Axes...
    fac = np.max(linewidths) / 72
    xy = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
    inches = self.ax.get_figure().dpi_scale_trans
    # do in inches:
    xy = inches.inverted().transform(self.ax.transAxes.transform(xy))
    xy[[0, 1, 4], 1] -= fac
    xy[[2, 3], 1] += fac
    # back to axes units...
    xy = self.ax.transAxes.inverted().transform(inches.transform(xy))
    col.set_clip_path(mpath.Path(xy, closed=True),
                      self.ax.transAxes)
    self.ax.add_collection(col)
    self.stale = True
def update_ticks(self):
    """
    Set up the ticks and ticklabels. This should not be needed by users.
    """
    # Get the locator and formatter; defaults to self._locator if not None.
    self._get_ticker_locator_formatter()
    # Push the resolved tickers onto the colorbar's long axis.
    self.long_axis.set_major_locator(self._locator)
    self.long_axis.set_minor_locator(self._minorlocator)
    self.long_axis.set_major_formatter(self._formatter)
def _get_ticker_locator_formatter(self):
    """
    Return the ``locator`` and ``formatter`` of the colorbar.

    If they have not been defined (i.e. are *None*), the formatter and
    locator are retrieved from the axis, or from the value of the
    boundaries for a boundary norm.

    Called by update_ticks...
    """
    locator = self._locator
    formatter = self._formatter
    minorlocator = self._minorlocator
    if isinstance(self.norm, colors.BoundaryNorm):
        # Ticks go exactly on the norm's boundaries.
        b = self.norm.boundaries
        if locator is None:
            locator = ticker.FixedLocator(b, nbins=10)
        if minorlocator is None:
            minorlocator = ticker.FixedLocator(b)
    elif isinstance(self.norm, colors.NoNorm):
        if locator is None:
            # put ticks on integers between the boundaries of NoNorm
            nv = len(self._values)
            base = 1 + int(nv / 10)
            locator = ticker.IndexLocator(base=base, offset=.5)
    elif self.boundaries is not None:
        b = self._boundaries[self._inside]
        if locator is None:
            locator = ticker.FixedLocator(b, nbins=10)
    else:  # most cases:
        if locator is None:
            # we haven't set the locator explicitly, so use the default
            # for this axis:
            locator = self.long_axis.get_major_locator()
        if minorlocator is None:
            minorlocator = self.long_axis.get_minor_locator()
    if minorlocator is None:
        # Fall through: no minor ticks unless something above chose them.
        minorlocator = ticker.NullLocator()
    if formatter is None:
        formatter = self.long_axis.get_major_formatter()
    self._locator = locator
    self._formatter = formatter
    self._minorlocator = minorlocator
    _log.debug('locator: %r', locator)
def set_ticks(self, ticks, *, labels=None, minor=False, **kwargs):
    """
    Set tick locations.

    Parameters
    ----------
    ticks : 1D array-like or `~matplotlib.ticker.Locator`
        List of tick locations, or a Locator instance to use directly.
    labels : list of str, optional
        List of tick labels. If not set, the labels show the data value.
    minor : bool, default: False
        If ``False``, set the major ticks; if ``True``, the minor ticks.
    **kwargs
        `.Text` properties for the labels. These take effect only if you
        pass *labels*. In other cases, please use `~.Axes.tick_params`.
    """
    if np.iterable(ticks):
        self.long_axis.set_ticks(ticks, labels=labels, minor=minor,
                                 **kwargs)
        # Remember the locator the axis created for these fixed ticks.
        self._locator = self.long_axis.get_major_locator()
    else:
        # *ticks* is assumed to be a Locator instance.
        self._locator = ticks
        self.long_axis.set_major_locator(self._locator)
    self.stale = True
def get_ticks(self, minor=False):
    """
    Return the tick locations as a list.

    Parameters
    ----------
    minor : bool, default: False
        If True, return the minor tick locations instead of the major ones.
    """
    axis = self.long_axis
    return axis.get_minorticklocs() if minor else axis.get_majorticklocs()
def set_ticklabels(self, ticklabels, *, minor=False, **kwargs):
    """
    [*Discouraged*] Set tick labels.

    .. admonition:: Discouraged

        The use of this method is discouraged, because of the dependency
        on tick positions. In most cases, you'll want to use
        ``set_ticks(positions, labels=labels)`` instead.

        If you are using this method, you should always fix the tick
        positions before, e.g. by using `.Colorbar.set_ticks` or by
        explicitly setting a `~.ticker.FixedLocator` on the long axis
        of the colorbar. Otherwise, ticks are free to move and the
        labels may end up in unexpected positions.

    Parameters
    ----------
    ticklabels : sequence of str or of `.Text`
        Texts for labeling each tick location in the sequence set by
        `.Colorbar.set_ticks`; the number of labels must match the number
        of locations.
    minor : bool
        If True, set minor ticks instead of major ticks.
    **kwargs
        `.Text` properties for the labels.
    """
    self.long_axis.set_ticklabels(ticklabels, minor=minor, **kwargs)
def minorticks_on(self):
    """
    Turn on colorbar minor ticks.
    """
    self.ax.minorticks_on()
    # The short axis never shows ticks; suppress the minors it just gained.
    self._short_axis().set_minor_locator(ticker.NullLocator())
def minorticks_off(self):
    """Turn the minor ticks of the colorbar off."""
    # Remember the choice so later update_ticks() calls keep minors off.
    self._minorlocator = ticker.NullLocator()
    self.long_axis.set_minor_locator(self._minorlocator)
def set_label(self, label, *, loc=None, **kwargs):
    """
    Add a label to the long axis of the colorbar.

    Parameters
    ----------
    label : str
        The label text.
    loc : str, optional
        The location of the label.

        - For horizontal orientation one of {'left', 'center', 'right'}
        - For vertical orientation one of {'bottom', 'center', 'top'}

        Defaults to :rc:`xaxis.labellocation` or :rc:`yaxis.labellocation`
        depending on the orientation.
    **kwargs
        Keyword arguments are passed to `~.Axes.set_xlabel` /
        `~.Axes.set_ylabel`.
        Supported keywords are *labelpad* and `.Text` properties.
    """
    # A vertical bar labels its y-axis, a horizontal bar its x-axis.
    labeler = (self.ax.set_ylabel if self.orientation == "vertical"
               else self.ax.set_xlabel)
    labeler(label, loc=loc, **kwargs)
    self.stale = True
def set_alpha(self, alpha):
    """
    Set the transparency between 0 (transparent) and 1 (opaque).

    If an array is provided, *alpha* will be set to None to use the
    transparency values associated with the colormap.
    """
    if isinstance(alpha, np.ndarray):
        # Per-element transparency is carried by the colormap, not here.
        self.alpha = None
    else:
        self.alpha = alpha
def _set_scale(self, scale, **kwargs):
    """
    Set the colorbar long axis scale.

    Parameters
    ----------
    scale : {"linear", "log", "symlog", "logit", ...} or `.ScaleBase`
        The axis scale type to apply.

    **kwargs
        Different keyword arguments are accepted, depending on the scale.
        See the respective class keyword arguments:

        - `matplotlib.scale.LinearScale`
        - `matplotlib.scale.LogScale`
        - `matplotlib.scale.SymmetricalLogScale`
        - `matplotlib.scale.LogitScale`
        - `matplotlib.scale.FuncScale`
        - `matplotlib.scale.AsinhScale`

    Notes
    -----
    By default, Matplotlib supports the above-mentioned scales.
    Additionally, custom scales may be registered using
    `matplotlib.scale.register_scale`. These scales can then also
    be used here.
    """
    # Delegate to the axis-level scale setter on the long axis.
    self.long_axis._set_axes_scale(scale, **kwargs)
def remove(self):
    """
    Remove this colorbar from the figure.

    If the colorbar was created with ``use_gridspec=True`` the previous
    gridspec is restored.
    """
    # Unregister this colorbar from its parent Axes (set by make_axes).
    if hasattr(self.ax, '_colorbar_info'):
        parents = self.ax._colorbar_info['parents']
        for a in parents:
            if self.ax in a._colorbars:
                a._colorbars.remove(self.ax)
    self.ax.remove()
    # Break the mappable <-> colorbar links.
    self.mappable.callbacks.disconnect(self.mappable.colorbar_cid)
    self.mappable.colorbar = None
    self.mappable.colorbar_cid = None
    # Remove the extension callbacks
    self.ax.callbacks.disconnect(self._extend_cid1)
    self.ax.callbacks.disconnect(self._extend_cid2)
    try:
        ax = self.mappable.axes
    except AttributeError:
        # Mappable is not attached to an Axes; nothing left to restore.
        return
    try:
        subplotspec = self.ax.get_subplotspec().get_gridspec()._subplot_spec
    except AttributeError:  # use_gridspec was False
        pos = ax.get_position(original=True)
        ax._set_position(pos)
    else:  # use_gridspec was True
        ax.set_subplotspec(subplotspec)
def _process_values(self):
    """
    Set `_boundaries` and `_values` based on the self.boundaries and
    self.values if not None, or based on the size of the colormap and
    the vmin/vmax of the norm.
    """
    if self.values is not None:
        # set self._boundaries from the values...
        self._values = np.array(self.values)
        if self.boundaries is None:
            # bracket values by 1/2 dv:
            b = np.zeros(len(self.values) + 1)
            b[1:-1] = 0.5 * (self._values[:-1] + self._values[1:])
            # Extrapolate the outermost boundaries symmetrically.
            b[0] = 2.0 * b[1] - b[2]
            b[-1] = 2.0 * b[-2] - b[-3]
            self._boundaries = b
            return
        self._boundaries = np.array(self.boundaries)
        return
    # otherwise values are set from the boundaries
    if isinstance(self.norm, colors.BoundaryNorm):
        b = self.norm.boundaries
    elif isinstance(self.norm, colors.NoNorm):
        # NoNorm has N blocks, so N+1 boundaries, centered on integers:
        b = np.arange(self.cmap.N + 1) - .5
    elif self.boundaries is not None:
        b = self.boundaries
    else:
        # otherwise make the boundaries from the size of the cmap:
        N = self.cmap.N + 1
        b, _ = self._uniform_y(N)
    # add extra boundaries if needed:
    if self._extend_lower():
        b = np.hstack((b[0] - 1, b))
    if self._extend_upper():
        b = np.hstack((b, b[-1] + 1))
    # transform from 0-1 to vmin-vmax:
    if self.mappable.get_array() is not None:
        self.mappable.autoscale_None()
    if not self.norm.scaled():
        # If we still aren't scaled after autoscaling, use 0, 1 as default
        self.norm.vmin = 0
        self.norm.vmax = 1
    # Guard against a degenerate (vmin == vmax) range.
    self.norm.vmin, self.norm.vmax = mtransforms.nonsingular(
        self.norm.vmin, self.norm.vmax, expander=0.1)
    if (not isinstance(self.norm, colors.BoundaryNorm) and
            (self.boundaries is None)):
        b = self.norm.inverse(b)
    self._boundaries = np.asarray(b, dtype=float)
    self._values = 0.5 * (self._boundaries[:-1] + self._boundaries[1:])
    if isinstance(self.norm, colors.NoNorm):
        # NoNorm expects integer values; nudge to avoid rounding down.
        self._values = (self._values + 0.00001).astype(np.int16)
def _mesh(self):
    """
    Return the coordinate arrays for the colorbar pcolormesh/patches.

    These are scaled between vmin and vmax, and already handle colorbar
    orientation.
    """
    y, _ = self._proportional_y()
    # Use the vmin and vmax of the colorbar, which may not be the same
    # as the norm. There are situations where the colormap has a
    # narrower range than the colorbar and we want to accommodate the
    # extra contours.
    if (isinstance(self.norm, (colors.BoundaryNorm, colors.NoNorm))
            or self.boundaries is not None):
        # not using a norm.
        y = y * (self.vmax - self.vmin) + self.vmin
    else:
        # Update the norm values in a context manager as it is only
        # a temporary change and we don't want to propagate any signals
        # attached to the norm (callbacks.blocked).
        with (self.norm.callbacks.blocked(),
              cbook._setattr_cm(self.norm, vmin=self.vmin, vmax=self.vmax)):
            y = self.norm.inverse(y)
    self._y = y
    X, Y = np.meshgrid([0., 1.], y)
    # Return in (x, y) order for a vertical bar, swapped for horizontal.
    if self.orientation == 'vertical':
        return (X, Y)
    else:
        return (Y, X)
def _forward_boundaries(self, x):
# map boundaries equally between 0 and 1...
b = self._boundaries
y = np.interp(x, b, np.linspace(0, 1, len(b)))
# the following avoids ticks in the extends:
eps = (b[-1] - b[0]) * 1e-6
# map these _well_ out of bounds to keep any ticks out
# of the extends region...
y[x < b[0]-eps] = -1
y[x > b[-1]+eps] = 2
return y
def _inverse_boundaries(self, x):
# invert the above...
b = self._boundaries
return np.interp(x, np.linspace(0, 1, len(b)), b)
def _reset_locator_formatter_scale(self):
    """
    Reset the locator et al to defaults. Any user-hardcoded changes
    need to be re-entered if this gets called (either at init, or when
    the mappable normal gets changed: Colorbar.update_normal)
    """
    self._process_values()
    self._locator = None
    self._minorlocator = None
    self._formatter = None
    self._minorformatter = None
    if (isinstance(self.mappable, contour.ContourSet) and
            isinstance(self.norm, colors.LogNorm)):
        # if contours have lognorm, give them a log scale...
        self._set_scale('log')
    elif (self.boundaries is not None or
            isinstance(self.norm, colors.BoundaryNorm)):
        if self.spacing == 'uniform':
            # Equal visual spacing per boundary interval.
            funcs = (self._forward_boundaries, self._inverse_boundaries)
            self._set_scale('function', functions=funcs)
        elif self.spacing == 'proportional':
            self._set_scale('linear')
    elif getattr(self.norm, '_scale', None):
        # use the norm's scale (if it exists and is not None):
        self._set_scale(self.norm._scale)
    elif type(self.norm) is colors.Normalize:
        # plain Normalize:
        self._set_scale('linear')
    else:
        # norm._scale is None or not an attr: derive the scale from
        # the Norm:
        funcs = (self.norm, self.norm.inverse)
        self._set_scale('function', functions=funcs)
def _locate(self, x):
    """
    Given a set of color data values, return their
    corresponding colorbar data coordinates.

    Parameters
    ----------
    x : array-like
        Color data values to locate on the colorbar.
    """
    if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
        # These norms place boundaries directly in data space.
        b = self._boundaries
        xn = x
    else:
        # Do calculations using normalized coordinates so
        # as to make the interpolation more accurate.
        b = self.norm(self._boundaries, clip=False).filled()
        xn = self.norm(x, clip=False).filled()
    bunique = b[self._inside]
    yunique = self._y
    z = np.interp(xn, bunique, yunique)
    return z
# trivial helpers
def _uniform_y(self, N):
"""
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus extension lengths if required.
"""
automin = automax = 1. / (N - 1.)
extendlength = self._get_extension_lengths(self.extendfrac,
automin, automax,
default=0.05)
y = np.linspace(0, 1, N)
return y, extendlength
def _proportional_y(self):
    """
    Return colorbar data coordinates for the boundaries of
    a proportional colorbar, plus extension lengths if required:
    """
    if (isinstance(self.norm, colors.BoundaryNorm) or
            self.boundaries is not None):
        # Normalize against the span of the *interior* boundaries.
        y = (self._boundaries - self._boundaries[self._inside][0])
        y = y / (self._boundaries[self._inside][-1] -
                 self._boundaries[self._inside][0])
        # need yscaled the same as the axes scale to get
        # the extend lengths.
        if self.spacing == 'uniform':
            yscaled = self._forward_boundaries(self._boundaries)
        else:
            yscaled = y
    else:
        y = self.norm(self._boundaries.copy())
        y = np.ma.filled(y, np.nan)
        # the norm and the scale should be the same...
        yscaled = y
    y = y[self._inside]
    yscaled = yscaled[self._inside]
    # normalize from 0..1:
    norm = colors.Normalize(y[0], y[-1])
    y = np.ma.filled(norm(y), np.nan)
    norm = colors.Normalize(yscaled[0], yscaled[-1])
    yscaled = np.ma.filled(norm(yscaled), np.nan)
    # make the lower and upper extend lengths proportional to the lengths
    # of the first and last boundary spacing (if extendfrac='auto'):
    automin = yscaled[1] - yscaled[0]
    automax = yscaled[-1] - yscaled[-2]
    extendlength = [0, 0]
    if self._extend_lower() or self._extend_upper():
        extendlength = self._get_extension_lengths(
            self.extendfrac, automin, automax, default=0.05)
    return y, extendlength
def _get_extension_lengths(self, frac, automin, automax, default=0.05):
"""
Return the lengths of colorbar extensions.
This is a helper method for _uniform_y and _proportional_y.
"""
# Set the default value.
extendlength = np.array([default, default])
if isinstance(frac, str):
_api.check_in_list(['auto'], extendfrac=frac.lower())
# Use the provided values when 'auto' is required.
extendlength[:] = [automin, automax]
elif frac is not None:
try:
# Try to set min and max extension fractions directly.
extendlength[:] = frac
# If frac is a sequence containing None then NaN may
# be encountered. This is an error.
if np.isnan(extendlength).any():
raise ValueError()
except (TypeError, ValueError) as err:
# Raise an error on encountering an invalid value for frac.
raise ValueError('invalid value for extendfrac') from err
return extendlength
def _extend_lower(self):
"""Return whether the lower limit is open ended."""
minmax = "max" if self.long_axis.get_inverted() else "min"
return self.extend in ('both', minmax)
def _extend_upper(self):
"""Return whether the upper limit is open ended."""
minmax = "min" if self.long_axis.get_inverted() else "max"
return self.extend in ('both', minmax)
def _short_axis(self):
"""Return the short axis"""
if self.orientation == 'vertical':
return self.ax.xaxis
return self.ax.yaxis
def _get_view(self):
    # docstring inherited
    # An interactive view for a colorbar is the norm's vmin/vmax
    return self.norm.vmin, self.norm.vmax
def _set_view(self, view):
    # docstring inherited
    # An interactive view for a colorbar is the norm's vmin/vmax
    self.norm.vmin, self.norm.vmax = view
def _set_view_from_bbox(self, bbox, direction='in',
                        mode=None, twinx=False, twiny=False):
    # docstring inherited
    # For colorbars, we use the zoom bbox to scale the norm's vmin/vmax
    new_xbound, new_ybound = self.ax._prepare_view_from_bbox(
        bbox, direction=direction, mode=mode, twinx=twinx, twiny=twiny)
    # Only the bound along the long axis is meaningful for a colorbar.
    if self.orientation == 'horizontal':
        self.norm.vmin, self.norm.vmax = new_xbound
    elif self.orientation == 'vertical':
        self.norm.vmin, self.norm.vmax = new_ybound
def drag_pan(self, button, key, x, y):
    # docstring inherited
    points = self.ax._get_pan_points(button, key, x, y)
    # Pan by shifting the norm limits along the colorbar's long axis.
    if points is not None:
        if self.orientation == 'horizontal':
            self.norm.vmin, self.norm.vmax = points[:, 0]
        elif self.orientation == 'vertical':
            self.norm.vmin, self.norm.vmax = points[:, 1]
# Backcompat API: alias kept so code importing the old name keeps working.
ColorbarBase = Colorbar
def _normalize_location_orientation(location, orientation):
    """
    Resolve *location* and *orientation* into a dict of layout settings
    (location, anchor, panchor, pad, orientation) for colorbar placement.
    """
    if location is None:
        location = _get_ticklocation_from_orientation(orientation)
    settings_by_location = {
        "left": {"location": "left", "anchor": (1.0, 0.5),
                 "panchor": (0.0, 0.5), "pad": 0.10},
        "right": {"location": "right", "anchor": (0.0, 0.5),
                  "panchor": (1.0, 0.5), "pad": 0.05},
        "top": {"location": "top", "anchor": (0.5, 0.0),
                "panchor": (0.5, 1.0), "pad": 0.05},
        "bottom": {"location": "bottom", "anchor": (0.5, 1.0),
                   "panchor": (0.5, 0.0), "pad": 0.15},
    }
    loc_settings = _api.check_getitem(settings_by_location, location=location)
    loc_settings["orientation"] = _get_orientation_from_location(location)
    # Both arguments may be given only if they agree with each other.
    if orientation is not None and orientation != loc_settings["orientation"]:
        raise TypeError("location and orientation are mutually exclusive")
    return loc_settings
def _get_orientation_from_location(location):
    """Return the colorbar orientation implied by *location*."""
    orientation_by_location = {
        None: None,
        "left": "vertical", "right": "vertical",
        "top": "horizontal", "bottom": "horizontal",
    }
    return _api.check_getitem(orientation_by_location, location=location)
def _get_ticklocation_from_orientation(orientation):
    """Return the default tick location for *orientation*."""
    location_by_orientation = {
        None: "right",
        "vertical": "right",
        "horizontal": "bottom",
    }
    return _api.check_getitem(location_by_orientation,
                              orientation=orientation)
@_docstring.interpd
def make_axes(parents, location=None, orientation=None, fraction=0.15,
              shrink=1.0, aspect=20, **kwargs):
    """
    Create an `~.axes.Axes` suitable for a colorbar.

    The Axes is placed in the figure of the *parents* Axes, by resizing and
    repositioning *parents*.

    Parameters
    ----------
    parents : `~matplotlib.axes.Axes` or iterable or `numpy.ndarray` of `~.axes.Axes`
        The Axes to use as parents for placing the colorbar.
    %(_make_axes_kw_doc)s

    Returns
    -------
    cax : `~matplotlib.axes.Axes`
        The child Axes.
    kwargs : dict
        The reduced keyword dictionary to be passed when creating the colorbar
        instance.
    """
    loc_settings = _normalize_location_orientation(location, orientation)
    # put appropriate values into the kwargs dict for passing back to
    # the Colorbar class
    kwargs['orientation'] = loc_settings['orientation']
    location = kwargs['ticklocation'] = loc_settings['location']

    anchor = kwargs.pop('anchor', loc_settings['anchor'])
    panchor = kwargs.pop('panchor', loc_settings['panchor'])
    aspect0 = aspect
    # turn parents into a list if it is not already. Note we cannot
    # use .flatten or .ravel as these copy the references rather than
    # reuse them, leading to a memory leak
    if isinstance(parents, np.ndarray):
        parents = list(parents.flat)
    elif np.iterable(parents):
        parents = list(parents)
    else:
        parents = [parents]

    fig = parents[0].get_figure()

    pad0 = 0.05 if fig.get_constrained_layout() else loc_settings['pad']
    pad = kwargs.pop('pad', pad0)

    if not all(fig is ax.get_figure() for ax in parents):
        raise ValueError('Unable to create a colorbar Axes as not all '
                         'parents share the same figure.')

    # take a bounding box around all of the given Axes
    parents_bbox = mtransforms.Bbox.union(
        [ax.get_position(original=True).frozen() for ax in parents])

    pb = parents_bbox
    # Split off a slice of the parents' bounding box for the colorbar, with
    # a *pad*-wide gap between the remaining parents area and the bar.
    if location in ('left', 'right'):
        if location == 'left':
            pbcb, _, pb1 = pb.splitx(fraction, fraction + pad)
        else:
            pb1, _, pbcb = pb.splitx(1 - fraction - pad, 1 - fraction)
        pbcb = pbcb.shrunk(1.0, shrink).anchored(anchor, pbcb)
    else:
        if location == 'bottom':
            pbcb, _, pb1 = pb.splity(fraction, fraction + pad)
        else:
            pb1, _, pbcb = pb.splity(1 - fraction - pad, 1 - fraction)
        pbcb = pbcb.shrunk(shrink, 1.0).anchored(anchor, pbcb)

        # define the aspect ratio in terms of y's per x rather than x's per y
        aspect = 1.0 / aspect

    # define a transform which takes us from old axes coordinates to
    # new axes coordinates
    shrinking_trans = mtransforms.BboxTransform(parents_bbox, pb1)

    # transform each of the Axes in parents using the new transform
    for ax in parents:
        new_posn = shrinking_trans.transform(ax.get_position(original=True))
        new_posn = mtransforms.Bbox(new_posn)
        ax._set_position(new_posn)
        if panchor is not False:
            ax.set_anchor(panchor)

    cax = fig.add_axes(pbcb, label="")
    for a in parents:
        a._colorbars.append(cax)  # tell the parent it has a colorbar
    # Stash the layout parameters so Colorbar.remove() can undo them.
    cax._colorbar_info = dict(
        parents=parents,
        location=location,
        shrink=shrink,
        anchor=anchor,
        panchor=panchor,
        fraction=fraction,
        aspect=aspect0,
        pad=pad)
    # and we need to set the aspect ratio by hand...
    cax.set_anchor(anchor)
    cax.set_box_aspect(aspect)
    cax.set_aspect('auto')

    return cax, kwargs
@_docstring.interpd
def make_axes_gridspec(parent, *, location=None, orientation=None,
                       fraction=0.15, shrink=1.0, aspect=20, **kwargs):
    """
    Create an `~.axes.Axes` suitable for a colorbar.

    The Axes is placed in the figure of the *parent* Axes, by resizing and
    repositioning *parent*.

    This function is similar to `.make_axes` and mostly compatible with it.
    Primary differences are

    - `.make_axes_gridspec` requires the *parent* to have a subplotspec.
    - `.make_axes` positions the Axes in figure coordinates;
      `.make_axes_gridspec` positions it using a subplotspec.
    - `.make_axes` updates the position of the parent. `.make_axes_gridspec`
      replaces the parent gridspec with a new one.

    Parameters
    ----------
    parent : `~matplotlib.axes.Axes`
        The Axes to use as parent for placing the colorbar.
    %(_make_axes_kw_doc)s

    Returns
    -------
    cax : `~matplotlib.axes.Axes`
        The child Axes.
    kwargs : dict
        The reduced keyword dictionary to be passed when creating the colorbar
        instance.
    """
    loc_settings = _normalize_location_orientation(location, orientation)
    kwargs['orientation'] = loc_settings['orientation']
    location = kwargs['ticklocation'] = loc_settings['location']

    aspect0 = aspect
    anchor = kwargs.pop('anchor', loc_settings['anchor'])
    panchor = kwargs.pop('panchor', loc_settings['panchor'])
    pad = kwargs.pop('pad', loc_settings["pad"])
    # Express the pad as the gridspec's relative spacing parameter.
    wh_space = 2 * pad / (1 - pad)

    # Replace the parent's subplotspec by a small gridspec holding the
    # (shrunk, anchored) colorbar cell next to the parent's cell.
    if location in ('left', 'right'):
        gs = parent.get_subplotspec().subgridspec(
            3, 2, wspace=wh_space, hspace=0,
            height_ratios=[(1-anchor[1])*(1-shrink), shrink, anchor[1]*(1-shrink)])
        if location == 'left':
            gs.set_width_ratios([fraction, 1 - fraction - pad])
            ss_main = gs[:, 1]
            ss_cb = gs[1, 0]
        else:
            gs.set_width_ratios([1 - fraction - pad, fraction])
            ss_main = gs[:, 0]
            ss_cb = gs[1, 1]
    else:
        gs = parent.get_subplotspec().subgridspec(
            2, 3, hspace=wh_space, wspace=0,
            width_ratios=[anchor[0]*(1-shrink), shrink, (1-anchor[0])*(1-shrink)])
        if location == 'top':
            gs.set_height_ratios([fraction, 1 - fraction - pad])
            ss_main = gs[1, :]
            ss_cb = gs[0, 1]
        else:
            gs.set_height_ratios([1 - fraction - pad, fraction])
            ss_main = gs[0, :]
            ss_cb = gs[1, 1]
        # For horizontal bars the box aspect is y's per x.
        aspect = 1 / aspect

    parent.set_subplotspec(ss_main)
    if panchor is not False:
        parent.set_anchor(panchor)

    fig = parent.get_figure()
    cax = fig.add_subplot(ss_cb, label="")
    parent._colorbars.append(cax)  # tell the parent it has a colorbar
    cax.set_anchor(anchor)
    cax.set_box_aspect(aspect)
    cax.set_aspect('auto')
    # Stash the layout parameters so Colorbar.remove() can undo them.
    cax._colorbar_info = dict(
        location=location,
        parents=[parent],
        shrink=shrink,
        anchor=anchor,
        panchor=panchor,
        fraction=fraction,
        aspect=aspect0,
        pad=pad)

    return cax, kwargs
venv\Lib\site-packages\matplotlib\colorizer.py
"""
The Colorizer class which handles the data to color pipeline via a
normalization and a colormap.
.. admonition:: Provisional status of colorizer
The ``colorizer`` module and classes in this file are considered
provisional and may change at any time without a deprecation period.
.. seealso::
:doc:`/gallery/color/colormap_reference` for a list of builtin colormaps.
:ref:`colormap-manipulation` for examples of how to make colormaps.
:ref:`colormaps` for an in-depth discussion of choosing colormaps.
:ref:`colormapnorms` for more details about data normalization.
"""
import functools
import numpy as np
from numpy import ma
from matplotlib import _api, colors, cbook, scale, artist
import matplotlib as mpl
mpl._docstring.interpd.register(
colorizer_doc="""\
colorizer : `~matplotlib.colorizer.Colorizer` or None, default: None
The Colorizer object used to map color to data. If None, a Colorizer
object is created from a *norm* and *cmap*.""",
)
class Colorizer:
"""
Data to color pipeline.
This pipeline is accessible via `.Colorizer.to_rgba` and executed via
the `.Colorizer.norm` and `.Colorizer.cmap` attributes.
Parameters
----------
cmap: colors.Colormap or str or None, default: None
The colormap used to color data.
norm: colors.Normalize or str or None, default: None
The normalization used to normalize the data
"""
def __init__(self, cmap=None, norm=None):
    """
    Parameters
    ----------
    cmap : str or colormap or None, default: None
        The colormap used to color data.
    norm : colors.Normalize or str or None, default: None
        The normalization used to normalize the data.
    """
    self._cmap = None
    self._set_cmap(cmap)
    # Seed the attributes the `norm` property setter reads before
    # assigning through it (it disconnects/connects callbacks).
    self._id_norm = None
    self._norm = None
    self.norm = norm
    self.callbacks = cbook.CallbackRegistry(signals=["changed"])
    self.colorbar = None
def _scale_norm(self, norm, vmin, vmax, A):
    """
    Helper for initial scaling.

    Used by public functions that create a ScalarMappable and support
    parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm*
    will take precedence over *vmin*, *vmax*.

    Note that this method does not set the norm.
    """
    if vmin is not None or vmax is not None:
        self.set_clim(vmin, vmax)
        # An explicit Normalize instance carries its own limits, so
        # passing vmin/vmax alongside it is ambiguous.
        if isinstance(norm, colors.Normalize):
            raise ValueError(
                "Passing a Normalize instance simultaneously with "
                "vmin/vmax is not supported. Please pass vmin/vmax "
                "directly to the norm when creating it.")

    # always resolve the autoscaling so we have concrete limits
    # rather than deferring to draw time.
    self.autoscale_None(A)
@property
def norm(self):
    """The `.colors.Normalize` instance used to normalize the data."""
    return self._norm
@norm.setter
def norm(self, norm):
    # Accept a Normalize instance, a registered scale name, or None
    # (which selects a plain linear Normalize).
    _api.check_isinstance((colors.Normalize, str, None), norm=norm)
    if norm is None:
        norm = colors.Normalize()
    elif isinstance(norm, str):
        try:
            scale_cls = scale._scale_mapping[norm]
        except KeyError:
            raise ValueError(
                "Invalid norm str name; the following values are "
                f"supported: {', '.join(scale._scale_mapping)}"
            ) from None
        norm = _auto_norm_from_scale(scale_cls)()

    if norm is self.norm:
        # We aren't updating anything
        return

    in_init = self.norm is None
    # Remove the current callback and connect to the new one
    if not in_init:
        self.norm.callbacks.disconnect(self._id_norm)
    self._norm = norm
    self._id_norm = self.norm.callbacks.connect('changed',
                                                self.changed)
    if not in_init:
        # Notify listeners only on real changes, not during __init__.
        self.changed()
def to_rgba(self, x, alpha=None, bytes=False, norm=True):
    """
    Return a normalized RGBA array corresponding to *x*.

    In the normal case, *x* is a 1D or 2D sequence of scalars, and the
    corresponding `~numpy.ndarray` of RGBA values is produced by running
    *x* through this Colorizer's norm and colormap.

    As a special case, if *x* is a 3D `~numpy.ndarray` whose last axis
    has length 3 or 4, it is treated as an already-colored RGB(A) image
    and no color-mapping is done: uint8 arrays and floats in the 0-1
    range are accepted (otherwise a ValueError is raised), NaN or masked
    entries get alpha 0, and *alpha* (default 1) only fills in a missing
    alpha channel — an existing fourth channel is never replaced.

    If *bytes* is False (default), the returned RGBA values are floats
    in the 0-1 range; if True, they are `~numpy.uint8` in 0-255.  If
    *norm* is False, the input is assumed to already lie in the 0-1
    range and is not normalized.
    """
    # Already-RGB(A) images bypass the norm -> colormap pipeline.
    if isinstance(x, np.ndarray) and x.ndim == 3:
        return self._pass_image_data(x, alpha, bytes, norm)
    data = ma.asarray(x)
    if norm:
        data = self.norm(data)
    return self.cmap(data, alpha=alpha, bytes=bytes)
@staticmethod
def _pass_image_data(x, alpha=None, bytes=False, norm=True):
    """
    Helper function to pass ndarray of shape (...,3) or (..., 4)
    through `to_rgba()`, see `to_rgba()` for docstring.
    """
    if x.shape[2] == 3:
        # RGB image: synthesize the alpha channel from *alpha*
        # (defaulting to fully opaque).
        if alpha is None:
            alpha = 1
        if x.dtype == np.uint8:
            alpha = np.uint8(alpha * 255)
        m, n = x.shape[:2]
        xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
        xx[:, :, :3] = x
        xx[:, :, 3] = alpha
    elif x.shape[2] == 4:
        # RGBA image: used as-is; *alpha* is deliberately ignored.
        xx = x
    else:
        raise ValueError("Third dimension must be 3 or 4")
    if xx.dtype.kind == 'f':
        # If any of R, G, B, or A is nan, set to 0
        if np.any(nans := np.isnan(x)):
            if x.shape[2] == 4:
                # Copy before zeroing so the caller's array is not
                # mutated (the shape-3 branch already made a new array).
                xx = xx.copy()
            xx[np.any(nans, axis=2), :] = 0
        if norm and (xx.max() > 1 or xx.min() < 0):
            raise ValueError("Floating point image RGB values "
                             "must be in the 0..1 range.")
        if bytes:
            xx = (xx * 255).astype(np.uint8)
    elif xx.dtype == np.uint8:
        if not bytes:
            xx = xx.astype(np.float32) / 255
    else:
        raise ValueError("Image RGB array must be uint8 or "
                         "floating point; found %s" % xx.dtype)
    # Account for any masked entries in the original array
    # If any of R, G, B, or A are masked for an entry, we set alpha to 0
    if np.ma.is_masked(x):
        xx[np.any(np.ma.getmaskarray(x), axis=2), 3] = 0
    return xx
def autoscale(self, A):
    """
    Autoscale the scalar limits on the norm instance using the
    current array

    Raises
    ------
    TypeError
        If *A* is None (i.e. no data array has been set yet).
    """
    if A is None:
        raise TypeError('You must first set_array for mappable')
    # If the norm's limits are updated self.changed() will be called
    # through the callbacks attached to the norm
    self.norm.autoscale(A)
def autoscale_None(self, A):
    """
    Autoscale the scalar limits on the norm instance using the
    current array, changing only limits that are None

    Raises
    ------
    TypeError
        If *A* is None (i.e. no data array has been set yet).
    """
    if A is None:
        raise TypeError('You must first set_array for mappable')
    # If the norm's limits are updated self.changed() will be called
    # through the callbacks attached to the norm
    self.norm.autoscale_None(A)
def _set_cmap(self, cmap):
    """
    Set the colormap for luminance data.

    Parameters
    ----------
    cmap : `.Colormap` or str or None
    """
    # bury import to avoid circular imports
    from matplotlib import cm
    in_init = self._cmap is None
    self._cmap = cm._ensure_cmap(cmap)
    if not in_init:
        # Only notify when *replacing* a colormap; during __init__ the
        # callback registry does not exist yet.
        self.changed()
@property
def cmap(self):
    # The `.Colormap` used to map normalized data to RGBA.
    return self._cmap

@cmap.setter
def cmap(self, cmap):
    # Delegates to _set_cmap so listeners are notified on replacement.
    self._set_cmap(cmap)
def set_clim(self, vmin=None, vmax=None):
    """
    Set the norm limits for image scaling.

    Parameters
    ----------
    vmin, vmax : float
         The limits.

         The limits may also be passed as a tuple (*vmin*, *vmax*) as a
         single positional argument.

         .. ACCEPTS: (vmin: float, vmax: float)
    """
    # Accept set_clim((vmin, vmax)): if only the first positional
    # argument was given, try to unpack it as a pair.
    if vmax is None:
        try:
            vmin, vmax = vmin
        except (TypeError, ValueError):
            pass
    orig_vmin_vmax = self.norm.vmin, self.norm.vmax
    # Block the norm's 'changed' signal while updating so listeners never
    # observe a half-updated state (vmin set, vmax not yet); a single
    # signal is emitted afterwards if anything actually changed.
    with self.norm.callbacks.blocked(signal='changed'):
        if vmin is not None:
            self.norm.vmin = colors._sanitize_extrema(vmin)
        if vmax is not None:
            self.norm.vmax = colors._sanitize_extrema(vmax)
    # Emit one update signal iff the limits changed.
    if orig_vmin_vmax != (self.norm.vmin, self.norm.vmax):
        self.norm.callbacks.process('changed')
def get_clim(self):
    """
    Return the values (min, max) that are mapped to the colormap limits.
    """
    return self.norm.vmin, self.norm.vmax
def changed(self):
    """
    Call this whenever the mappable is changed to notify all the
    callbackSM listeners to the 'changed' signal.
    """
    self.callbacks.process('changed')
    # NOTE(review): `stale` is an Artist-style flag; on a bare Colorizer
    # this simply creates the attribute — confirm intended consumers.
    self.stale = True
@property
def vmin(self):
    # Lower colormap limit; convenience view over the norm via get_clim().
    return self.get_clim()[0]

@vmin.setter
def vmin(self, vmin):
    self.set_clim(vmin=vmin)

@property
def vmax(self):
    # Upper colormap limit; convenience view over the norm via get_clim().
    return self.get_clim()[1]

@vmax.setter
def vmax(self, vmax):
    self.set_clim(vmax=vmax)

@property
def clip(self):
    # Whether the norm clips out-of-range values; forwarded to the norm.
    return self.norm.clip

@clip.setter
def clip(self, clip):
    self.norm.clip = clip
class _ColorizerInterface:
    """
    Base class that contains the interface to `Colorizer` objects from
    a `ColorizingArtist` or `.cm.ScalarMappable`.

    Note: This class only contains functions that interface the
    ``.colorizer`` attribute. Other functions that are shared between
    `.ColorizingArtist` and `.cm.ScalarMappable` are not included.
    """
    def _scale_norm(self, norm, vmin, vmax):
        # Delegates to the colorizer, supplying the stored data array.
        self._colorizer._scale_norm(norm, vmin, vmax, self._A)

    def to_rgba(self, x, alpha=None, bytes=False, norm=True):
        """
        Return a normalized RGBA array corresponding to *x*.

        In the normal case, *x* is a 1D or 2D sequence of scalars, and
        the corresponding `~numpy.ndarray` of RGBA values will be returned,
        based on the norm and colormap set for this Colorizer.

        There is one special case, for handling images that are already
        RGB or RGBA, such as might have been read from an image file.
        If *x* is an `~numpy.ndarray` with 3 dimensions,
        and the last dimension is either 3 or 4, then it will be
        treated as an RGB or RGBA array, and no mapping will be done.
        The array can be `~numpy.uint8`, or it can be floats with
        values in the 0-1 range; otherwise a ValueError will be raised.
        Any NaNs or masked elements will be set to 0 alpha.
        If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
        will be used to fill in the transparency. If the last dimension
        is 4, the *alpha* kwarg is ignored; it does not
        replace the preexisting alpha. A ValueError will be raised
        if the third dimension is other than 3 or 4.

        In either case, if *bytes* is *False* (default), the RGBA
        array will be floats in the 0-1 range; if it is *True*,
        the returned RGBA array will be `~numpy.uint8` in the 0 to 255 range.

        If norm is False, no normalization of the input data is
        performed, and it is assumed to be in the range (0-1).
        """
        return self._colorizer.to_rgba(x, alpha=alpha, bytes=bytes, norm=norm)

    def get_clim(self):
        """
        Return the values (min, max) that are mapped to the colormap limits.
        """
        return self._colorizer.get_clim()

    def set_clim(self, vmin=None, vmax=None):
        """
        Set the norm limits for image scaling.

        Parameters
        ----------
        vmin, vmax : float
             The limits.

             For scalar data, the limits may also be passed as a
             tuple (*vmin*, *vmax*) as a single positional argument.

             .. ACCEPTS: (vmin: float, vmax: float)
        """
        # If the norm's limits are updated self.changed() will be called
        # through the callbacks attached to the norm
        self._colorizer.set_clim(vmin, vmax)

    def get_alpha(self):
        # Artist subclasses provide get_alpha(); a bare mixin user falls
        # back to fully opaque.
        try:
            return super().get_alpha()
        except AttributeError:
            return 1

    @property
    def cmap(self):
        # Forwarded to the underlying colorizer.
        return self._colorizer.cmap

    @cmap.setter
    def cmap(self, cmap):
        self._colorizer.cmap = cmap

    def get_cmap(self):
        """Return the `.Colormap` instance."""
        return self._colorizer.cmap

    def set_cmap(self, cmap):
        """
        Set the colormap for luminance data.

        Parameters
        ----------
        cmap : `.Colormap` or str or None
        """
        self.cmap = cmap

    @property
    def norm(self):
        # Forwarded to the underlying colorizer.
        return self._colorizer.norm

    @norm.setter
    def norm(self, norm):
        self._colorizer.norm = norm

    def set_norm(self, norm):
        """
        Set the normalization instance.

        Parameters
        ----------
        norm : `.Normalize` or str or None

        Notes
        -----
        If there are any colorbars using the mappable for this norm, setting
        the norm of the mappable will reset the norm, locator, and formatters
        on the colorbar to default.
        """
        self.norm = norm

    def autoscale(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array
        """
        self._colorizer.autoscale(self._A)

    def autoscale_None(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array, changing only limits that are None
        """
        self._colorizer.autoscale_None(self._A)

    @property
    def colorbar(self):
        """
        The last colorbar associated with this object. May be None
        """
        return self._colorizer.colorbar

    @colorbar.setter
    def colorbar(self, colorbar):
        self._colorizer.colorbar = colorbar

    def _format_cursor_data_override(self, data):
        # This function overwrites Artist.format_cursor_data(). We cannot
        # implement cm.ScalarMappable.format_cursor_data() directly, because
        # most cm.ScalarMappable subclasses inherit from Artist first and from
        # cm.ScalarMappable second, so Artist.format_cursor_data would always
        # have precedence over cm.ScalarMappable.format_cursor_data.
        # Note if cm.ScalarMappable is deprecated, this functionality should be
        # implemented as format_cursor_data() on ColorizingArtist.
        n = self.cmap.N
        if np.ma.getmask(data):
            # Masked cursor value: nothing meaningful to display.
            return "[]"
        normed = self.norm(data)
        if np.isfinite(normed):
            # Estimate a resolution (delta) around *data* so the value is
            # printed with just enough significant digits.
            if isinstance(self.norm, colors.BoundaryNorm):
                # not an invertible normalization mapping
                cur_idx = np.argmin(np.abs(self.norm.boundaries - data))
                neigh_idx = max(0, cur_idx - 1)
                # use max diff to prevent delta == 0
                delta = np.diff(
                    self.norm.boundaries[neigh_idx:cur_idx + 2]
                ).max()
            elif self.norm.vmin == self.norm.vmax:
                # singular norms, use delta of 10% of only value
                delta = np.abs(self.norm.vmin * .1)
            else:
                # Midpoints of neighboring color intervals.
                neighbors = self.norm.inverse(
                    (int(normed * n) + np.array([0, 1])) / n)
                delta = abs(neighbors - data).max()
            g_sig_digits = cbook._g_sig_digits(data, delta)
        else:
            g_sig_digits = 3  # Consistent with default below.
        return f"[{data:-#.{g_sig_digits}g}]"
class _ScalarMappable(_ColorizerInterface):
    """
    A mixin class to map one or multiple sets of scalar data to RGBA.

    The ScalarMappable applies data normalization before returning RGBA colors
    from the given `~matplotlib.colors.Colormap`.
    """

    # _ScalarMappable exists for compatibility with
    # code written before the introduction of the Colorizer
    # and ColorizingArtist classes.
    # _ScalarMappable can be deprecated so that ColorizingArtist
    # inherits directly from _ColorizerInterface.
    # in this case, the following changes should occur:
    # __init__() has its functionality moved to ColorizingArtist.
    # set_array(), get_array(), _get_colorizer() and
    # _check_exclusionary_keywords() are moved to ColorizingArtist.
    # changed() can be removed so long as colorbar.Colorbar
    # is changed to connect to the colorizer instead of the
    # ScalarMappable/ColorizingArtist,
    # otherwise changed() can be moved to ColorizingArtist.
    def __init__(self, norm=None, cmap=None, *, colorizer=None, **kwargs):
        """
        Parameters
        ----------
        norm : `.Normalize` (or subclass thereof) or str or None
            The normalizing object which scales data, typically into the
            interval ``[0, 1]``.
            If a `str`, a `.Normalize` subclass is dynamically generated based
            on the scale with the corresponding name.
            If *None*, *norm* defaults to a *colors.Normalize* object which
            initializes its scaling based on the first data processed.
        cmap : str or `~matplotlib.colors.Colormap`
            The colormap used to map normalized data values to RGBA colors.
        """
        super().__init__(**kwargs)
        self._A = None  # data array; set later via set_array()
        self._colorizer = self._get_colorizer(colorizer=colorizer, norm=norm, cmap=cmap)

        self.colorbar = None
        # Forward the colorizer's 'changed' signal to our own listeners.
        self._id_colorizer = self._colorizer.callbacks.connect('changed', self.changed)
        self.callbacks = cbook.CallbackRegistry(signals=["changed"])

    def set_array(self, A):
        """
        Set the value array from array-like *A*.

        Parameters
        ----------
        A : array-like or None
            The values that are mapped to colors.

            The base class `.ScalarMappable` does not make any assumptions on
            the dimensionality and shape of the value array *A*.
        """
        if A is None:
            self._A = None
            return
        A = cbook.safe_masked_invalid(A, copy=True)
        if not np.can_cast(A.dtype, float, "same_kind"):
            raise TypeError(f"Image data of dtype {A.dtype} cannot be "
                            "converted to float")
        self._A = A
        # Fill in any still-unset norm limits from the new data.
        if not self.norm.scaled():
            self._colorizer.autoscale_None(A)

    def get_array(self):
        """
        Return the array of values, that are mapped to colors.

        The base class `.ScalarMappable` does not make any assumptions on
        the dimensionality and shape of the array.
        """
        return self._A

    def changed(self):
        """
        Call this whenever the mappable is changed to notify all the
        callbackSM listeners to the 'changed' signal.
        """
        self.callbacks.process('changed', self)
        self.stale = True

    @staticmethod
    def _check_exclusionary_keywords(colorizer, **kwargs):
        """
        Raises a ValueError if any kwarg is not None while colorizer is not None.
        """
        if colorizer is not None:
            if any(val is not None for val in kwargs.values()):
                raise ValueError("The `colorizer` keyword cannot be used simultaneously"
                                 " with any of the following keywords: "
                                 + ", ".join(f'`{key}`' for key in kwargs.keys()))

    @staticmethod
    def _get_colorizer(cmap, norm, colorizer):
        """Return *colorizer* if given (enforcing exclusivity), else build one."""
        if isinstance(colorizer, Colorizer):
            # Bug fix: pass the colorizer *instance* (previously the
            # Colorizer class itself was passed, which only worked because
            # the check merely tests for non-None).
            _ScalarMappable._check_exclusionary_keywords(
                colorizer, cmap=cmap, norm=norm
            )
            return colorizer
        return Colorizer(cmap, norm)
# The docstrings here must be generic enough to apply to all relevant methods.
mpl._docstring.interpd.register(
cmap_doc="""\
cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
The Colormap instance or registered colormap name used to map scalar data
to colors.""",
norm_doc="""\
norm : str or `~matplotlib.colors.Normalize`, optional
The normalization method used to scale scalar data to the [0, 1] range
before mapping to colors using *cmap*. By default, a linear scaling is
used, mapping the lowest value to 0 and the highest to 1.
If given, this can be one of the following:
- An instance of `.Normalize` or one of its subclasses
(see :ref:`colormapnorms`).
- A scale name, i.e. one of "linear", "log", "symlog", "logit", etc. For a
list of available scales, call `matplotlib.scale.get_scale_names()`.
In that case, a suitable `.Normalize` subclass is dynamically generated
and instantiated.""",
vmin_vmax_doc="""\
vmin, vmax : float, optional
When using scalar data and no explicit *norm*, *vmin* and *vmax* define
the data range that the colormap covers. By default, the colormap covers
the complete value range of the supplied data. It is an error to use
*vmin*/*vmax* when a *norm* instance is given (but using a `str` *norm*
name together with *vmin*/*vmax* is acceptable).""",
)
class ColorizingArtist(_ScalarMappable, artist.Artist):
    """
    Base class for artists that make map data to color using a `.colorizer.Colorizer`.

    The `.colorizer.Colorizer` applies data normalization before
    returning RGBA colors from a `~matplotlib.colors.Colormap`.
    """
    def __init__(self, colorizer, **kwargs):
        """
        Parameters
        ----------
        colorizer : `.colorizer.Colorizer`
        """
        _api.check_isinstance(Colorizer, colorizer=colorizer)
        super().__init__(colorizer=colorizer, **kwargs)

    @property
    def colorizer(self):
        # The underlying Colorizer (norm + cmap pipeline) for this artist.
        return self._colorizer

    @colorizer.setter
    def colorizer(self, cl):
        _api.check_isinstance(Colorizer, colorizer=cl)
        # Disconnect from the old colorizer before swapping so that only
        # the new one forwards 'changed' signals to this artist.
        self._colorizer.callbacks.disconnect(self._id_colorizer)
        self._colorizer = cl
        self._id_colorizer = cl.callbacks.connect('changed', self.changed)

    def _set_colorizer_check_keywords(self, colorizer, **kwargs):
        """
        Raises a ValueError if any kwarg is not None while colorizer is not None.
        """
        self._check_exclusionary_keywords(colorizer, **kwargs)
        self.colorizer = colorizer
def _auto_norm_from_scale(scale_cls):
    """
    Automatically generate a norm class from *scale_cls*.

    This differs from `.colors.make_norm_from_scale` in the following points:

    - This function is not a class decorator, but directly returns a norm class
      (as if decorating `.Normalize`).
    - The scale is automatically constructed with ``nonpositive="mask"``, if it
      supports such a parameter, to work around the difference in defaults
      between standard scales (which use "clip") and norms (which use "mask").

    Note that ``make_norm_from_scale`` caches the generated norm classes
    (not the instances) and reuses them for later calls. For example,
    ``type(_auto_norm_from_scale("log")) == LogNorm``.
    """
    # Build a trial instance: if the scale rejects ``nonpositive="mask"``
    # (TypeError), fall back to constructing the scale with its defaults.
    try:
        masked_scale = functools.partial(scale_cls, nonpositive="mask")
        norm = colors.make_norm_from_scale(masked_scale)(colors.Normalize)()
    except TypeError:
        norm = colors.make_norm_from_scale(scale_cls)(colors.Normalize)()
    return type(norm)
venv\Lib\site-packages\matplotlib\colors.py
"""
A module for converting numbers or color arguments to *RGB* or *RGBA*.
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification conversions,
and for mapping numbers to colors in a 1-D array of colors called a colormap.
Mapping data onto colors using a colormap typically involves two steps: a data
array is first mapped onto the range 0-1 using a subclass of `Normalize`,
then this number is mapped to a color using a subclass of `Colormap`. Two
subclasses of `Colormap` provided here: `LinearSegmentedColormap`, which uses
piecewise-linear interpolation to define colormaps, and `ListedColormap`, which
makes a colormap from a list of colors.
.. seealso::
:ref:`colormap-manipulation` for examples of how to
make colormaps and
:ref:`colormaps` for a list of built-in colormaps.
:ref:`colormapnorms` for more details about data
normalization
More colormaps are available at palettable_.
The module also provides functions for checking whether an object can be
interpreted as a color (`is_color_like`), for converting such an object
to an RGBA tuple (`to_rgba`) or to an HTML-like hex string in the
"#rrggbb" format (`to_hex`), and a sequence of colors to an (n, 4)
RGBA array (`to_rgba_array`). Caching is used for efficiency.
Colors that Matplotlib recognizes are listed at
:ref:`colors_def`.
.. _palettable: https://jiffyclub.github.io/palettable/
.. _xkcd color survey: https://xkcd.com/color/rgb/
"""
import base64
from collections.abc import Sized, Sequence, Mapping
import functools
import importlib
import inspect
import io
import itertools
from numbers import Real
import re
from PIL import Image
from PIL.PngImagePlugin import PngInfo
import matplotlib as mpl
import numpy as np
from matplotlib import _api, _cm, cbook, scale, _image
from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS
class _ColorMapping(dict):
def __init__(self, mapping):
super().__init__(mapping)
self.cache = {}
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.cache.clear()
def __delitem__(self, key):
super().__delitem__(key)
self.cache.clear()
_colors_full_map = {}
# Set by reverse priority order: later updates win on name collisions, so
# BASE_COLORS > TABLEAU_COLORS > CSS4_COLORS > XKCD_COLORS.
_colors_full_map.update(XKCD_COLORS)
# Also accept 'gray' spellings of the XKCD 'grey' names.
_colors_full_map.update({k.replace('grey', 'gray'): v
                         for k, v in XKCD_COLORS.items()
                         if 'grey' in k})
_colors_full_map.update(CSS4_COLORS)
_colors_full_map.update(TABLEAU_COLORS)
# Also accept 'grey' spellings of the Tableau 'gray' names.
_colors_full_map.update({k.replace('gray', 'grey'): v
                         for k, v in TABLEAU_COLORS.items()
                         if 'gray' in k})
_colors_full_map.update(BASE_COLORS)
# Wrap in _ColorMapping so the to_rgba cache is invalidated on mutation.
_colors_full_map = _ColorMapping(_colors_full_map)

# Pixel sizes of the inline PNG previews (e.g. for IPython reprs).
_REPR_PNG_SIZE = (512, 64)
_BIVAR_REPR_PNG_SIZE = 256
def get_named_colors_mapping():
    """
    Return the global mapping of names to named colors.

    This is the live `_ColorMapping` instance used by `to_rgba`, so
    mutating it changes name resolution globally (and clears the cache).
    """
    return _colors_full_map
class ColorSequenceRegistry(Mapping):
    r"""
    Container for sequences of colors that are known to Matplotlib by name.

    The universal registry instance is `matplotlib.color_sequences`. There
    should be no need for users to instantiate `.ColorSequenceRegistry`
    themselves.

    Read access uses a dict-like interface mapping names to lists of colors::

        import matplotlib as mpl
        colors = mpl.color_sequences['tab10']

    For a list of built in color sequences, see :doc:`/gallery/color/color_sequences`.
    The returned lists are copies, so that their modification does not change
    the global definition of the color sequence.

    Additional color sequences can be added via
    `.ColorSequenceRegistry.register`::

        mpl.color_sequences.register('rgb', ['r', 'g', 'b'])
    """

    _BUILTIN_COLOR_SEQUENCES = {
        'tab10': _cm._tab10_data,
        'tab20': _cm._tab20_data,
        'tab20b': _cm._tab20b_data,
        'tab20c': _cm._tab20c_data,
        'Pastel1': _cm._Pastel1_data,
        'Pastel2': _cm._Pastel2_data,
        'Paired': _cm._Paired_data,
        'Accent': _cm._Accent_data,
        'Dark2': _cm._Dark2_data,
        'Set1': _cm._Set1_data,
        'Set2': _cm._Set2_data,
        'Set3': _cm._Set3_data,
        'petroff10': _cm._petroff10_data,
    }

    def __init__(self):
        self._color_sequences = {**self._BUILTIN_COLOR_SEQUENCES}

    def __getitem__(self, item):
        try:
            # Return a copy so callers cannot mutate the registered sequence.
            return list(self._color_sequences[item])
        except KeyError:
            raise KeyError(f"{item!r} is not a known color sequence name")

    def __iter__(self):
        return iter(self._color_sequences)

    def __len__(self):
        return len(self._color_sequences)

    def __str__(self):
        # Bug fix: this registry holds color sequences, not colormaps; the
        # previous message ("available colormaps") was copy-pasted from the
        # colormap registry.
        return ('ColorSequenceRegistry; available color sequences:\n' +
                ', '.join(f"'{name}'" for name in self))

    def register(self, name, color_list):
        """
        Register a new color sequence.

        The color sequence registry stores a copy of the given *color_list*, so
        that future changes to the original list do not affect the registered
        color sequence. Think of this as the registry taking a snapshot
        of *color_list* at registration.

        Parameters
        ----------
        name : str
            The name for the color sequence.

        color_list : list of :mpltype:`color`
            An iterable returning valid Matplotlib colors when iterating over.
            Note however that the returned color sequence will always be a
            list regardless of the input type.

        Raises
        ------
        ValueError
            If *name* is a builtin sequence name, or any entry of
            *color_list* is not a valid color.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(f"{name!r} is a reserved name for a builtin "
                             "color sequence")

        color_list = list(color_list)  # force copy and coerce type to list
        for color in color_list:
            try:
                to_rgba(color)
            except ValueError:
                raise ValueError(
                    f"{color!r} is not a valid color specification")

        self._color_sequences[name] = color_list

    def unregister(self, name):
        """
        Remove a sequence from the registry.

        You cannot remove built-in color sequences.

        If the name is not registered, returns with no error.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(
                f"Cannot unregister builtin color sequence {name!r}")
        self._color_sequences.pop(name, None)
# Singleton registry exposed as `matplotlib.color_sequences`.
_color_sequences = ColorSequenceRegistry()
def _sanitize_extrema(ex):
if ex is None:
return ex
try:
ret = ex.item()
except AttributeError:
ret = float(ex)
return ret
_nth_color_re = re.compile(r"\AC[0-9]+\Z")
def _is_nth_color(c):
"""Return whether *c* can be interpreted as an item in the color cycle."""
return isinstance(c, str) and _nth_color_re.match(c)
def is_color_like(c):
    """Return whether *c* can be interpreted as an RGB(A) color."""
    # "CN" cycle references are valid colors but cannot be resolved during
    # setup, so accept them without attempting conversion.
    if _is_nth_color(c):
        return True
    try:
        to_rgba(c)
    except (TypeError, ValueError):
        return False
    return True
def _has_alpha_channel(c):
"""Return whether *c* is a color with an alpha channel."""
# 4-element sequences are interpreted as r, g, b, a
return not isinstance(c, str) and len(c) == 4
def _check_color_like(**kwargs):
    """
    For each *key, value* pair in *kwargs*, check that *value* is color-like.

    Raises
    ------
    ValueError
        If any value is not a valid Matplotlib color specification.
    """
    for k, v in kwargs.items():
        if not is_color_like(v):
            raise ValueError(
                f"{v!r} is not a valid value for {k}: supported inputs are "
                f"(r, g, b) and (r, g, b, a) 0-1 float tuples; "
                f"'#rrggbb', '#rrggbbaa', '#rgb', '#rgba' strings; "
                f"named color strings; "
                f"string reprs of 0-1 floats for grayscale values; "
                f"'C0', 'C1', ... strings for colors of the color cycle; "
                f"and pairs combining one of the above with an alpha value")
def same_color(c1, c2):
    """
    Return whether the colors *c1* and *c2* are the same.

    *c1*, *c2* can be single colors or lists/arrays of colors.
    """
    a1 = to_rgba_array(c1)
    a2 = to_rgba_array(c2)
    # 'none' converts to an empty (0, 4) array but still represents a
    # single color, hence the max(..., 1) when counting elements.
    count1 = max(a1.shape[0], 1)
    count2 = max(a2.shape[0], 1)
    if count1 != count2:
        raise ValueError('Different number of elements passed.')
    # The shape test is what makes comparisons against 'none' work: its
    # (0, 4) array cannot be distinguished by value comparison alone.
    return a1.shape == a2.shape and (a1 == a2).all()
def to_rgba(c, alpha=None):
    """
    Convert *c* to an RGBA color.

    Parameters
    ----------
    c : Matplotlib color or ``np.ma.masked``

    alpha : float, optional
        If *alpha* is given, force the alpha value of the returned RGBA tuple
        to *alpha*.

        If None, the alpha value from *c* is used. If *c* does not have an
        alpha channel, then alpha defaults to 1.

        *alpha* is ignored for the color value ``"none"`` (case-insensitive),
        which always maps to ``(0, 0, 0, 0)``.

    Returns
    -------
    tuple
        Tuple of floats ``(r, g, b, a)``, where each channel (red, green, blue,
        alpha) can assume values between 0 and 1.
    """
    # A (color, alpha) pair: an explicit *alpha* argument overrides the
    # alpha bundled with the color.
    if isinstance(c, tuple) and len(c) == 2:
        if alpha is None:
            c, alpha = c
        else:
            c = c[0]
    # Special-case nth color syntax because it should not be cached.
    if _is_nth_color(c):
        prop_cycler = mpl.rcParams['axes.prop_cycle']
        colors = prop_cycler.by_key().get('color', ['k'])
        c = colors[int(c[1:]) % len(colors)]
    try:
        rgba = _colors_full_map.cache[c, alpha]
    except (KeyError, TypeError):  # Not in cache, or unhashable.
        rgba = None
    if rgba is None:  # Suppress exception chaining of cache lookup failure.
        rgba = _to_rgba_no_colorcycle(c, alpha)
        try:
            _colors_full_map.cache[c, alpha] = rgba
        except TypeError:
            # Unhashable inputs (e.g. lists) simply stay uncached.
            pass
    return rgba
def _to_rgba_no_colorcycle(c, alpha=None):
    """
    Convert *c* to an RGBA color, with no support for color-cycle syntax.

    If *alpha* is given, force the alpha value of the returned RGBA tuple
    to *alpha*. Otherwise, the alpha value from *c* is used, if it has alpha
    information, or defaults to 1.

    *alpha* is ignored for the color value ``"none"`` (case-insensitive),
    which always maps to ``(0, 0, 0, 0)``.
    """
    if alpha is not None and not 0 <= alpha <= 1:
        raise ValueError("'alpha' must be between 0 and 1, inclusive")
    orig_c = c  # kept for error messages after *c* is rewritten below
    if c is np.ma.masked:
        return (0., 0., 0., 0.)
    if isinstance(c, str):
        if c.lower() == "none":
            return (0., 0., 0., 0.)
        # Named color.
        try:
            # This may turn c into a non-string, so we check again below.
            c = _colors_full_map[c]
        except KeyError:
            # Single-letter names ('r', 'b', ...) are case-sensitive;
            # longer names fall back to a case-insensitive lookup.
            if len(orig_c) != 1:
                try:
                    c = _colors_full_map[c.lower()]
                except KeyError:
                    pass
    if isinstance(c, str):
        # hex color in #rrggbb format.
        match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
        if match:
            return (tuple(int(n, 16) / 255
                          for n in [c[1:3], c[3:5], c[5:7]])
                    + (alpha if alpha is not None else 1.,))
        # hex color in #rgb format, shorthand for #rrggbb.
        match = re.match(r"\A#[a-fA-F0-9]{3}\Z", c)
        if match:
            return (tuple(int(n, 16) / 255
                          for n in [c[1]*2, c[2]*2, c[3]*2])
                    + (alpha if alpha is not None else 1.,))
        # hex color with alpha in #rrggbbaa format.
        match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
        if match:
            color = [int(n, 16) / 255
                     for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
            if alpha is not None:
                color[-1] = alpha
            return tuple(color)
        # hex color with alpha in #rgba format, shorthand for #rrggbbaa.
        match = re.match(r"\A#[a-fA-F0-9]{4}\Z", c)
        if match:
            color = [int(n, 16) / 255
                     for n in [c[1]*2, c[2]*2, c[3]*2, c[4]*2]]
            if alpha is not None:
                color[-1] = alpha
            return tuple(color)
        # string gray.
        try:
            c = float(c)
        except ValueError:
            pass
        else:
            if not (0 <= c <= 1):
                raise ValueError(
                    f"Invalid string grayscale value {orig_c!r}. "
                    f"Value must be within 0-1 range")
            return c, c, c, alpha if alpha is not None else 1.
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    # turn 2-D array into 1-D array
    if isinstance(c, np.ndarray):
        if c.ndim == 2 and c.shape[0] == 1:
            c = c.reshape(-1)
    # tuple color.
    if not np.iterable(c):
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    if len(c) not in [3, 4]:
        raise ValueError("RGBA sequence should have length 3 or 4")
    if not all(isinstance(x, Real) for x in c):
        # Checks that don't work: `map(float, ...)`, `np.array(..., float)` and
        # `np.array(...).astype(float)` would all convert "0.5" to 0.5.
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    # Return a tuple to prevent the cached value from being modified.
    c = tuple(map(float, c))
    if len(c) == 3 and alpha is None:
        alpha = 1
    if alpha is not None:
        c = c[:3] + (alpha,)
    if any(elem < 0 or elem > 1 for elem in c):
        raise ValueError("RGBA values should be within 0-1 range")
    return c
def to_rgba_array(c, alpha=None):
    """
    Convert *c* to a (n, 4) array of RGBA colors.

    Parameters
    ----------
    c : Matplotlib color or array of colors
        If *c* is a masked array, an `~numpy.ndarray` is returned with a
        (0, 0, 0, 0) row for each masked value or row in *c*.

    alpha : float or sequence of floats, optional
        If *alpha* is given, force the alpha value of the returned RGBA tuple
        to *alpha*.

        If None, the alpha value from *c* is used. If *c* does not have an
        alpha channel, then alpha defaults to 1.

        *alpha* is ignored for the color value ``"none"`` (case-insensitive),
        which always maps to ``(0, 0, 0, 0)``.

        If *alpha* is a sequence and *c* is a single color, *c* will be
        repeated to match the length of *alpha*.

    Returns
    -------
    array
        (n, 4) array of RGBA colors, where each channel (red, green, blue,
        alpha) can assume values between 0 and 1.
    """
    # A (color, alpha) pair — but only when the second item is a scalar,
    # so a pair of two colors is not misinterpreted.
    if isinstance(c, tuple) and len(c) == 2 and isinstance(c[1], Real):
        if alpha is None:
            c, alpha = c
        else:
            c = c[0]
    # Special-case inputs that are already arrays, for performance. (If the
    # array has the wrong kind or shape, raise the error during one-at-a-time
    # conversion.)
    if np.iterable(alpha):
        alpha = np.asarray(alpha).ravel()
    if (isinstance(c, np.ndarray) and c.dtype.kind in "if"
            and c.ndim == 2 and c.shape[1] in [3, 4]):
        mask = c.mask.any(axis=1) if np.ma.is_masked(c) else None
        c = np.ma.getdata(c)
        if np.iterable(alpha):
            if c.shape[0] == 1 and alpha.shape[0] > 1:
                # Broadcast the single color over the alpha sequence.
                c = np.tile(c, (alpha.shape[0], 1))
            elif c.shape[0] != alpha.shape[0]:
                raise ValueError("The number of colors must match the number"
                                 " of alpha values if there are more than one"
                                 " of each.")
        if c.shape[1] == 3:
            result = np.column_stack([c, np.zeros(len(c))])
            result[:, -1] = alpha if alpha is not None else 1.
        elif c.shape[1] == 4:
            result = c.copy()
            if alpha is not None:
                result[:, -1] = alpha
        if mask is not None:
            # Masked rows become fully transparent black.
            result[mask] = 0
        if np.any((result < 0) | (result > 1)):
            raise ValueError("RGBA values should be within 0-1 range")
        return result
    # Handle single values.
    # Note that this occurs *after* handling inputs that are already arrays, as
    # `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
    # to format the array in the ValueError message(!).
    if cbook._str_lower_equal(c, "none"):
        return np.zeros((0, 4), float)
    try:
        if np.iterable(alpha):
            return np.array([to_rgba(c, a) for a in alpha], float)
        else:
            return np.array([to_rgba(c, alpha)], float)
    except TypeError:
        pass
    except ValueError as e:
        if e.args == ("'alpha' must be between 0 and 1, inclusive", ):
            # ValueError is from _to_rgba_no_colorcycle().
            raise e
    if isinstance(c, str):
        raise ValueError(f"{c!r} is not a valid color value.")
    if len(c) == 0:
        return np.zeros((0, 4), float)
    # Quick path if the whole sequence can be directly converted to a numpy
    # array in one shot.
    if isinstance(c, Sequence):
        lens = {len(cc) if isinstance(cc, (list, tuple)) else -1 for cc in c}
        if lens == {3}:
            rgba = np.column_stack([c, np.ones(len(c))])
        elif lens == {4}:
            rgba = np.array(c)
        else:
            rgba = np.array([to_rgba(cc) for cc in c])
    else:
        rgba = np.array([to_rgba(cc) for cc in c])
    if alpha is not None:
        rgba[:, 3] = alpha
        if isinstance(c, Sequence):
            # ensure that an explicit alpha does not overwrite full transparency
            # for "none"
            none_mask = [cbook._str_equal(cc, "none") for cc in c]
            rgba[:, 3][none_mask] = 0
    return rgba
def to_rgb(c):
    """Convert *c* to an RGB triple, discarding any alpha component."""
    r, g, b, _ = to_rgba(c)
    return (r, g, b)
def to_hex(c, keep_alpha=False):
    """
    Convert *c* to a hex color string.

    Parameters
    ----------
    c : :ref:`color ` or `numpy.ma.masked`
    keep_alpha : bool, default: False
        If False, use the ``#rrggbb`` format, otherwise use ``#rrggbbaa``.

    Returns
    -------
    str
        ``#rrggbb`` or ``#rrggbbaa`` hex color string
    """
    rgba = to_rgba(c)
    channels = rgba if keep_alpha else rgba[:3]
    # Each channel is a float in [0, 1]; scale to 0-255 and render as two
    # lowercase hex digits.
    return "#" + "".join(f"{round(ch * 255):02x}" for ch in channels)
### Backwards-compatible color-conversion API
# Aliases kept so that old code using the historical names keeps working;
# new code should use CSS4_COLORS / to_hex / to_rgb directly.
cnames = CSS4_COLORS
# Matches exactly a 6-digit hex color string such as "#a0b1c2".
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
rgb2hex = to_hex
hex2color = to_rgb


class ColorConverter:
    """
    A class only kept for backwards compatibility.

    Its functionality is entirely provided by module-level functions.
    """
    # NOTE(review): _colors_full_map is defined earlier in this module;
    # presumably the module-wide name -> color mapping and its cache.
    colors = _colors_full_map
    cache = _colors_full_map.cache
    to_rgb = staticmethod(to_rgb)
    to_rgba = staticmethod(to_rgba)
    to_rgba_array = staticmethod(to_rgba_array)


# Singleton instance, historically the public entry point for conversions.
colorConverter = ColorConverter()
### End of backwards-compatible color-conversion API
def _create_lookup_table(N, data, gamma=1.0):
r"""
Create an *N* -element 1D lookup table.
This assumes a mapping :math:`f : [0, 1] \rightarrow [0, 1]`. The returned
data is an array of N values :math:`y = f(x)` where x is sampled from
[0, 1].
By default (*gamma* = 1) x is equidistantly sampled from [0, 1]. The
*gamma* correction factor :math:`\gamma` distorts this equidistant
sampling by :math:`x \rightarrow x^\gamma`.
Parameters
----------
N : int
The number of elements of the created lookup table; at least 1.
data : (M, 3) array-like or callable
Defines the mapping :math:`f`.
If a (M, 3) array-like, the rows define values (x, y0, y1). The x
values must start with x=0, end with x=1, and all x values be in
increasing order.
A value between :math:`x_i` and :math:`x_{i+1}` is mapped to the range
:math:`y^1_{i-1} \ldots y^0_i` by linear interpolation.
For the simple case of a y-continuous mapping, y0 and y1 are identical.
The two values of y are to allow for discontinuous mapping functions.
E.g. a sawtooth with a period of 0.2 and an amplitude of 1 would be::
[(0, 1, 0), (0.2, 1, 0), (0.4, 1, 0), ..., [(1, 1, 0)]
In the special case of ``N == 1``, by convention the returned value
is y0 for x == 1.
If *data* is a callable, it must accept and return numpy arrays::
data(x : ndarray) -> ndarray
and map values between 0 - 1 to 0 - 1.
gamma : float
Gamma correction factor for input distribution x of the mapping.
See also https://en.wikipedia.org/wiki/Gamma_correction.
Returns
-------
array
The lookup table where ``lut[x * (N-1)]`` gives the closest value
for values of x between 0 and 1.
Notes
-----
This function is internally used for `.LinearSegmentedColormap`.
"""
if callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=float), 0, 1)
return lut
try:
adata = np.array(data)
except Exception as err:
raise TypeError("data must be convertible to an array") from err
_api.check_shape((None, 3), data=adata)
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0 and end with x=1")
if (np.diff(x) < 0).any():
raise ValueError("data mapping points must have x in increasing order")
# begin generation of lookup table
if N == 1:
# convention: use the y = f(x=1) value for a 1-element lookup table
lut = np.array(y0[-1])
else:
x = x * (N - 1)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
lut = np.concatenate([
[y1[0]],
distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1],
[y0[-1]],
])
# ensure that the lut is confined to values between 0 and 1 by clipping it
return np.clip(lut, 0.0, 1.0)
class Colormap:
    """
    Baseclass for all scalar to RGBA mappings.

    Typically, Colormap instances are used to convert data values (floats)
    from the interval ``[0, 1]`` to the RGBA color that the respective
    Colormap represents. For scaling of data into the ``[0, 1]`` interval see
    `matplotlib.colors.Normalize`. Subclasses of `matplotlib.cm.ScalarMappable`
    make heavy use of this ``data -> normalize -> map-to-color`` processing
    chain.
    """

    def __init__(self, name, N=256):
        """
        Parameters
        ----------
        name : str
            The name of the colormap.
        N : int
            The number of RGB quantization levels.
        """
        self.name = name
        self.N = int(N)  # ensure that N is always int
        # Special colors as RGBA; None for under/over means "derive from the
        # lut endpoints" (see _set_extremes).
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # The lookup table has N + 3 rows: rows N, N+1 and N+2 hold the
        # under, over and bad colors respectively.
        self._i_under = self.N
        self._i_over = self.N + 1
        self._i_bad = self.N + 2
        self._isinit = False  # the lut is built lazily by _init()
        self.n_variates = 1
        #: When this colormap exists on a scalar mappable and colorbar_extend
        #: is not False, colorbar creation will pick up ``colorbar_extend`` as
        #: the default value for the ``extend`` keyword in the
        #: `matplotlib.colorbar.Colorbar` constructor.
        self.colorbar_extend = False

    def __call__(self, X, alpha=None, bytes=False):
        r"""
        Parameters
        ----------
        X : float or int or array-like
            The data value(s) to convert to RGBA.
            For floats, *X* should be in the interval ``[0.0, 1.0]`` to
            return the RGBA values ``X*100`` percent along the Colormap line.
            For integers, *X* should be in the interval ``[0, Colormap.N)`` to
            return RGBA values *indexed* from the Colormap with index ``X``.
        alpha : float or array-like or None
            Alpha must be a scalar between 0 and 1, a sequence of such
            floats with shape matching X, or None.
        bytes : bool, default: False
            If False (default), the returned RGBA values will be floats in the
            interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
            interval ``[0, 255]``.

        Returns
        -------
        Tuple of RGBA values if X is scalar, otherwise an array of
        RGBA values with a shape of ``X.shape + (4, )``.
        """
        rgba, mask = self._get_rgba_and_mask(X, alpha=alpha, bytes=bytes)
        # Scalar input -> return a plain tuple, not a length-4 array.
        if not np.iterable(X):
            rgba = tuple(rgba)
        return rgba

    def _get_rgba_and_mask(self, X, alpha=None, bytes=False):
        r"""
        Parameters
        ----------
        X : float or int or array-like
            The data value(s) to convert to RGBA.
            For floats, *X* should be in the interval ``[0.0, 1.0]`` to
            return the RGBA values ``X*100`` percent along the Colormap line.
            For integers, *X* should be in the interval ``[0, Colormap.N)`` to
            return RGBA values *indexed* from the Colormap with index ``X``.
        alpha : float or array-like or None
            Alpha must be a scalar between 0 and 1, a sequence of such
            floats with shape matching X, or None.
        bytes : bool, default: False
            If False (default), the returned RGBA values will be floats in the
            interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
            interval ``[0, 255]``.

        Returns
        -------
        colors : np.ndarray
            Array of RGBA values with a shape of ``X.shape + (4, )``.
        mask : np.ndarray
            Boolean array with True where the input is ``np.nan`` or masked.
        """
        if not self._isinit:
            self._init()

        xa = np.array(X, copy=True)  # copy: scaled/overwritten in place below
        if not xa.dtype.isnative:
            # Native byteorder is faster.
            xa = xa.byteswap().view(xa.dtype.newbyteorder())
        if xa.dtype.kind == "f":
            xa *= self.N
            # xa == 1 (== N after multiplication) is not out of range.
            xa[xa == self.N] = self.N - 1
        # Pre-compute the masks before casting to int (which can truncate
        # negative values to zero or wrap large floats to negative ints).
        mask_under = xa < 0
        mask_over = xa >= self.N
        # If input was masked, get the bad mask from it; else mask out nans.
        mask_bad = X.mask if np.ma.is_masked(X) else np.isnan(xa)
        with np.errstate(invalid="ignore"):
            # We need this cast for unsigned ints as well as floats
            xa = xa.astype(int)
        # Redirect out-of-range / bad entries to the special lut rows.
        xa[mask_under] = self._i_under
        xa[mask_over] = self._i_over
        xa[mask_bad] = self._i_bad

        lut = self._lut
        if bytes:
            lut = (lut * 255).astype(np.uint8)

        # mode='clip' keeps any remaining stray index inside the table.
        rgba = lut.take(xa, axis=0, mode='clip')

        if alpha is not None:
            alpha = np.clip(alpha, 0, 1)
            if bytes:
                alpha *= 255  # Will be cast to uint8 upon assignment.
            if alpha.shape not in [(), xa.shape]:
                raise ValueError(
                    f"alpha is array-like but its shape {alpha.shape} does "
                    f"not match that of X {xa.shape}")
            rgba[..., -1] = alpha
            # If the "bad" color is all zeros, then ignore alpha input.
            if (lut[-1] == 0).all():
                rgba[mask_bad] = (0, 0, 0, 0)

        return rgba, mask_bad

    def __copy__(self):
        cls = self.__class__
        cmapobject = cls.__new__(cls)
        cmapobject.__dict__.update(self.__dict__)
        # The lut is mutable; copy it so the clone cannot alias the original.
        if self._isinit:
            cmapobject._lut = np.copy(self._lut)
        return cmapobject

    def __eq__(self, other):
        if (not isinstance(other, Colormap) or
                self.colorbar_extend != other.colorbar_extend):
            return False
        # To compare lookup tables the Colormaps have to be initialized
        if not self._isinit:
            self._init()
        if not other._isinit:
            other._init()
        return np.array_equal(self._lut, other._lut)

    def get_bad(self):
        """Get the color for masked values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_bad])

    def set_bad(self, color='k', alpha=None):
        """Set the color for masked values."""
        self._rgba_bad = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()

    def get_under(self):
        """Get the color for low out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_under])

    def set_under(self, color='k', alpha=None):
        """Set the color for low out-of-range values."""
        self._rgba_under = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()

    def get_over(self):
        """Get the color for high out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_over])

    def set_over(self, color='k', alpha=None):
        """Set the color for high out-of-range values."""
        self._rgba_over = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()

    def set_extremes(self, *, bad=None, under=None, over=None):
        """
        Set the colors for masked (*bad*) values and, when ``norm.clip =
        False``, low (*under*) and high (*over*) out-of-range values.
        """
        if bad is not None:
            self.set_bad(bad)
        if under is not None:
            self.set_under(under)
        if over is not None:
            self.set_over(over)

    def with_extremes(self, *, bad=None, under=None, over=None):
        """
        Return a copy of the colormap, for which the colors for masked (*bad*)
        values and, when ``norm.clip = False``, low (*under*) and high (*over*)
        out-of-range values, have been set accordingly.
        """
        new_cm = self.copy()
        new_cm.set_extremes(bad=bad, under=under, over=over)
        return new_cm

    def _set_extremes(self):
        # Fill the three special lut rows; fall back to the lut endpoints
        # when no explicit under/over color was set.
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N - 1]
        self._lut[self._i_bad] = self._rgba_bad

    def _init(self):
        """Generate the lookup table, ``self._lut``."""
        raise NotImplementedError("Abstract class only")

    def is_gray(self):
        """Return whether the colormap is grayscale."""
        if not self._isinit:
            self._init()
        # Grayscale iff R == G == B for every lut entry.
        return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
                np.all(self._lut[:, 0] == self._lut[:, 2]))

    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        if hasattr(self, '_resample'):
            # Legacy escape hatch: subclasses written before resampled() was
            # public may still only provide the private _resample().
            _api.warn_external(
                "The ability to resample a color map is now public API "
                f"However the class {type(self)} still only implements "
                "the previous private _resample method. Please update "
                "your class."
            )
            return self._resample(lutsize)

        raise NotImplementedError()

    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        .. note:: This function is not implemented for the base class.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        See Also
        --------
        LinearSegmentedColormap.reversed
        ListedColormap.reversed
        """
        raise NotImplementedError()

    def _repr_png_(self):
        """Generate a PNG representation of the Colormap."""
        # A single horizontal gradient strip sampling the whole colormap.
        X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
                    (_REPR_PNG_SIZE[1], 1))
        pixels = self(X, bytes=True)
        png_bytes = io.BytesIO()
        title = self.name + ' colormap'
        author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
        pnginfo = PngInfo()
        pnginfo.add_text('Title', title)
        pnginfo.add_text('Description', title)
        pnginfo.add_text('Author', author)
        pnginfo.add_text('Software', author)
        Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
        return png_bytes.getvalue()

    def _repr_html_(self):
        """Generate an HTML representation of the Colormap."""
        # NOTE(review): the HTML string literals below appear to have had
        # their markup stripped in this copy (several quotes no longer
        # terminate) — restore the original markup from upstream before use.
        png_bytes = self._repr_png_()
        png_base64 = base64.b64encode(png_bytes).decode('ascii')

        def color_block(color):
            hex_color = to_hex(color, keep_alpha=True)
            return (f'')

        return ('
                '
                f'{self.name} '
                '
                '
                '
                '
                '
                '
                '
                '
                f'{color_block(self.get_under())} under'
                '
                '
                '
                '
                f'bad {color_block(self.get_bad())}'
                '
                '
                '
                '
                f'over {color_block(self.get_over())}'
                '
                '
                '
                ')

    def copy(self):
        """Return a copy of the colormap."""
        return self.__copy__()
class LinearSegmentedColormap(Colormap):
    """
    Colormap objects based on lookup tables using linear segments.

    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """

    def __init__(self, name, segmentdata, N=256, gamma=1.0):
        """
        Create a colormap from linear mapping segments.

        Parameters
        ----------
        name : str
            The name of the colormap.
        segmentdata : dict
            A dictionary with 'red', 'green' and 'blue' entries (an 'alpha'
            entry is optional).  Each entry is a list of (x, y0, y1) rows;
            within an entry, *x* must increase monotonically from 0 to 1.
            For an input value *z* between ``x[i]`` and ``x[i+1]``, the
            channel value is linearly interpolated between ``y1[i]`` and
            ``y0[i+1]``, so *y0* of the first row and *y1* of the last row
            are never used; distinct y0/y1 at a breakpoint produce a
            discontinuous (step) mapping.

            For example, red rising over the bottom half, green over the
            middle half and blue over the top half is::

                cdict = {'red':   [(0.0, 0.0, 0.0),
                                   (0.5, 1.0, 1.0),
                                   (1.0, 1.0, 1.0)],
                         'green': [(0.0, 0.0, 0.0),
                                   (0.25, 0.0, 0.0),
                                   (0.75, 1.0, 1.0),
                                   (1.0, 1.0, 1.0)],
                         'blue':  [(0.0, 0.0, 0.0),
                                   (0.5, 0.0, 0.0),
                                   (1.0, 1.0, 1.0)]}
        N : int
            The number of RGB quantization levels.
        gamma : float
            Gamma correction factor applied to the sampling of the 0-1
            domain.

        See Also
        --------
        LinearSegmentedColormap.from_list
            Static method; factory function for generating a smoothly-varying
            LinearSegmentedColormap.
        """
        # True only if all colors in map are identical; needed for contouring.
        self.monochrome = False
        super().__init__(name, N)
        self._segmentdata = segmentdata
        self._gamma = gamma

    def _init(self):
        # Rows N..N+2 are reserved for the under/over/bad colors (filled by
        # _set_extremes); start from an all-ones (opaque) table.
        self._lut = np.ones((self.N + 3, 4), float)
        for column, channel in enumerate(('red', 'green', 'blue')):
            self._lut[:-3, column] = _create_lookup_table(
                self.N, self._segmentdata[channel], self._gamma)
        if 'alpha' in self._segmentdata:
            # Alpha is sampled without gamma correction.
            self._lut[:-3, 3] = _create_lookup_table(
                self.N, self._segmentdata['alpha'], 1)
        self._isinit = True
        self._set_extremes()

    def set_gamma(self, gamma):
        """Set a new gamma value and regenerate colormap."""
        self._gamma = gamma
        self._init()

    @staticmethod
    def from_list(name, colors, N=256, gamma=1.0):
        """
        Create a `LinearSegmentedColormap` from a list of colors.

        Parameters
        ----------
        name : str
            The name of the colormap.
        colors : list of :mpltype:`color` or list of (value, color)
            If only colors are given, they are equidistantly mapped from the
            range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to
            ``colors[-1]``.
            If (value, color) pairs are given, the mapping is from *value*
            to *color*. This can be used to divide the range unevenly.
        N : int
            The number of RGB quantization levels.
        gamma : float
        """
        if not np.iterable(colors):
            raise ValueError('colors must be iterable')

        head = colors[0]
        if (isinstance(head, Sized) and len(head) == 2
                and not isinstance(head, str)):
            # Explicit (value, color) pairs were supplied.
            vals, colors = zip(*colors)
        else:
            # Bare colors: spread them evenly over [0, 1].
            vals = np.linspace(0, 1, len(colors))

        r, g, b, a = to_rgba_array(colors).T
        # Identical y0/y1 per breakpoint -> continuous (smooth) channels.
        cdict = {
            "red": np.column_stack([vals, r, r]),
            "green": np.column_stack([vals, g, g]),
            "blue": np.column_stack([vals, b, b]),
            "alpha": np.column_stack([vals, a, a]),
        }
        return LinearSegmentedColormap(name, cdict, N, gamma)

    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        resampled_cmap = LinearSegmentedColormap(
            self.name, self._segmentdata, lutsize)
        # Carry over any explicitly-set extreme colors.
        resampled_cmap._rgba_over = self._rgba_over
        resampled_cmap._rgba_under = self._rgba_under
        resampled_cmap._rgba_bad = self._rgba_bad
        return resampled_cmap

    # Helper ensuring picklability of the reversed cmap.
    @staticmethod
    def _reverser(func, x):
        return func(1 - x)

    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        Returns
        -------
        LinearSegmentedColormap
            The reversed colormap.
        """
        if name is None:
            name = self.name + "_r"

        def flip(spec):
            if callable(spec):
                # A partial of the static helper keeps the cmap picklable,
                # which a lambda would not.
                return functools.partial(self._reverser, spec)
            return [(1.0 - x, y1, y0) for x, y0, y1 in reversed(spec)]

        data_r = {key: flip(spec) for key, spec in self._segmentdata.items()}
        reversed_cmap = LinearSegmentedColormap(
            name, data_r, self.N, self._gamma)
        # Swap the over/under values; keep bad as-is.
        reversed_cmap._rgba_over = self._rgba_under
        reversed_cmap._rgba_under = self._rgba_over
        reversed_cmap._rgba_bad = self._rgba_bad
        return reversed_cmap
class ListedColormap(Colormap):
    """
    Colormap object generated from a list of colors.

    This may be most useful when indexing directly into a colormap,
    but it can also be used to generate special colormaps for ordinary
    mapping.

    Parameters
    ----------
    colors : list, array
        Sequence of Matplotlib color specifications (color names or RGB(A)
        values).
    name : str, optional
        String to identify the colormap.
    N : int, optional
        Number of entries in the map.  The default is *None*, in which case
        there is one colormap entry for each element in the list of colors.
        If ``N < len(colors)`` the list is truncated at *N*; if
        ``N > len(colors)`` the list is extended by repetition.
    """

    def __init__(self, colors, name='from_list', N=None):
        self.monochrome = False  # Are all colors identical? (for contour.py)
        if N is None:
            # Use the given colors verbatim, one lut entry per color.
            self.colors = colors
            N = len(colors)
        elif isinstance(colors, str):
            # A single color name, repeated N times.  Checked before
            # np.iterable, since strings are themselves iterable.
            self.colors = [colors] * N
            self.monochrome = True
        elif np.iterable(colors):
            if len(colors) == 1:
                self.monochrome = True
            # Truncate or cyclically extend the sequence to exactly N.
            self.colors = list(
                itertools.islice(itertools.cycle(colors), N))
        else:
            try:
                gray = float(colors)
            except TypeError:
                pass
            else:
                # A single gray level, repeated N times.
                self.colors = [gray] * N
                self.monochrome = True
        super().__init__(name, N)

    def _init(self):
        # Rows N..N+2 are reserved for under/over/bad (filled by
        # _set_extremes).
        self._lut = np.zeros((self.N + 3, 4), float)
        self._lut[:-3] = to_rgba_array(self.colors)
        self._isinit = True
        self._set_extremes()

    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        sampled_colors = self(np.linspace(0, 1, lutsize))
        resampled_cmap = ListedColormap(sampled_colors, name=self.name)
        # Keep the over/under values too
        resampled_cmap._rgba_over = self._rgba_over
        resampled_cmap._rgba_under = self._rgba_under
        resampled_cmap._rgba_bad = self._rgba_bad
        return resampled_cmap

    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        Returns
        -------
        ListedColormap
            A reversed instance of the colormap.
        """
        if name is None:
            name = self.name + "_r"
        reversed_cmap = ListedColormap(
            list(self.colors)[::-1], name=name, N=self.N)
        # Swap the over/under values; keep bad as-is.
        reversed_cmap._rgba_over = self._rgba_under
        reversed_cmap._rgba_under = self._rgba_over
        reversed_cmap._rgba_bad = self._rgba_bad
        return reversed_cmap
class MultivarColormap:
    """
    Class for holding multiple `~matplotlib.colors.Colormap` for use in a
    `~matplotlib.cm.ScalarMappable` object
    """

    def __init__(self, colormaps, combination_mode, name='multivariate colormap'):
        """
        Parameters
        ----------
        colormaps: list or tuple of `~matplotlib.colors.Colormap` objects
            The individual colormaps that are combined
        combination_mode: str, 'sRGB_add' or 'sRGB_sub'
            Describe how colormaps are combined in sRGB space

            - If 'sRGB_add' -> Mixing produces brighter colors
              `sRGB = sum(colors)`
            - If 'sRGB_sub' -> Mixing produces darker colors
              `sRGB = 1 - sum(1 - colors)`
        name : str, optional
            The name of the colormap family.
        """
        self.name = name

        # Reject scalars, one-element sequences and bare strings: a
        # multivariate colormap needs at least two components.
        if not np.iterable(colormaps) \
                or len(colormaps) == 1 \
                or isinstance(colormaps, str):
            raise ValueError("A MultivarColormap must have more than one colormap.")
        colormaps = list(colormaps)  # ensure cmaps is a list, i.e. not a tuple
        for i, cmap in enumerate(colormaps):
            if isinstance(cmap, str):
                # Resolve registered colormap names to Colormap objects.
                colormaps[i] = mpl.colormaps[cmap]
            elif not isinstance(cmap, Colormap):
                raise ValueError("colormaps must be a list of objects that subclass"
                                 " Colormap or a name found in the colormap registry.")

        self._colormaps = colormaps
        _api.check_in_list(['sRGB_add', 'sRGB_sub'], combination_mode=combination_mode)
        self._combination_mode = combination_mode
        self.n_variates = len(colormaps)
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.

    def __call__(self, X, alpha=None, bytes=False, clip=True):
        r"""
        Parameters
        ----------
        X : tuple (X0, X1, ...) of length equal to the number of colormaps
            X0, X1 ...:
            float or int, `~numpy.ndarray` or scalar
            The data value(s) to convert to RGBA.
            For floats, *Xi...* should be in the interval ``[0.0, 1.0]`` to
            return the RGBA values ``X*100`` percent along the Colormap line.
            For integers, *Xi...* should be in the interval ``[0, self[i].N)`` to
            return RGBA values *indexed* from colormap [i] with index ``Xi``, where
            self[i] is colormap i.
        alpha : float or array-like or None
            Alpha must be a scalar between 0 and 1, a sequence of such
            floats with shape matching *Xi*, or None.
        bytes : bool, default: False
            If False (default), the returned RGBA values will be floats in the
            interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
            interval ``[0, 255]``.
        clip : bool, default: True
            If True, clip output to 0 to 1

        Returns
        -------
        Tuple of RGBA values if X[0] is scalar, otherwise an array of
        RGBA values with a shape of ``X.shape + (4, )``.
        """
        if len(X) != len(self):
            raise ValueError(
                f'For the selected colormap the data must have a first dimension '
                f'{len(self)}, not {len(X)}')

        # Start from the first component, then fold the remaining components
        # in: RGB channels are added, alphas multiplied, bad masks OR-ed.
        rgba, mask_bad = self[0]._get_rgba_and_mask(X[0], bytes=False)
        for c, xx in zip(self[1:], X[1:]):
            sub_rgba, sub_mask_bad = c._get_rgba_and_mask(xx, bytes=False)
            rgba[..., :3] += sub_rgba[..., :3]  # add colors
            rgba[..., 3] *= sub_rgba[..., 3]  # multiply alpha
            mask_bad |= sub_mask_bad

        if self.combination_mode == 'sRGB_sub':
            # 1 - sum(1 - colors) == sum(colors) - (k - 1) for k components.
            rgba[..., :3] -= len(self) - 1

        rgba[mask_bad] = self.get_bad()

        if clip:
            rgba = np.clip(rgba, 0, 1)

        if alpha is not None:
            if clip:
                alpha = np.clip(alpha, 0, 1)
            if np.shape(alpha) not in [(), np.shape(X[0])]:
                raise ValueError(
                    f"alpha is array-like but its shape {np.shape(alpha)} does "
                    f"not match that of X[0] {np.shape(X[0])}")
            rgba[..., -1] *= alpha

        if bytes:
            if not clip:
                raise ValueError(
                    "clip cannot be false while bytes is true"
                    " as uint8 does not support values below 0"
                    " or above 255.")
            rgba = (rgba * 255).astype('uint8')

        # Scalar input -> return a plain tuple rather than an array.
        if not np.iterable(X[0]):
            rgba = tuple(rgba)

        return rgba

    def copy(self):
        """Return a copy of the multivarcolormap."""
        return self.__copy__()

    def __copy__(self):
        cls = self.__class__
        cmapobject = cls.__new__(cls)
        cmapobject.__dict__.update(self.__dict__)
        # Copy the mutable members so the clone cannot alias the original.
        cmapobject._colormaps = [cm.copy() for cm in self._colormaps]
        cmapobject._rgba_bad = np.copy(self._rgba_bad)
        return cmapobject

    def __eq__(self, other):
        if not isinstance(other, MultivarColormap):
            return False
        if len(self) != len(other):
            return False
        # Component-wise comparison of the individual colormaps.
        for c0, c1 in zip(self, other):
            if c0 != c1:
                return False
        if not all(self._rgba_bad == other._rgba_bad):
            return False
        if self.combination_mode != other.combination_mode:
            return False
        return True

    def __getitem__(self, item):
        return self._colormaps[item]

    def __iter__(self):
        for c in self._colormaps:
            yield c

    def __len__(self):
        return len(self._colormaps)

    def __str__(self):
        return self.name

    def get_bad(self):
        """Get the color for masked values."""
        return np.array(self._rgba_bad)

    def resampled(self, lutshape):
        """
        Return a new colormap with *lutshape* entries.

        Parameters
        ----------
        lutshape : tuple of (`int`, `None`)
            The tuple must have a length matching the number of variates.
            For each element in the tuple, if `int`, the corresponding colorbar
            is resampled, if `None`, the corresponding colorbar is not resampled.

        Returns
        -------
        MultivarColormap
        """
        if not np.iterable(lutshape) or len(lutshape) != len(self):
            raise ValueError(f"lutshape must be of length {len(self)}")
        new_cmap = self.copy()
        for i, s in enumerate(lutshape):
            if s is not None:
                new_cmap._colormaps[i] = self[i].resampled(s)
        return new_cmap

    def with_extremes(self, *, bad=None, under=None, over=None):
        """
        Return a copy of the `MultivarColormap` with modified out-of-range attributes.

        The *bad* keyword modifies the copied `MultivarColormap` while *under* and
        *over* modifies the attributes of the copied component colormaps.
        Note that *under* and *over* colors are subject to the mixing rules determined
        by the *combination_mode*.

        Parameters
        ----------
        bad : :mpltype:`color`, default: None
            If Matplotlib color, the bad value is set accordingly in the copy
        under : tuple of :mpltype:`color`, default: None
            If tuple, the `under` value of each component is set with the values
            from the tuple.
        over : tuple of :mpltype:`color`, default: None
            If tuple, the `over` value of each component is set with the values
            from the tuple.

        Returns
        -------
        MultivarColormap
            copy of self with attributes set
        """
        new_cm = self.copy()
        if bad is not None:
            new_cm._rgba_bad = to_rgba(bad)
        if under is not None:
            if not np.iterable(under) or len(under) != len(new_cm):
                raise ValueError("*under* must contain a color for each scalar colormap"
                                 f" i.e. be of length {len(new_cm)}.")
            else:
                for c, b in zip(new_cm, under):
                    c.set_under(b)
        if over is not None:
            if not np.iterable(over) or len(over) != len(new_cm):
                raise ValueError("*over* must contain a color for each scalar colormap"
                                 f" i.e. be of length {len(new_cm)}.")
            else:
                for c, b in zip(new_cm, over):
                    c.set_over(b)
        return new_cm

    @property
    def combination_mode(self):
        # Read-only: validated once in __init__.
        return self._combination_mode

    def _repr_png_(self):
        """Generate a PNG representation of the Colormap."""
        X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
                    (_REPR_PNG_SIZE[1], 1))
        # Stack the component colormaps' strips vertically into one image.
        pixels = np.zeros((_REPR_PNG_SIZE[1]*len(self), _REPR_PNG_SIZE[0], 4),
                          dtype=np.uint8)
        for i, c in enumerate(self):
            pixels[i*_REPR_PNG_SIZE[1]:(i+1)*_REPR_PNG_SIZE[1], :] = c(X, bytes=True)
        png_bytes = io.BytesIO()
        title = self.name + ' multivariate colormap'
        author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
        pnginfo = PngInfo()
        pnginfo.add_text('Title', title)
        pnginfo.add_text('Description', title)
        pnginfo.add_text('Author', author)
        pnginfo.add_text('Software', author)
        Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
        return png_bytes.getvalue()

    def _repr_html_(self):
        """Generate an HTML representation of the MultivarColormap."""
        return ''.join([c._repr_html_() for c in self._colormaps])
class BivarColormap:
"""
Base class for all bivariate to RGBA mappings.
Designed as a drop-in replacement for Colormap when using a 2D
lookup table. To be used with `~matplotlib.cm.ScalarMappable`.
"""
def __init__(self, N=256, M=256, shape='square', origin=(0, 0),
             name='bivariate colormap'):
    """
    Parameters
    ----------
    N : int, default: 256
        The number of RGB quantization levels along the first axis.
    M : int, default: 256
        The number of RGB quantization levels along the second axis.
    shape : {'square', 'circle', 'ignore', 'circleignore'}

        - 'square' each variate is clipped to [0,1] independently
        - 'circle' the variates are clipped radially to the center
          of the colormap, and a circular mask is applied when the colormap
          is displayed
        - 'ignore' the variates are not clipped, but instead assigned the
          'outside' color
        - 'circleignore' a circular mask is applied, but the data is not
          clipped and instead assigned the 'outside' color
    origin : (float, float), default: (0,0)
        The relative origin of the colormap. Typically (0, 0), for colormaps
        that are linear on both axis, and (.5, .5) for circular colormaps.
        Used when getting 1D colormaps from 2D colormaps.
    name : str, optional
        The name of the colormap.
    """
    self.name = name
    self.N = int(N)  # ensure that N is always int
    self.M = int(M)
    _api.check_in_list(['square', 'circle', 'ignore', 'circleignore'], shape=shape)
    self._shape = shape
    self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.
    # Opaque magenta marks values outside the colormap's shape.
    self._rgba_outside = (1.0, 0.0, 1.0, 1.0)
    self._isinit = False  # the 2D lut is built lazily by _init()
    self.n_variates = 2
    self._origin = (float(origin[0]), float(origin[1]))
    # NOTE(review): the block below is an inert triple-quoted string
    # (commented-out code), not an attribute docstring; kept verbatim.
    '''#: When this colormap exists on a scalar mappable and colorbar_extend
    #: is not False, colorbar creation will pick up ``colorbar_extend`` as
    #: the default value for the ``extend`` keyword in the
    #: `matplotlib.colorbar.Colorbar` constructor.
    self.colorbar_extend = False'''
def __call__(self, X, alpha=None, bytes=False):
    r"""
    Parameters
    ----------
    X : tuple (X0, X1), X0 and X1: float or int or array-like
        The data value(s) to convert to RGBA.

        - For floats, *X* should be in the interval ``[0.0, 1.0]`` to
          return the RGBA values ``X*100`` percent along the Colormap.
        - For integers, *X* should be in the interval ``[0, Colormap.N)`` to
          return RGBA values *indexed* from the Colormap with index ``X``.
    alpha : float or array-like or None, default: None
        Alpha must be a scalar between 0 and 1, a sequence of such
        floats with shape matching X0, or None.
    bytes : bool, default: False
        If False (default), the returned RGBA values will be floats in the
        interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
        interval ``[0, 255]``.

    Returns
    -------
    Tuple of RGBA values if X is scalar, otherwise an array of
    RGBA values with a shape of ``X.shape + (4, )``.
    """
    if len(X) != 2:
        raise ValueError(
            f'For a `BivarColormap` the data must have a first dimension '
            f'2, not {len(X)}')

    if not self._isinit:
        self._init()

    # Copies: both arrays are scaled/overwritten in place below.
    X0 = np.ma.array(X[0], copy=True)
    X1 = np.ma.array(X[1], copy=True)
    # clip to shape of colormap, circle square, etc.
    self._clip((X0, X1))

    # Native byteorder is faster.
    if not X0.dtype.isnative:
        X0 = X0.byteswap().view(X0.dtype.newbyteorder())
    if not X1.dtype.isnative:
        X1 = X1.byteswap().view(X1.dtype.newbyteorder())

    if X0.dtype.kind == "f":
        X0 *= self.N
        # xa == 1 (== N after multiplication) is not out of range.
        X0[X0 == self.N] = self.N - 1
    if X1.dtype.kind == "f":
        X1 *= self.M
        # xa == 1 (== N after multiplication) is not out of range.
        X1[X1 == self.M] = self.M - 1

    # Pre-compute the masks before casting to int (which can truncate)
    mask_outside = (X0 < 0) | (X1 < 0) | (X0 >= self.N) | (X1 >= self.M)
    # If input was masked, get the bad mask from it; else mask out nans.
    mask_bad_0 = X0.mask if np.ma.is_masked(X0) else np.isnan(X0)
    mask_bad_1 = X1.mask if np.ma.is_masked(X1) else np.isnan(X1)
    mask_bad = mask_bad_0 | mask_bad_1

    with np.errstate(invalid="ignore"):
        # We need this cast for unsigned ints as well as floats
        X0 = X0.astype(int)
        X1 = X1.astype(int)

    # Set masked values to zero
    # The corresponding rgb values will be replaced later
    for X_part in [X0, X1]:
        X_part[mask_outside] = 0
        X_part[mask_bad] = 0

    rgba = self._lut[X0, X1]
    if np.isscalar(X[0]):
        # Scalar (basic) indexing yields a view into the lut; copy it so the
        # writes below cannot corrupt the table.
        rgba = np.copy(rgba)
    rgba[mask_outside] = self._rgba_outside
    rgba[mask_bad] = self._rgba_bad
    if bytes:
        rgba = (rgba * 255).astype(np.uint8)
    if alpha is not None:
        alpha = np.clip(alpha, 0, 1)
        if bytes:
            alpha *= 255  # Will be cast to uint8 upon assignment.
        if np.shape(alpha) not in [(), np.shape(X0)]:
            raise ValueError(
                f"alpha is array-like but its shape {np.shape(alpha)} does "
                f"not match that of X[0] {np.shape(X0)}")
        rgba[..., -1] = alpha
        # If the "bad" color is all zeros, then ignore alpha input.
        if (np.array(self._rgba_bad) == 0).all():
            rgba[mask_bad] = (0, 0, 0, 0)
    # Scalar input -> return a plain tuple rather than an array.
    if not np.iterable(X[0]):
        rgba = tuple(rgba)
    return rgba
@property
def lut(self):
    """
    For external access to the lut, i.e. for displaying the cmap.

    For circular colormaps this returns a lut with a circular mask.

    Internal functions (such as to_rgb()) should use _lut,
    which stores the lut without a circular mask.
    A lut without the circular mask is needed in to_rgb() because the
    conversion from floats to ints results in some pixel-requests
    just outside of the circular mask.
    """
    if not self._isinit:
        self._init()
    lut = np.copy(self._lut)
    if self.shape == 'circle' or self.shape == 'circleignore':
        # Make every entry whose (n, m) position lies outside the unit
        # circle fully transparent.
        n = np.linspace(-1, 1, self.N)
        m = np.linspace(-1, 1, self.M)
        radii_sqr = (n**2)[:, np.newaxis] + (m**2)[np.newaxis, :]
        mask_outside = radii_sqr > 1
        lut[mask_outside, 3] = 0
    return lut
def __copy__(self):
cls = self.__class__
cmapobject = cls.__new__(cls)
cmapobject.__dict__.update(self.__dict__)
cmapobject._rgba_outside = np.copy(self._rgba_outside)
cmapobject._rgba_bad = np.copy(self._rgba_bad)
cmapobject._shape = self.shape
if self._isinit:
cmapobject._lut = np.copy(self._lut)
return cmapobject
def __eq__(self, other):
if not isinstance(other, BivarColormap):
return False
# To compare lookup tables the Colormaps have to be initialized
if not self._isinit:
self._init()
if not other._isinit:
other._init()
if not np.array_equal(self._lut, other._lut):
return False
if not np.array_equal(self._rgba_bad, other._rgba_bad):
return False
if not np.array_equal(self._rgba_outside, other._rgba_outside):
return False
if self.shape != other.shape:
return False
return True
    def get_bad(self):
        """Get the RGBA color used for masked (bad) values."""
        return self._rgba_bad
    def get_outside(self):
        """
        Get the RGBA color used for out-of-range values.

        Only relevant when shape is 'ignore' or 'circleignore'.
        """
        return self._rgba_outside
def resampled(self, lutshape, transposed=False):
"""
Return a new colormap with *lutshape* entries.
Note that this function does not move the origin.
Parameters
----------
lutshape : tuple of ints or None
The tuple must be of length 2, and each entry is either an int or None.
- If an int, the corresponding axis is resampled.
- If negative the corresponding axis is resampled in reverse
- If -1, the axis is inverted
- If 1 or None, the corresponding axis is not resampled.
transposed : bool, default: False
if True, the axes are swapped after resampling
Returns
-------
BivarColormap
"""
if not np.iterable(lutshape) or len(lutshape) != 2:
raise ValueError("lutshape must be of length 2")
lutshape = [lutshape[0], lutshape[1]]
if lutshape[0] is None or lutshape[0] == 1:
lutshape[0] = self.N
if lutshape[1] is None or lutshape[1] == 1:
lutshape[1] = self.M
inverted = [False, False]
if lutshape[0] < 0:
inverted[0] = True
lutshape[0] = -lutshape[0]
if lutshape[0] == 1:
lutshape[0] = self.N
if lutshape[1] < 0:
inverted[1] = True
lutshape[1] = -lutshape[1]
if lutshape[1] == 1:
lutshape[1] = self.M
x_0, x_1 = np.mgrid[0:1:(lutshape[0] * 1j), 0:1:(lutshape[1] * 1j)]
if inverted[0]:
x_0 = x_0[::-1, :]
if inverted[1]:
x_1 = x_1[:, ::-1]
# we need to use shape = 'square' while resampling the colormap.
# if the colormap has shape = 'circle' we would otherwise get *outside* in the
# resampled colormap
shape_memory = self._shape
self._shape = 'square'
if transposed:
new_lut = self((x_1, x_0))
new_cmap = BivarColormapFromImage(new_lut, name=self.name,
shape=shape_memory,
origin=self.origin[::-1])
else:
new_lut = self((x_0, x_1))
new_cmap = BivarColormapFromImage(new_lut, name=self.name,
shape=shape_memory,
origin=self.origin)
self._shape = shape_memory
new_cmap._rgba_bad = self._rgba_bad
new_cmap._rgba_outside = self._rgba_outside
return new_cmap
def reversed(self, axis_0=True, axis_1=True):
"""
Reverses both or one of the axis.
"""
r_0 = -1 if axis_0 else 1
r_1 = -1 if axis_1 else 1
return self.resampled((r_0, r_1))
    def transposed(self):
        """
        Transposes the colormap by swapping the order of the axis

        Returns
        -------
        BivarColormap
        """
        # (None, None) keeps both axis sizes; only the swap is applied.
        return self.resampled((None, None), transposed=True)
def with_extremes(self, *, bad=None, outside=None, shape=None, origin=None):
"""
Return a copy of the `BivarColormap` with modified attributes.
Note that the *outside* color is only relevant if `shape` = 'ignore'
or 'circleignore'.
Parameters
----------
bad : None or :mpltype:`color`
If Matplotlib color, the *bad* value is set accordingly in the copy
outside : None or :mpltype:`color`
If Matplotlib color and shape is 'ignore' or 'circleignore', values
*outside* the colormap are colored accordingly in the copy
shape : {'square', 'circle', 'ignore', 'circleignore'}
- If 'square' each variate is clipped to [0,1] independently
- If 'circle' the variates are clipped radially to the center
of the colormap, and a circular mask is applied when the colormap
is displayed
- If 'ignore' the variates are not clipped, but instead assigned the
*outside* color
- If 'circleignore' a circular mask is applied, but the data is not
clipped and instead assigned the *outside* color
origin : (float, float)
The relative origin of the colormap. Typically (0, 0), for colormaps
that are linear on both axis, and (.5, .5) for circular colormaps.
Used when getting 1D colormaps from 2D colormaps.
Returns
-------
BivarColormap
copy of self with attributes set
"""
new_cm = self.copy()
if bad is not None:
new_cm._rgba_bad = to_rgba(bad)
if outside is not None:
new_cm._rgba_outside = to_rgba(outside)
if shape is not None:
_api.check_in_list(['square', 'circle', 'ignore', 'circleignore'],
shape=shape)
new_cm._shape = shape
if origin is not None:
new_cm._origin = (float(origin[0]), float(origin[1]))
return new_cm
    def _init(self):
        """Generate the lookup table, ``self._lut``."""
        # Abstract hook: concrete subclasses (e.g. SegmentedBivarColormap,
        # BivarColormapFromImage) must build self._lut and set self._isinit.
        raise NotImplementedError("Abstract class only")
    @property
    def shape(self):
        # Read-only view of the clipping/masking mode
        # ('square', 'circle', 'ignore' or 'circleignore').
        return self._shape
    @property
    def origin(self):
        # Read-only view of the relative origin (pair of floats), used when
        # extracting 1D colormaps via __getitem__.
        return self._origin
    def _clip(self, X):
        """
        For internal use when applying a BivarColormap to data.
        i.e. cm.ScalarMappable().to_rgba()
        Clips X[0] and X[1] according to 'self.shape'.
        X is modified in-place.

        Parameters
        ----------
        X: np.array
            array of floats or ints to be clipped
        shape : {'square', 'circle', 'ignore', 'circleignore'}

            - If 'square' each variate is clipped to [0,1] independently
            - If 'circle' the variates are clipped radially to the center
              of the colormap.
              It is assumed that a circular mask is applied when the colormap
              is displayed
            - If 'ignore' the variates are not clipped, but instead assigned the
              'outside' color
            - If 'circleignore' a circular mask is applied, but the data is not clipped
              and instead assigned the 'outside' color
        """
        if self.shape == 'square':
            # Clamp each variate independently: floats to [0, 1], ints to
            # [0, N-1] / [0, M-1].
            for X_part, mx in zip(X, (self.N, self.M)):
                X_part[X_part < 0] = 0
                if X_part.dtype.kind == "f":
                    X_part[X_part > 1] = 1
                else:
                    X_part[X_part >= mx] = mx - 1
        elif self.shape == 'ignore':
            # Out-of-range entries get the sentinel -1; the caller later
            # detects negative values and paints them with the 'outside'
            # color instead of clipping.
            for X_part, mx in zip(X, (self.N, self.M)):
                X_part[X_part < 0] = -1
                if X_part.dtype.kind == "f":
                    X_part[X_part > 1] = -1
                else:
                    X_part[X_part >= mx] = -1
        elif self.shape == 'circle' or self.shape == 'circleignore':
            for X_part in X:
                if X_part.dtype.kind != "f":
                    raise NotImplementedError(
                        "Circular bivariate colormaps are only"
                        " implemented for use with with floats")
            # Squared distance from the colormap center (0.5, 0.5); points
            # with radius > 0.5 lie outside the inscribed circle.
            radii_sqr = (X[0] - 0.5)**2 + (X[1] - 0.5)**2
            mask_outside = radii_sqr > 0.25
            if self.shape == 'circle':
                # Project outside points radially onto the circle boundary.
                overextend = 2 * np.sqrt(radii_sqr[mask_outside])
                X[0][mask_outside] = (X[0][mask_outside] - 0.5) / overextend + 0.5
                X[1][mask_outside] = (X[1][mask_outside] - 0.5) / overextend + 0.5
            else:
                # 'circleignore': mark outside points with the -1 sentinel.
                X[0][mask_outside] = -1
                X[1][mask_outside] = -1
    def __getitem__(self, item):
        """
        Creates and returns a colorbar along the selected axis.

        ``cmap[0]`` is the 1D colormap along axis 0, taken at the origin of
        axis 1; ``cmap[1]`` is the 1D colormap along axis 1, taken at the
        origin of axis 0.  Any other key raises KeyError.
        """
        if not self._isinit:
            self._init()
        if item == 0:
            # Column index of the origin along axis 1, clamped to the lut.
            origin_1_as_int = int(self._origin[1]*self.M)
            if origin_1_as_int > self.M-1:
                origin_1_as_int = self.M-1
            one_d_lut = self._lut[:, origin_1_as_int]
            new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_0', N=self.N)
        elif item == 1:
            # Row index of the origin along axis 0, clamped to the lut.
            origin_0_as_int = int(self._origin[0]*self.N)
            if origin_0_as_int > self.N-1:
                origin_0_as_int = self.N-1
            one_d_lut = self._lut[origin_0_as_int, :]
            new_cmap = ListedColormap(one_d_lut, name=f'{self.name}_1', N=self.M)
        else:
            raise KeyError(f"only 0 or 1 are"
                           f" valid keys for BivarColormap, not {item!r}")
        new_cmap._rgba_bad = self._rgba_bad
        if self.shape in ['ignore', 'circleignore']:
            # The bivariate 'outside' color maps to over/under on the 1D cmap.
            new_cmap.set_over(self._rgba_outside)
            new_cmap.set_under(self._rgba_outside)
        return new_cmap
    def _repr_png_(self):
        """Generate a PNG representation of the BivarColormap."""
        if not self._isinit:
            self._init()
        # Use the public lut so circular colormaps are rendered with their
        # circular mask applied.
        pixels = self.lut
        # Upscale small luts by integer repetition so the preview has a
        # reasonable size.
        # NOTE(review): the hard-coded 256 below presumably equals
        # _BIVAR_REPR_PNG_SIZE (defined elsewhere in this file) — confirm.
        if pixels.shape[0] < _BIVAR_REPR_PNG_SIZE:
            pixels = np.repeat(pixels,
                               repeats=_BIVAR_REPR_PNG_SIZE//pixels.shape[0],
                               axis=0)[:256, :]
        if pixels.shape[1] < _BIVAR_REPR_PNG_SIZE:
            pixels = np.repeat(pixels,
                               repeats=_BIVAR_REPR_PNG_SIZE//pixels.shape[1],
                               axis=1)[:, :256]
        # Flip vertically so axis 0 increases upwards in the image, and
        # convert to 8-bit RGBA.
        pixels = (pixels[::-1, :, :] * 255).astype(np.uint8)
        png_bytes = io.BytesIO()
        title = self.name + ' BivarColormap'
        author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
        pnginfo = PngInfo()
        pnginfo.add_text('Title', title)
        pnginfo.add_text('Description', title)
        pnginfo.add_text('Author', author)
        pnginfo.add_text('Software', author)
        Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
        return png_bytes.getvalue()
    def _repr_html_(self):
        """Generate an HTML representation of the Colormap."""
        # NOTE(review): the HTML markup inside the string literals below
        # appears to have been stripped/garbled in this copy of the file
        # (unbalanced quotes, empty f-string) — restore from the upstream
        # source before relying on this method.
        png_bytes = self._repr_png_()
        png_base64 = base64.b64encode(png_bytes).decode('ascii')
        def color_block(color):
            hex_color = to_hex(color, keep_alpha=True)
            return (f'')
        return ('
        '
        f'{self.name} '
        '
        '
        '
        '
        '
        '
        '
        '
        f'{color_block(self.get_outside())} outside'
        '
        '
        '
        '
        f'bad {color_block(self.get_bad())}'
        '
        ')
    def copy(self):
        """Return a copy of the colormap."""
        # Delegates to __copy__ so copy.copy() and .copy() behave identically.
        return self.__copy__()
class SegmentedBivarColormap(BivarColormap):
    """
    BivarColormap object generated by supersampling a regular grid.

    Parameters
    ----------
    patch : np.array
        Patch is required to have a shape (k, l, 3), and will get supersampled
        to a lut of shape (N, N, 4).
    N : int
        The number of RGB quantization levels along each axis.
    shape : {'square', 'circle', 'ignore', 'circleignore'}

        - If 'square' each variate is clipped to [0,1] independently
        - If 'circle' the variates are clipped radially to the center
          of the colormap, and a circular mask is applied when the colormap
          is displayed
        - If 'ignore' the variates are not clipped, but instead assigned the
          'outside' color
        - If 'circleignore' a circular mask is applied, but the data is not clipped

    origin : (float, float)
        The relative origin of the colormap. Typically (0, 0), for colormaps
        that are linear on both axis, and (.5, .5) for circular colormaps.
        Used when getting 1D colormaps from 2D colormaps.
    name : str, optional
        The name of the colormap.
    """

    def __init__(self, patch, N=256, shape='square', origin=(0, 0),
                 name='segmented bivariate colormap'):
        # Validate the (k, l, 3) patch shape before storing it; the lut is
        # built lazily in _init().
        _api.check_shape((None, None, 3), patch=patch)
        self.patch = patch
        super().__init__(N, N, shape, origin, name=name)

    def _init(self):
        """Supersample the stored patch into the (N, N, 4) lookup table."""
        s = self.patch.shape
        # Add an opaque alpha channel so the resampler works on RGBA.
        _patch = np.empty((s[0], s[1], 4))
        _patch[:, :, :3] = self.patch
        _patch[:, :, 3] = 1
        # Affine transform mapping the (k, l) patch grid onto the N x N lut;
        # _image.resample then interpolates bilinearly.
        transform = mpl.transforms.Affine2D().translate(-0.5, -0.5)\
            .scale(self.N / (s[1] - 1), self.N / (s[0] - 1))
        self._lut = np.empty((self.N, self.N, 4))
        _image.resample(_patch, self._lut, transform, _image.BILINEAR,
                        resample=False, alpha=1)
        self._isinit = True
class BivarColormapFromImage(BivarColormap):
    """
    BivarColormap object created from a pre-built lookup table (image).

    Parameters
    ----------
    lut : nparray of shape (N, M, 3) or (N, M, 4)
        The look-up-table
    shape: {'square', 'circle', 'ignore', 'circleignore'}

        - If 'square' each variate is clipped to [0,1] independently
        - If 'circle' the variates are clipped radially to the center
          of the colormap, and a circular mask is applied when the colormap
          is displayed
        - If 'ignore' the variates are not clipped, but instead assigned the
          'outside' color
        - If 'circleignore' a circular mask is applied, but the data is not clipped

    origin: (float, float)
        The relative origin of the colormap. Typically (0, 0), for colormaps
        that are linear on both axis, and (.5, .5) for circular colormaps.
        Used when getting 1D colormaps from 2D colormaps.
    name : str, optional
        The name of the colormap.
    """

    def __init__(self, lut, shape='square', origin=(0, 0), name='from image'):
        # We can allow for a PIL.Image as input in the following way, but importing
        # matplotlib.image.pil_to_array() results in a circular import
        # For now, this function only accepts numpy arrays.
        # i.e.:
        # if isinstance(Image, lut):
        #     lut = image.pil_to_array(lut)
        lut = np.array(lut, copy=True)
        if lut.ndim != 3 or lut.shape[2] not in (3, 4):
            # Bug fix: the message was previously passed as *two* arguments
            # to ValueError, so the exception carried a 2-tuple instead of a
            # single readable message.  Concatenate into one string.
            raise ValueError(
                "The lut must be an array of shape (n, m, 3) or (n, m, 4)"
                " or a PIL.image encoded as RGB or RGBA")
        if lut.dtype == np.uint8:
            # Normalize 8-bit image data to floats in [0, 1].
            lut = lut.astype(np.float32)/255
        if lut.shape[2] == 3:
            # Append an opaque alpha channel so the stored lut is always RGBA.
            new_lut = np.empty((lut.shape[0], lut.shape[1], 4), dtype=lut.dtype)
            new_lut[:, :, :3] = lut
            new_lut[:, :, 3] = 1.
            lut = new_lut
        self._lut = lut
        super().__init__(lut.shape[0], lut.shape[1], shape, origin, name=name)

    def _init(self):
        # The lut was fully built in __init__; nothing left to compute.
        self._isinit = True
class Normalize:
    """
    A class which, when called, maps values within the interval
    ``[vmin, vmax]`` linearly to the interval ``[0.0, 1.0]``. The mapping of
    values outside ``[vmin, vmax]`` depends on *clip*.

    Examples
    --------
    ::

        x = [-2, -1, 0, 1, 2]

        norm = mpl.colors.Normalize(vmin=-1, vmax=1, clip=False)
        norm(x)  # [-0.5, 0., 0.5, 1., 1.5]
        norm = mpl.colors.Normalize(vmin=-1, vmax=1, clip=True)
        norm(x)  # [0., 0., 0.5, 1., 1.]

    See Also
    --------
    :ref:`colormapnorms`
    """

    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        Parameters
        ----------
        vmin, vmax : float or None
            Values within the range ``[vmin, vmax]`` from the input data will be
            linearly mapped to ``[0, 1]``. If either *vmin* or *vmax* is not
            provided, they default to the minimum and maximum values of the input,
            respectively.

        clip : bool, default: False
            Determines the behavior for mapping values outside the range
            ``[vmin, vmax]``.

            If clipping is off, values outside the range ``[vmin, vmax]`` are
            also transformed, resulting in values outside ``[0, 1]``. This
            behavior is usually desirable, as colormaps can mark these *under*
            and *over* values with specific colors.

            If clipping is on, values below *vmin* are mapped to 0 and values
            above *vmax* are mapped to 1. Such values become indistinguishable
            from regular boundary values, which may cause misinterpretation of
            the data.

        Notes
        -----
        If ``vmin == vmax``, input data will be mapped to 0.
        """
        self._vmin = _sanitize_extrema(vmin)
        self._vmax = _sanitize_extrema(vmax)
        self._clip = clip
        # Set to a scale instance by the make_norm_from_scale machinery;
        # stays None for a plain Normalize.
        self._scale = None
        # Listeners (e.g. ScalarMappables) are notified via the 'changed'
        # signal whenever vmin/vmax/clip are modified.
        self.callbacks = cbook.CallbackRegistry(signals=["changed"])

    @property
    def vmin(self):
        # Lower data limit mapped to 0 (None until set or autoscaled).
        return self._vmin

    @vmin.setter
    def vmin(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmin:
            self._vmin = value
            self._changed()

    @property
    def vmax(self):
        # Upper data limit mapped to 1 (None until set or autoscaled).
        return self._vmax

    @vmax.setter
    def vmax(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmax:
            self._vmax = value
            self._changed()

    @property
    def clip(self):
        # Whether out-of-range values are clipped to [0, 1] in __call__.
        return self._clip

    @clip.setter
    def clip(self, value):
        if value != self._clip:
            self._clip = value
            self._changed()

    def _changed(self):
        """
        Call this whenever the norm is changed to notify all the
        callback listeners to the 'changed' signal.
        """
        self.callbacks.process('changed')

    @staticmethod
    def process_value(value):
        """
        Homogenize the input *value* for easy and efficient normalization.

        *value* can be a scalar or sequence.

        Parameters
        ----------
        value
            Data to normalize.

        Returns
        -------
        result : masked array
            Masked array with the same shape as *value*.
        is_scalar : bool
            Whether *value* is a scalar.

        Notes
        -----
        Float dtypes are preserved; integer types with two bytes or smaller are
        converted to np.float32, and larger types are converted to np.float64.
        Preserving float32 when possible, and using in-place operations,
        greatly improves speed for large arrays.
        """
        is_scalar = not np.iterable(value)
        if is_scalar:
            value = [value]
        dtype = np.min_scalar_type(value)
        if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:
            # bool_/int8/int16 -> float32; int32/int64 -> float64
            dtype = np.promote_types(dtype, np.float32)
        # ensure data passed in as an ndarray subclass are interpreted as
        # an ndarray. See issue #6622.
        mask = np.ma.getmask(value)
        data = np.asarray(value)
        result = np.ma.array(data, mask=mask, dtype=dtype, copy=True)
        return result, is_scalar

    def __call__(self, value, clip=None):
        """
        Normalize the data and return the normalized data.

        Parameters
        ----------
        value
            Data to normalize.
        clip : bool, optional
            See the description of the parameter *clip* in `.Normalize`.

            If ``None``, defaults to ``self.clip`` (which defaults to
            ``False``).

        Notes
        -----
        If not already initialized, ``self.vmin`` and ``self.vmax`` are
        initialized using ``self.autoscale_None(value)``.
        """
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        if self.vmin is None or self.vmax is None:
            self.autoscale_None(result)
        # Convert at least to float, without losing precision.
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)
        if vmin == vmax:
            result.fill(0)   # Or should it be all masked?  Or 0.5?
        elif vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        else:
            if clip:
                mask = np.ma.getmask(result)
                result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                     mask=mask)
            # ma division is very slow; we can take a shortcut
            # by operating in-place on the underlying ndarray and
            # re-attaching the mask afterwards.
            resdat = result.data
            resdat -= vmin
            resdat /= (vmax - vmin)
            result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            result = result[0]
        return result

    def inverse(self, value):
        """
        Maps the normalized value (i.e., index in the colormap) back to image
        data value.

        Parameters
        ----------
        value
            Normalized value.
        """
        if not self.scaled():
            raise ValueError("Not invertible until both vmin and vmax are set")
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)

        if np.iterable(value):
            val = np.ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)

    def autoscale(self, A):
        """Set *vmin*, *vmax* to min, max of *A*."""
        with self.callbacks.blocked():
            # Pause callbacks while we are updating so we only get
            # a single update signal at the end
            self.vmin = self.vmax = None
            self.autoscale_None(A)
        self._changed()

    def autoscale_None(self, A):
        """If *vmin* or *vmax* are not set, use the min/max of *A* to set them."""
        A = np.asanyarray(A)

        if isinstance(A, np.ma.MaskedArray):
            # we need to make the distinction between an array, False, np.bool_(False)
            if A.mask is False or not A.mask.shape:
                A = A.data

        if self.vmin is None and A.size:
            self.vmin = A.min()
        if self.vmax is None and A.size:
            self.vmax = A.max()

    def scaled(self):
        """Return whether *vmin* and *vmax* are both set."""
        return self.vmin is not None and self.vmax is not None
class TwoSlopeNorm(Normalize):
    """
    Normalize data with a set center, mapping *vmin* to 0, *vcenter* to 0.5
    and *vmax* to 1, with two independent linear slopes on either side of
    the center.
    """

    def __init__(self, vcenter, vmin=None, vmax=None):
        """
        Normalize data with a set center.

        Useful when mapping data with an unequal rates of change around a
        conceptual center, e.g., data that range from -2 to 4, with 0 as
        the midpoint.

        Parameters
        ----------
        vcenter : float
            The data value that defines ``0.5`` in the normalization.
        vmin : float, optional
            The data value that defines ``0.0`` in the normalization.
            Defaults to the min value of the dataset.
        vmax : float, optional
            The data value that defines ``1.0`` in the normalization.
            Defaults to the max value of the dataset.

        Examples
        --------
        This maps data value -4000 to 0., 0 to 0.5, and +10000 to 1.0; data
        between is linearly interpolated::

            >>> import matplotlib.colors as mcolors
            >>> offset = mcolors.TwoSlopeNorm(vmin=-4000.,
            ...                               vcenter=0., vmax=10000)
            >>> data = [-4000., -2000., 0., 2500., 5000., 7500., 10000.]
            >>> offset(data)
            array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
        """

        super().__init__(vmin=vmin, vmax=vmax)
        self._vcenter = vcenter
        # Only validate the pairs that are actually provided; missing limits
        # are filled in later by autoscale_None.
        if vcenter is not None and vmax is not None and vcenter >= vmax:
            raise ValueError('vmin, vcenter, and vmax must be in '
                             'ascending order')
        if vcenter is not None and vmin is not None and vcenter <= vmin:
            raise ValueError('vmin, vcenter, and vmax must be in '
                             'ascending order')

    @property
    def vcenter(self):
        # The data value mapped to 0.5.
        return self._vcenter

    @vcenter.setter
    def vcenter(self, value):
        if value != self._vcenter:
            self._vcenter = value
            self._changed()

    def autoscale_None(self, A):
        """
        Get vmin and vmax.

        If vcenter isn't in the range [vmin, vmax], either vmin or vmax
        is expanded so that vcenter lies in the middle of the modified range
        [vmin, vmax].
        """
        super().autoscale_None(A)
        if self.vmin >= self.vcenter:
            self.vmin = self.vcenter - (self.vmax - self.vcenter)
        if self.vmax <= self.vcenter:
            self.vmax = self.vcenter + (self.vcenter - self.vmin)

    def __call__(self, value, clip=None):
        """
        Map value to the interval [0, 1]. The *clip* argument is unused.
        """
        result, is_scalar = self.process_value(value)
        self.autoscale_None(result)  # sets self.vmin, self.vmax if None

        if not self.vmin <= self.vcenter <= self.vmax:
            raise ValueError("vmin, vcenter, vmax must increase monotonically")
        # note that we must extrapolate for tick locators:
        result = np.ma.masked_array(
            np.interp(result, [self.vmin, self.vcenter, self.vmax],
                      [0, 0.5, 1], left=-np.inf, right=np.inf),
            mask=np.ma.getmask(result))
        if is_scalar:
            result = np.atleast_1d(result)[0]
        return result

    def inverse(self, value):
        # Piecewise-linear inverse of __call__, extrapolating beyond [0, 1].
        if not self.scaled():
            raise ValueError("Not invertible until both vmin and vmax are set")
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)
        (vcenter,), _ = self.process_value(self.vcenter)
        result = np.interp(value, [0, 0.5, 1], [vmin, vcenter, vmax],
                           left=-np.inf, right=np.inf)
        return result
class CenteredNorm(Normalize):
    """
    Normalize symmetrical data around a center, keeping *vmin* and *vmax*
    equidistant from *vcenter* at all times.
    """

    def __init__(self, vcenter=0, halfrange=None, clip=False):
        """
        Normalize symmetrical data around a center (0 by default).

        Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
        around the center.

        Useful when mapping symmetrical data around a conceptual center
        e.g., data that range from -2 to 4, with 0 as the midpoint, and
        with equal rates of change around that midpoint.

        Parameters
        ----------
        vcenter : float, default: 0
            The data value that defines ``0.5`` in the normalization.
        halfrange : float, optional
            The range of data values that defines a range of ``0.5`` in the
            normalization, so that *vcenter* - *halfrange* is ``0.0`` and
            *vcenter* + *halfrange* is ``1.0`` in the normalization.
            Defaults to the largest absolute difference to *vcenter* for
            the values in the dataset.
        clip : bool, default: False
            Determines the behavior for mapping values outside the range
            ``[vmin, vmax]``.

            If clipping is off, values outside the range ``[vmin, vmax]`` are
            also transformed, resulting in values outside ``[0, 1]``. This
            behavior is usually desirable, as colormaps can mark these *under*
            and *over* values with specific colors.

            If clipping is on, values below *vmin* are mapped to 0 and values
            above *vmax* are mapped to 1. Such values become indistinguishable
            from regular boundary values, which may cause misinterpretation of
            the data.

        Examples
        --------
        This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
        (assuming equal rates of change above and below 0.0):

            >>> import matplotlib.colors as mcolors
            >>> norm = mcolors.CenteredNorm(halfrange=4.0)
            >>> data = [-2., 0., 4.]
            >>> norm(data)
            array([0.25, 0.5 , 1. ])
        """
        super().__init__(vmin=None, vmax=None, clip=clip)
        self._vcenter = vcenter
        # calling the halfrange setter to set vmin and vmax
        self.halfrange = halfrange

    def autoscale(self, A):
        """
        Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
        """
        A = np.asanyarray(A)
        self.halfrange = max(self._vcenter-A.min(),
                             A.max()-self._vcenter)

    def autoscale_None(self, A):
        """Set *vmin* and *vmax*."""
        A = np.asanyarray(A)
        if self.halfrange is None and A.size:
            self.autoscale(A)

    @property
    def vmin(self):
        return self._vmin

    @vmin.setter
    def vmin(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmin:
            self._vmin = value
            # Keep vmax symmetric around vcenter.
            self._vmax = 2*self.vcenter - value
            self._changed()

    @property
    def vmax(self):
        return self._vmax

    @vmax.setter
    def vmax(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmax:
            self._vmax = value
            # Keep vmin symmetric around vcenter.
            self._vmin = 2*self.vcenter - value
            self._changed()

    @property
    def vcenter(self):
        return self._vcenter

    @vcenter.setter
    def vcenter(self, vcenter):
        if vcenter != self._vcenter:
            self._vcenter = vcenter
            # Trigger an update of the vmin/vmax values through the setter
            self.halfrange = self.halfrange
            self._changed()

    @property
    def halfrange(self):
        # Derived quantity: half the current [vmin, vmax] span, or None if
        # the limits are unset.
        if self.vmin is None or self.vmax is None:
            return None
        return (self.vmax - self.vmin) / 2

    @halfrange.setter
    def halfrange(self, halfrange):
        if halfrange is None:
            self.vmin = None
            self.vmax = None
        else:
            self.vmin = self.vcenter - abs(halfrange)
            self.vmax = self.vcenter + abs(halfrange)
def make_norm_from_scale(scale_cls, base_norm_cls=None, *, init=None):
    """
    Decorator for building a `.Normalize` subclass from a `~.scale.ScaleBase`
    subclass.

    After ::

        @make_norm_from_scale(scale_cls)
        class norm_cls(Normalize):
            ...

    *norm_cls* is filled with methods so that normalization computations are
    forwarded to *scale_cls* (i.e., *scale_cls* is the scale that would be used
    for the colorbar of a mappable normalized with *norm_cls*).

    If *init* is not passed, then the constructor signature of *norm_cls*
    will be ``norm_cls(vmin=None, vmax=None, clip=False)``; these three
    parameters will be forwarded to the base class (``Normalize.__init__``),
    and a *scale_cls* object will be initialized with no arguments (other than
    a dummy axis).

    If the *scale_cls* constructor takes additional parameters, then *init*
    should be passed to `make_norm_from_scale`. It is a callable which is
    *only* used for its signature. First, this signature will become the
    signature of *norm_cls*. Second, the *norm_cls* constructor will bind the
    parameters passed to it using this signature, extract the bound *vmin*,
    *vmax*, and *clip* values, pass those to ``Normalize.__init__``, and
    forward the remaining bound values (including any defaults defined by the
    signature) to the *scale_cls* constructor.
    """
    if base_norm_cls is None:
        # Called as a decorator factory, @make_norm_from_scale(scale, ...).
        return functools.partial(make_norm_from_scale, scale_cls, init=init)

    # Unwrap a functools.partial scale into hashable (func, args, kwargs)
    # pieces so the cached helper can memoize on them.
    if isinstance(scale_cls, functools.partial):
        unwrapped_cls = scale_cls.func
        extra_args = scale_cls.args
        extra_kwargs_items = tuple(scale_cls.keywords.items())
    else:
        unwrapped_cls = scale_cls
        extra_args = ()
        extra_kwargs_items = ()

    if init is None:
        def init(vmin=None, vmax=None, clip=False): pass

    return _make_norm_from_scale(
        unwrapped_cls, extra_args, extra_kwargs_items,
        base_norm_cls, inspect.signature(init))
@functools.cache
def _make_norm_from_scale(
    scale_cls, scale_args, scale_kwargs_items,
    base_norm_cls, bound_init_signature,
):
    """
    Helper for `make_norm_from_scale`.

    This function is split out to enable caching (in particular so that
    different unpickles reuse the same class). In order to do so,

    - ``functools.partial`` *scale_cls* is expanded into ``func, args, kwargs``
      to allow memoizing returned norms (partial instances always compare
      unequal, but we can check identity based on ``func, args, kwargs``;
    - *init* is replaced by *init_signature*, as signatures are picklable,
      unlike to arbitrary lambdas.
    """

    class Norm(base_norm_cls):
        def __reduce__(self):
            cls = type(self)
            # If the class is toplevel-accessible, it is possible to directly
            # pickle it "by name".  This is required to support norm classes
            # defined at a module's toplevel, as the inner base_norm_cls is
            # otherwise unpicklable (as it gets shadowed by the generated norm
            # class).  If either import or attribute access fails, fall back to
            # the general path.
            try:
                if cls is getattr(importlib.import_module(cls.__module__),
                                  cls.__qualname__):
                    return (_create_empty_object_of_class, (cls,), vars(self))
            except (ImportError, AttributeError):
                pass
            return (_picklable_norm_constructor,
                    (scale_cls, scale_args, scale_kwargs_items,
                     base_norm_cls, bound_init_signature),
                    vars(self))

        def __init__(self, *args, **kwargs):
            # Bind against the supplied init signature, peel off the three
            # Normalize parameters, and forward the rest to the scale.
            ba = bound_init_signature.bind(*args, **kwargs)
            ba.apply_defaults()
            super().__init__(
                **{k: ba.arguments.pop(k) for k in ["vmin", "vmax", "clip"]})
            self._scale = functools.partial(
                scale_cls, *scale_args, **dict(scale_kwargs_items))(
                    axis=None, **ba.arguments)
            self._trf = self._scale.get_transform()

        # Advertise the init signature (plus self) for introspection/docs.
        __init__.__signature__ = bound_init_signature.replace(parameters=[
            inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD),
            *bound_init_signature.parameters.values()])

        def __call__(self, value, clip=None):
            value, is_scalar = self.process_value(value)
            if self.vmin is None or self.vmax is None:
                self.autoscale_None(value)
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            if self.vmin == self.vmax:
                return np.full_like(value, 0)
            if clip is None:
                clip = self.clip
            if clip:
                value = np.clip(value, self.vmin, self.vmax)
            # Normalize in the transformed (scale) space, then rescale the
            # transformed limits to [0, 1].
            t_value = self._trf.transform(value).reshape(np.shape(value))
            t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            t_value -= t_vmin
            t_value /= (t_vmax - t_vmin)
            t_value = np.ma.masked_invalid(t_value, copy=False)
            return t_value[0] if is_scalar else t_value

        def inverse(self, value):
            # Undo __call__: rescale [0, 1] back to the transformed-space
            # limits, then apply the inverse scale transform.
            if not self.scaled():
                raise ValueError("Not invertible until scaled")
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            value, is_scalar = self.process_value(value)
            rescaled = value * (t_vmax - t_vmin)
            rescaled += t_vmin
            value = (self._trf
                     .inverted()
                     .transform(rescaled)
                     .reshape(np.shape(value)))
            return value[0] if is_scalar else value

        def autoscale_None(self, A):
            # i.e. A[np.isfinite(...)], but also for non-array A's
            in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A)
            if in_trf_domain.size == 0:
                in_trf_domain = np.ma.masked
            return super().autoscale_None(in_trf_domain)

    if base_norm_cls is Normalize:
        Norm.__name__ = f"{scale_cls.__name__}Norm"
        Norm.__qualname__ = f"{scale_cls.__qualname__}Norm"
    else:
        Norm.__name__ = base_norm_cls.__name__
        Norm.__qualname__ = base_norm_cls.__qualname__
    Norm.__module__ = base_norm_cls.__module__
    Norm.__doc__ = base_norm_cls.__doc__

    return Norm
def _create_empty_object_of_class(cls):
    """Unpickling helper: allocate *cls* without running its ``__init__``."""
    return cls.__new__(cls)
def _picklable_norm_constructor(*args):
    """
    Unpickling helper: rebuild (via the cached `_make_norm_from_scale`) the
    dynamically generated norm class, then allocate an empty instance of it.
    """
    return _create_empty_object_of_class(_make_norm_from_scale(*args))
# The class body is only a docstring; all behavior is injected by the
# make_norm_from_scale decorator, which forwards computations to a FuncScale.
@make_norm_from_scale(
    scale.FuncScale,
    init=lambda functions, vmin=None, vmax=None, clip=False: None)
class FuncNorm(Normalize):
    """
    Arbitrary normalization using functions for the forward and inverse.

    Parameters
    ----------
    functions : (callable, callable)
        two-tuple of the forward and inverse functions for the normalization.
        The forward function must be monotonic.

        Both functions must have the signature ::

           def forward(values: array-like) -> array-like

    vmin, vmax : float or None
        If *vmin* and/or *vmax* is not given, they are initialized from the
        minimum and maximum value, respectively, of the first input
        processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``.

    clip : bool, default: False
        Determines the behavior for mapping values outside the range
        ``[vmin, vmax]``.

        If clipping is off, values outside the range ``[vmin, vmax]`` are also
        transformed by the function, resulting in values outside ``[0, 1]``.
        This behavior is usually desirable, as colormaps can mark these *under*
        and *over* values with specific colors.

        If clipping is on, values below *vmin* are mapped to 0 and values above
        *vmax* are mapped to 1. Such values become indistinguishable from
        regular boundary values, which may cause misinterpretation of the data.
    """
# LogNorm is generated dynamically rather than written out as a class: the
# make_norm_from_scale machinery wires Normalize onto a LogScale (with
# non-positive values masked), then the dunders are patched for nice reprs.
LogNorm = make_norm_from_scale(
    functools.partial(scale.LogScale, nonpositive="mask"))(Normalize)
LogNorm.__name__ = LogNorm.__qualname__ = "LogNorm"
LogNorm.__doc__ = "Normalize a given value to the 0-1 range on a log scale."
# Behavior is injected by the decorator, which forwards computations to a
# SymmetricalLogScale built from the bound init parameters.
@make_norm_from_scale(
    scale.SymmetricalLogScale,
    init=lambda linthresh, linscale=1., vmin=None, vmax=None, clip=False, *,
    base=10: None)
class SymLogNorm(Normalize):
    """
    The symmetrical logarithmic scale is logarithmic in both the
    positive and negative directions from the origin.

    Since the values close to zero tend toward infinity, there is a
    need to have a range around zero that is linear.  The parameter
    *linthresh* allows the user to specify the size of this range
    (-*linthresh*, *linthresh*).

    Parameters
    ----------
    linthresh : float
        The range within which the plot is linear (to avoid having the plot
        go to infinity around zero).
    linscale : float, default: 1
        This allows the linear range (-*linthresh* to *linthresh*) to be
        stretched relative to the logarithmic range. Its value is the
        number of decades to use for each half of the linear range. For
        example, when *linscale* == 1.0 (the default), the space used for
        the positive and negative halves of the linear range will be equal
        to one decade in the logarithmic range.
    base : float, default: 10
    """

    @property
    def linthresh(self):
        # Delegate to the underlying scale set up by the decorator machinery.
        return self._scale.linthresh

    @linthresh.setter
    def linthresh(self, value):
        self._scale.linthresh = value
# Norm behavior is synthesized from ``scale.AsinhScale``; the *init* lambda
# only supplies the constructor signature.
@make_norm_from_scale(
    scale.AsinhScale,
    init=lambda linear_width=1, vmin=None, vmax=None, clip=False: None)
class AsinhNorm(Normalize):
    """
    The inverse hyperbolic sine scale is approximately linear near
    the origin, but becomes logarithmic for larger positive
    or negative values. Unlike the `SymLogNorm`, the transition between
    these linear and logarithmic regions is smooth, which may reduce
    the risk of visual artifacts.

    .. note::

       This API is provisional and may be revised in the future
       based on early user feedback.

    Parameters
    ----------
    linear_width : float, default: 1
        The effective width of the linear region, beyond which
        the transformation becomes asymptotically logarithmic
    """

    @property
    def linear_width(self):
        """The effective width of the linear region (read/write)."""
        return self._scale.linear_width

    @linear_width.setter
    def linear_width(self, value):
        # Storage is delegated to the underlying scale object.
        self._scale.linear_width = value
class PowerNorm(Normalize):
    r"""
    Linearly map a given value to the 0-1 range and then apply
    a power-law normalization over that range.

    Parameters
    ----------
    gamma : float
        Power law exponent.
    vmin, vmax : float or None
        If *vmin* and/or *vmax* is not given, they are initialized from the
        minimum and maximum value, respectively, of the first input
        processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``.
    clip : bool, default: False
        Determines the behavior for mapping values outside the range
        ``[vmin, vmax]``.

        If clipping is off, values above *vmax* are transformed by the power
        function, resulting in values above 1, and values below *vmin* are linearly
        transformed resulting in values below 0. This behavior is usually desirable, as
        colormaps can mark these *under* and *over* values with specific colors.

        If clipping is on, values below *vmin* are mapped to 0 and values above
        *vmax* are mapped to 1. Such values become indistinguishable from
        regular boundary values, which may cause misinterpretation of the data.

    Notes
    -----
    The normalization formula is

    .. math::

        \left ( \frac{x - v_{min}}{v_{max} - v_{min}} \right )^{\gamma}

    For input values below *vmin*, gamma is set to one.
    """

    def __init__(self, gamma, vmin=None, vmax=None, clip=False):
        super().__init__(vmin, vmax, clip)
        self.gamma = gamma

    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        # Fills in vmin/vmax from the data if they were not given (see class
        # docstring).
        self.autoscale_None(result)
        gamma = self.gamma
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            # Degenerate range: everything maps to 0.
            result.fill(0)
        else:
            if clip:
                mask = np.ma.getmask(result)
                result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                     mask=mask)
            # Work on the raw data buffer in place, then re-wrap with the
            # original mask at the end.
            resdat = result.data
            resdat -= vmin
            resdat /= (vmax - vmin)
            # Apply the power law only to the positive part; values <= 0
            # (i.e. at or below *vmin*) stay linear, matching the docstring
            # note "For input values below *vmin*, gamma is set to one."
            resdat[resdat > 0] = np.power(resdat[resdat > 0], gamma)
            result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            result = result[0]
        return result

    def inverse(self, value):
        """Map normalized values back to data space (requires vmin/vmax set)."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        result, is_scalar = self.process_value(value)
        gamma = self.gamma
        vmin, vmax = self.vmin, self.vmax
        # Invert the power law on the positive part, then undo the linear
        # rescaling (in place on the data buffer).
        resdat = result.data
        resdat[resdat > 0] = np.power(resdat[resdat > 0], 1 / gamma)
        resdat *= (vmax - vmin)
        resdat += vmin
        result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            result = result[0]
        return result
class BoundaryNorm(Normalize):
    """
    Generate a colormap index based on discrete intervals.

    Unlike `Normalize` or `LogNorm`, `BoundaryNorm` maps values to integers
    instead of to the interval 0-1.
    """

    # Mapping to the 0-1 interval could have been done via piece-wise linear
    # interpolation, but using integers seems simpler, and reduces the number
    # of conversions back and forth between int and float.

    def __init__(self, boundaries, ncolors, clip=False, *, extend='neither'):
        """
        Parameters
        ----------
        boundaries : array-like
            Monotonically increasing sequence of at least 2 bin edges: data
            falling in the n-th bin will be mapped to the n-th color.
        ncolors : int
            Number of colors in the colormap to be used.
        clip : bool, optional
            If clip is ``True``, out of range values are mapped to 0 if they
            are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they
            are above ``boundaries[-1]``.

            If clip is ``False``, out of range values are mapped to -1 if
            they are below ``boundaries[0]`` or mapped to *ncolors* if they are
            above ``boundaries[-1]``. These are then converted to valid indices
            by `Colormap.__call__`.
        extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
            Extend the number of bins to include one or both of the
            regions beyond the boundaries.  For example, if ``extend``
            is 'min', then the color to which the region between the first
            pair of boundaries is mapped will be distinct from the first
            color in the colormap, and by default a
            `~matplotlib.colorbar.Colorbar` will be drawn with
            the triangle extension on the left or lower end.

        Notes
        -----
        If there are fewer bins (including extensions) than colors, then the
        color index is chosen by linearly interpolating the ``[0, nbins - 1]``
        range onto the ``[0, ncolors - 1]`` range, effectively skipping some
        colors in the middle of the colormap.
        """
        if clip and extend != 'neither':
            raise ValueError("'clip=True' is not compatible with 'extend'")
        super().__init__(vmin=boundaries[0], vmax=boundaries[-1], clip=clip)
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)  # number of bin edges
        if self.N < 2:
            raise ValueError("You must provide at least 2 boundaries "
                             f"(1 region) but you passed in {boundaries!r}")
        self.Ncmap = ncolors
        self.extend = extend

        self._scale = None  # don't use the default scale.

        self._n_regions = self.N - 1  # number of colors needed
        self._offset = 0
        # Each requested extension adds one region; a 'min' extension also
        # shifts every in-range bin index up by one (the offset).
        if extend in ('min', 'both'):
            self._n_regions += 1
            self._offset = 1
        if extend in ('max', 'both'):
            self._n_regions += 1
        if self._n_regions > self.Ncmap:
            raise ValueError(f"There are {self._n_regions} color bins "
                             "including extensions, but ncolors = "
                             f"{ncolors}; ncolors must equal or exceed the "
                             "number of bins")

    def __call__(self, value, clip=None):
        """
        This method behaves similarly to `.Normalize.__call__`, except that it
        returns integers or arrays of int16.
        """
        if clip is None:
            clip = self.clip

        xx, is_scalar = self.process_value(value)
        mask = np.ma.getmaskarray(xx)
        # Fill masked values a value above the upper boundary
        xx = np.atleast_1d(xx.filled(self.vmax + 1))
        if clip:
            np.clip(xx, self.vmin, self.vmax, out=xx)
            max_col = self.Ncmap - 1
        else:
            max_col = self.Ncmap
        # this gives us the bins in the lookup table in the range
        # [0, _n_regions - 1]  (the offset is set in the init)
        iret = np.digitize(xx, self.boundaries) - 1 + self._offset
        # if we have more colors than regions, stretch the region
        # index computed above to full range of the color bins.  This
        # will make use of the full range (but skip some of the colors
        # in the middle) such that the first region is mapped to the
        # first color and the last region is mapped to the last color.
        if self.Ncmap > self._n_regions:
            if self._n_regions == 1:
                # special case the 1 region case, pick the middle color
                iret[iret == 0] = (self.Ncmap - 1) // 2
            else:
                # otherwise linearly remap the values from the region index
                # to the color index spaces
                iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret
        # cast to 16bit integers in all cases
        iret = iret.astype(np.int16)
        # Out-of-range values get the sentinel indices described in __init__.
        iret[xx < self.vmin] = -1
        iret[xx >= self.vmax] = max_col
        ret = np.ma.array(iret, mask=mask)
        if is_scalar:
            ret = int(ret[0])  # assume python scalar
        return ret

    def inverse(self, value):
        """
        Raises
        ------
        ValueError
            BoundaryNorm is not invertible, so calling this method will always
            raise an error
        """
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    """
    Dummy replacement for `Normalize`, for the case where we want to use
    indices directly in a `~matplotlib.cm.ScalarMappable`.
    """

    def __call__(self, value, clip=None):
        # Identity mapping; iterables are wrapped as masked arrays so that
        # downstream code can rely on the masked-array interface.
        return np.ma.array(value) if np.iterable(value) else value

    def inverse(self, value):
        # Forward and inverse are the same identity mapping.
        return np.ma.array(value) if np.iterable(value) else value
def rgb_to_hsv(arr):
    """
    Convert an array of float RGB values (in the range [0, 1]) to HSV values.

    Parameters
    ----------
    arr : (..., 3) array-like
        All values must be in the range [0, 1]

    Returns
    -------
    (..., 3) `~numpy.ndarray`
        Colors converted to HSV values in range [0, 1]
    """
    arr = np.asarray(arr)

    # check length of the last dimension, should be _some_ sort of rgb
    if arr.shape[-1] != 3:
        raise ValueError("Last dimension of input array must be 3; "
                         f"shape {arr.shape} was found.")

    in_shape = arr.shape
    # Promote to at least float32 (don't work on ints).  NOTE: the previous
    # ``np.array(arr, copy=False, ...)`` raises under NumPy >= 2 whenever a
    # copy is required (e.g. integer input needing dtype promotion);
    # ``asarray`` copies only when necessary, and ``atleast_2d`` replicates
    # the old ``ndmin=2`` handling of 1D input.
    arr = np.atleast_2d(
        np.asarray(arr, dtype=np.promote_types(arr.dtype, np.float32)))
    out = np.zeros_like(arr)
    arr_max = arr.max(-1)
    # Check if input is in the expected range
    if np.any(arr_max > 1):
        raise ValueError(
            "Input array must be in the range [0, 1]. "
            f"Found a maximum value of {arr_max.max()}"
        )
    if arr.min() < 0:
        raise ValueError(
            "Input array must be in the range [0, 1]. "
            f"Found a minimum value of {arr.min()}"
        )
    ipos = arr_max > 0
    delta = np.ptp(arr, -1)
    s = np.zeros_like(delta)
    s[ipos] = delta[ipos] / arr_max[ipos]
    ipos = delta > 0
    # The hue formula depends on which channel holds the maximum.
    # red is max
    idx = (arr[..., 0] == arr_max) & ipos
    out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
    # green is max
    idx = (arr[..., 1] == arr_max) & ipos
    out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
    # blue is max
    idx = (arr[..., 2] == arr_max) & ipos
    out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]

    out[..., 0] = (out[..., 0] / 6.0) % 1.0
    out[..., 1] = s
    out[..., 2] = arr_max

    return out.reshape(in_shape)
def hsv_to_rgb(hsv):
    """
    Convert HSV values to RGB.

    Parameters
    ----------
    hsv : (..., 3) array-like
        All values assumed to be in range [0, 1]

    Returns
    -------
    (..., 3) `~numpy.ndarray`
        Colors converted to RGB values in range [0, 1]
    """
    hsv = np.asarray(hsv)

    # check length of the last dimension, should be _some_ sort of rgb
    if hsv.shape[-1] != 3:
        raise ValueError("Last dimension of input array must be 3; "
                         f"shape {hsv.shape} was found.")

    in_shape = hsv.shape
    # Promote to at least float32 (don't work on ints).  NOTE: the previous
    # ``np.array(hsv, copy=False, ...)`` raises under NumPy >= 2 whenever a
    # copy is required (e.g. integer input needing dtype promotion);
    # ``asarray`` copies only when necessary, and ``atleast_2d`` replicates
    # the old ``ndmin=2`` handling of 1D input.
    hsv = np.atleast_2d(
        np.asarray(hsv, dtype=np.promote_types(hsv.dtype, np.float32)))

    h = hsv[..., 0]
    s = hsv[..., 1]
    v = hsv[..., 2]

    r = np.empty_like(h)
    g = np.empty_like(h)
    b = np.empty_like(h)

    # Standard sextant decomposition of the hue wheel.
    i = (h * 6.0).astype(int)
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))

    idx = i % 6 == 0
    r[idx] = v[idx]
    g[idx] = t[idx]
    b[idx] = p[idx]

    idx = i == 1
    r[idx] = q[idx]
    g[idx] = v[idx]
    b[idx] = p[idx]

    idx = i == 2
    r[idx] = p[idx]
    g[idx] = v[idx]
    b[idx] = t[idx]

    idx = i == 3
    r[idx] = p[idx]
    g[idx] = q[idx]
    b[idx] = v[idx]

    idx = i == 4
    r[idx] = t[idx]
    g[idx] = p[idx]
    b[idx] = v[idx]

    idx = i == 5
    r[idx] = v[idx]
    g[idx] = p[idx]
    b[idx] = q[idx]

    # Zero saturation means achromatic (gray), regardless of hue.
    idx = s == 0
    r[idx] = v[idx]
    g[idx] = v[idx]
    b[idx] = v[idx]

    rgb = np.stack([r, g, b], axis=-1)

    return rgb.reshape(in_shape)
def _vector_magnitude(arr):
# things that don't work here:
# * np.linalg.norm: drops mask from ma.array
# * np.sum: drops mask from ma.array unless entire vector is masked
sum_sq = 0
for i in range(arr.shape[-1]):
sum_sq += arr[..., i, np.newaxis] ** 2
return np.sqrt(sum_sq)
class LightSource:
    """
    Create a light source coming from the specified azimuth and elevation.
    Angles are in degrees, with the azimuth measured
    clockwise from north and elevation up from the zero plane of the surface.

    `shade` is used to produce "shaded" RGB values for a data array.
    `shade_rgb` can be used to combine an RGB image with an elevation map.
    `hillshade` produces an illumination map of a surface.
    """

    def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
                 hsv_min_sat=1, hsv_max_sat=0):
        """
        Specify the azimuth (measured clockwise from south) and altitude
        (measured up from the plane of the surface) of the light source
        in degrees.

        Parameters
        ----------
        azdeg : float, default: 315 degrees (from the northwest)
            The azimuth (0-360, degrees clockwise from North) of the light
            source.
        altdeg : float, default: 45 degrees
            The altitude (0-90, degrees up from horizontal) of the light
            source.
        hsv_min_val : number, default: 0
            The minimum value ("v" in "hsv") that the *intensity* map can shift
            the output image to.
        hsv_max_val : number, default: 1
            The maximum value ("v" in "hsv") that the *intensity* map can shift
            the output image to.
        hsv_min_sat : number, default: 1
            The minimum saturation value that the *intensity* map can shift the
            output image to.
        hsv_max_sat : number, default: 0
            The maximum saturation value that the *intensity* map can shift the
            output image to.

        Notes
        -----
        For backwards compatibility, the parameters *hsv_min_val*,
        *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
        initialization as well.  However, these parameters will only be used
        if "blend_mode='hsv'" is passed into `shade` or `shade_rgb`.
        See the documentation for `blend_hsv` for more details.
        """
        self.azdeg = azdeg
        self.altdeg = altdeg
        self.hsv_min_val = hsv_min_val
        self.hsv_max_val = hsv_max_val
        self.hsv_min_sat = hsv_min_sat
        self.hsv_max_sat = hsv_max_sat

    @property
    def direction(self):
        """The unit vector direction towards the light source."""
        # Azimuth is in degrees clockwise from North.  Convert to radians
        # counterclockwise from East (mathematical notation).
        az = np.radians(90 - self.azdeg)
        alt = np.radians(self.altdeg)
        return np.array([
            np.cos(az) * np.cos(alt),
            np.sin(az) * np.cos(alt),
            np.sin(alt)
        ])

    def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
        """
        Calculate the illumination intensity for a surface using the defined
        azimuth and elevation for the light source.

        This computes the normal vectors for the surface, and then passes them
        on to `shade_normals`.

        Parameters
        ----------
        elevation : 2D array-like
            The height values used to generate an illumination map
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences in
            units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topographic effects.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.

        Returns
        -------
        `~numpy.ndarray`
            A 2D array of illumination values between 0-1, where 0 is
            completely in shadow and 1 is completely illuminated.
        """
        # Because most image and raster GIS data has the first row in the array
        # as the "top" of the image, dy is implicitly negative.  This is
        # consistent to what `imshow` assumes, as well.
        dy = -dy

        # compute the normal vectors from the partial derivatives
        e_dy, e_dx = np.gradient(vert_exag * elevation, dy, dx)

        # .view is to keep subclasses
        normal = np.empty(elevation.shape + (3,)).view(type(elevation))
        normal[..., 0] = -e_dx
        normal[..., 1] = -e_dy
        normal[..., 2] = 1
        normal /= _vector_magnitude(normal)

        return self.shade_normals(normal, fraction)

    def shade_normals(self, normals, fraction=1.):
        """
        Calculate the illumination intensity for the normal vectors of a
        surface using the defined azimuth and elevation for the light source.

        Imagine an artificial sun placed at infinity in some azimuth and
        elevation position illuminating our surface. The parts of the surface
        that slope toward the sun should brighten while those sides facing
        away should become darker.

        Parameters
        ----------
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.

        Returns
        -------
        `~numpy.ndarray`
            A 2D array of illumination values between 0-1, where 0 is
            completely in shadow and 1 is completely illuminated.
        """
        intensity = normals.dot(self.direction)

        # Apply contrast stretch
        imin, imax = intensity.min(), intensity.max()
        intensity *= fraction

        # Rescale to 0-1, keeping range before contrast stretch
        # If constant slope, keep relative scaling (i.e. flat should be 0.5,
        # fully occluded 0, etc.)
        if (imax - imin) > 1e-6:
            # Strictly speaking, this is incorrect. Negative values should be
            # clipped to 0 because they're fully occluded. However, rescaling
            # in this manner is consistent with the previous implementation
            # and visually appears better than a "hard" clip.
            intensity -= imin
            intensity /= (imax - imin)
        intensity = np.clip(intensity, 0, 1)

        return intensity

    def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None,
              vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
        """
        Combine colormapped data values with an illumination intensity map
        (a.k.a.  "hillshade") of the values.

        Parameters
        ----------
        data : 2D array-like
            The height values used to generate a shaded map.
        cmap : `~matplotlib.colors.Colormap`
            The colormap used to color the *data* array. Note that this must be
            a `~matplotlib.colors.Colormap` instance.  For example, rather than
            passing in ``cmap='gist_earth'``, use
            ``cmap=plt.get_cmap('gist_earth')`` instead.
        norm : `~matplotlib.colors.Normalize` instance, optional
            The normalization used to scale values before colormapping. If
            None, the input will be linearly scaled between its min and max.
        blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
            The type of blending used to combine the colormapped data
            values with the illumination intensity.  Default is
            "overlay".  Note that for most topographic surfaces,
            "overlay" or "soft" appear more visually realistic. If a
            user-defined function is supplied, it is expected to
            combine an (M, N, 3) RGB array of floats (ranging 0 to 1) with
            an (M, N, 1) hillshade array (also 0 to 1).  (Call signature
            ``func(rgb, illum, **kwargs)``) Additional kwargs supplied
            to this function will be passed on to the *blend_mode*
            function.
        vmin : float or None, optional
            The minimum value used in colormapping *data*. If *None* the
            minimum value in *data* is used. If *norm* is specified, then this
            argument will be ignored.
        vmax : float or None, optional
            The maximum value used in colormapping *data*. If *None* the
            maximum value in *data* is used. If *norm* is specified, then this
            argument will be ignored.
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences in
            units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topography.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.
        **kwargs
            Additional kwargs are passed on to the *blend_mode* function.

        Returns
        -------
        `~numpy.ndarray`
            An (M, N, 4) array of floats ranging between 0-1.
        """
        if vmin is None:
            vmin = data.min()
        if vmax is None:
            vmax = data.max()
        if norm is None:
            norm = Normalize(vmin=vmin, vmax=vmax)

        rgb0 = cmap(norm(data))
        rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
                              vert_exag=vert_exag, dx=dx, dy=dy,
                              fraction=fraction, **kwargs)
        # Don't overwrite the alpha channel, if present.
        rgb0[..., :3] = rgb1[..., :3]
        return rgb0

    def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
                  vert_exag=1, dx=1, dy=1, **kwargs):
        """
        Use this light source to adjust the colors of the *rgb* input array to
        give the impression of a shaded relief map with the given *elevation*.

        Parameters
        ----------
        rgb : array-like
            An (M, N, 3) RGB array, assumed to be in the range of 0 to 1.
        elevation : array-like
            An (M, N) array of the height values used to generate a shaded map.
        fraction : number
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.
        blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
            The type of blending used to combine the colormapped data values
            with the illumination intensity.  For backwards compatibility, this
            defaults to "hsv". Note that for most topographic surfaces,
            "overlay" or "soft" appear more visually realistic. If a
            user-defined function is supplied, it is expected to combine an
            (M, N, 3) RGB array of floats (ranging 0 to 1) with an (M, N, 1)
            hillshade array (also 0 to 1).  (Call signature
            ``func(rgb, illum, **kwargs)``)
            Additional kwargs supplied to this function will be passed on to
            the *blend_mode* function.
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences in
            units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topography.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        **kwargs
            Additional kwargs are passed on to the *blend_mode* function.

        Returns
        -------
        `~numpy.ndarray`
            An (m, n, 3) array of floats ranging between 0-1.
        """
        # Calculate the "hillshade" intensity.
        intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
        intensity = intensity[..., np.newaxis]

        # Blend the hillshade and rgb data using the specified mode
        lookup = {
            'hsv': self.blend_hsv,
            'soft': self.blend_soft_light,
            'overlay': self.blend_overlay,
        }
        if blend_mode in lookup:
            blend = lookup[blend_mode](rgb, intensity, **kwargs)
        else:
            try:
                blend = blend_mode(rgb, intensity, **kwargs)
            except TypeError as err:
                # Bug fix: the message previously interpolated ``lookup.keys``
                # (the bound method itself, rendering as e.g. "<built-in
                # method keys ...>"); list the valid mode names instead.
                raise ValueError('"blend_mode" must be callable or one of '
                                 f'{", ".join(map(repr, lookup))}') from err

        # Only apply result where hillshade intensity isn't masked
        if np.ma.is_masked(intensity):
            mask = intensity.mask[..., 0]
            for i in range(3):
                blend[..., i][mask] = rgb[..., i][mask]

        return blend

    def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
                  hsv_min_val=None, hsv_min_sat=None):
        """
        Take the input data array, convert to HSV values in the given colormap,
        then adjust those color values to give the impression of a shaded
        relief map with a specified light source.  RGBA values are returned,
        which can then be used to plot the shaded image with imshow.

        The color of the resulting image will be darkened by moving the (s, v)
        values (in HSV colorspace) toward (hsv_min_sat, hsv_min_val) in the
        shaded regions, or lightened by sliding (s, v) toward (hsv_max_sat,
        hsv_max_val) in regions that are illuminated.  The default extremes are
        chose so that completely shaded points are nearly black (s = 1, v = 0)
        and completely illuminated points are nearly white (s = 0, v = 1).

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image).
        hsv_max_sat : number, optional
            The maximum saturation value that the *intensity* map can shift the
            output image to. If not provided, use the value provided upon
            initialization.
        hsv_min_sat : number, optional
            The minimum saturation value that the *intensity* map can shift the
            output image to. If not provided, use the value provided upon
            initialization.
        hsv_max_val : number, optional
            The maximum value ("v" in "hsv") that the *intensity* map can shift
            the output image to. If not provided, use the value provided upon
            initialization.
        hsv_min_val : number, optional
            The minimum value ("v" in "hsv") that the *intensity* map can shift
            the output image to. If not provided, use the value provided upon
            initialization.

        Returns
        -------
        `~numpy.ndarray`
            An (M, N, 3) RGB array representing the combined images.
        """
        # Backward compatibility...
        if hsv_max_sat is None:
            hsv_max_sat = self.hsv_max_sat
        if hsv_max_val is None:
            hsv_max_val = self.hsv_max_val
        if hsv_min_sat is None:
            hsv_min_sat = self.hsv_min_sat
        if hsv_min_val is None:
            hsv_min_val = self.hsv_min_val

        # Expects a 2D intensity array scaled between -1 to 1...
        intensity = intensity[..., 0]
        intensity = 2 * intensity - 1

        # Convert to rgb, then rgb to hsv
        hsv = rgb_to_hsv(rgb[:, :, 0:3])
        hue, sat, val = np.moveaxis(hsv, -1, 0)

        # Modify hsv values (in place) to simulate illumination.
        # putmask(A, mask, B) <=> A[mask] = B[mask]
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity > 0),
                   (1 - intensity) * sat + intensity * hsv_max_sat)
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity < 0),
                   (1 + intensity) * sat - intensity * hsv_min_sat)
        np.putmask(val, intensity > 0,
                   (1 - intensity) * val + intensity * hsv_max_val)
        np.putmask(val, intensity < 0,
                   (1 + intensity) * val - intensity * hsv_min_val)
        np.clip(hsv[:, :, 1:], 0, 1, out=hsv[:, :, 1:])

        # Convert modified hsv back to rgb.
        return hsv_to_rgb(hsv)

    def blend_soft_light(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "soft light" blending,
        using the "pegtop" formula.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        `~numpy.ndarray`
            An (M, N, 3) RGB array representing the combined images.
        """
        return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2

    def blend_overlay(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "overlay" blending.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        ndarray
            An (M, N, 3) RGB array representing the combined images.
        """
        low = 2 * intensity * rgb
        high = 1 - 2 * (1 - intensity) * (1 - rgb)
        return np.where(rgb <= 0.5, low, high)
def from_levels_and_colors(levels, colors, extend='neither'):
    """
    A helper routine to generate a cmap and a norm instance which
    behave similar to contourf's levels and colors arguments.

    Parameters
    ----------
    levels : sequence of numbers
        The quantization levels used to construct the `BoundaryNorm`.
        Value ``v`` is quantized to level ``i`` if ``lev[i] <= v < lev[i+1]``.
    colors : sequence of colors
        The fill color to use for each level. If *extend* is "neither" there
        must be ``n_level - 1`` colors. For an *extend* of "min" or "max" add
        one extra color, and for an *extend* of "both" add two colors.
    extend : {'neither', 'min', 'max', 'both'}, optional
        The behaviour when a value falls out of range of the given levels.
        See `~.Axes.contourf` for details.

    Returns
    -------
    cmap : `~matplotlib.colors.Colormap`
    norm : `~matplotlib.colors.Normalize`
    """
    # Which slice of *colors* fills the body of the colormap for each mode;
    # colors outside the slice become the under/over colors.
    slice_map = {
        'both': slice(1, -1),
        'min': slice(1, None),
        'max': slice(0, -1),
        'neither': slice(0, None),
    }
    _api.check_in_list(slice_map, extend=extend)
    color_slice = slice_map[extend]

    n_data_colors = len(levels) - 1
    # ``start`` counts leading extension colors; a ``stop`` of -1 adds one
    # trailing extension color (``stop or 0`` maps None -> 0).
    n_expected = n_data_colors + color_slice.start - (color_slice.stop or 0)
    if len(colors) != n_expected:
        raise ValueError(
            f'With extend == {extend!r} and {len(levels)} levels, '
            f'expected {n_expected} colors, but got {len(colors)}')

    cmap = ListedColormap(colors[color_slice], N=n_data_colors)
    cmap.set_under(colors[0] if extend in ('min', 'both') else 'none')
    cmap.set_over(colors[-1] if extend in ('max', 'both') else 'none')
    cmap.colorbar_extend = extend

    norm = BoundaryNorm(levels, ncolors=n_data_colors)
    return cmap, norm
venv\Lib\site-packages\matplotlib\container.py
from matplotlib import cbook
from matplotlib.artist import Artist
class Container(tuple):
    """
    Base class for containers.

    Containers are classes that collect semantically related Artists such as
    the bars of a bar plot.
    """

    def __repr__(self):
        return "<{} object of {} artists>".format(
            type(self).__name__, len(self))

    def __new__(cls, *args, **kwargs):
        # Only the first positional argument feeds the tuple itself; the
        # remaining arguments are consumed by __init__.
        return tuple.__new__(cls, args[0])

    def __init__(self, kl, label=None):
        self._callbacks = cbook.CallbackRegistry(signals=["pchanged"])
        self._remove_method = None
        self._label = None if label is None else str(label)

    def remove(self):
        # Remove every contained artist, then detach the container itself.
        for artist in cbook.flatten(
                self, scalarp=lambda x: isinstance(x, Artist)):
            if artist is not None:
                artist.remove()
        if self._remove_method:
            self._remove_method(self)

    def get_children(self):
        return [child for child in cbook.flatten(self) if child is not None]

    # Borrow the Artist implementations of the label and callback API so
    # containers behave like artists in those respects.
    get_label = Artist.get_label
    set_label = Artist.set_label
    add_callback = Artist.add_callback
    remove_callback = Artist.remove_callback
    pchanged = Artist.pchanged
class BarContainer(Container):
    """
    Container for the artists of bar plots (e.g. created by `.Axes.bar`).

    The container can be treated as a tuple of the *patches* themselves.
    Additionally, you can access these and further parameters by the
    attributes.

    Attributes
    ----------
    patches : list of :class:`~matplotlib.patches.Rectangle`
        The artists of the bars.
    errorbar : None or :class:`~matplotlib.container.ErrorbarContainer`
        A container for the error bar artists if error bars are present.
        *None* otherwise.
    datavalues : None or array-like
        The underlying data values corresponding to the bars.
    orientation : {'vertical', 'horizontal'}, default: None
        If 'vertical', the bars are assumed to be vertical.
        If 'horizontal', the bars are assumed to be horizontal.
    """

    def __init__(self, patches, errorbar=None, *, datavalues=None,
                 orientation=None, **kwargs):
        # Record the bar-specific artists/metadata, then hand the patch
        # sequence to the tuple-based Container machinery.
        self.errorbar = errorbar
        self.datavalues = datavalues
        self.orientation = orientation
        self.patches = patches
        super().__init__(patches, **kwargs)
class ErrorbarContainer(Container):
    """
    Container for the artists of error bars (e.g. created by `.Axes.errorbar`).

    The container can be treated as the *lines* tuple itself.
    Additionally, you can access these and further parameters by the
    attributes.

    Attributes
    ----------
    lines : tuple
        Tuple of ``(data_line, caplines, barlinecols)``.

        - data_line : A `~matplotlib.lines.Line2D` instance of x, y plot
          markers and/or line.
        - caplines : A tuple of `~matplotlib.lines.Line2D` instances of the
          error bar caps.
        - barlinecols : A tuple of `~matplotlib.collections.LineCollection`
          with the horizontal and vertical error ranges.

    has_xerr, has_yerr : bool
        ``True`` if the errorbar has x/y errors.
    """

    def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
        # Record which error directions are present before the tuple setup.
        self.has_xerr = has_xerr
        self.has_yerr = has_yerr
        self.lines = lines
        super().__init__(lines, **kwargs)
class StemContainer(Container):
    """
    Container for the artists created by a :meth:`.Axes.stem` plot.

    The container behaves like a namedtuple ``(markerline, stemlines,
    baseline)``.

    Attributes
    ----------
    markerline : `~matplotlib.lines.Line2D`
        The artist of the markers at the stem heads.
    stemlines : `~matplotlib.collections.LineCollection`
        The artists of the vertical lines for all stems.
    baseline : `~matplotlib.lines.Line2D`
        The artist of the horizontal baseline.
    """

    def __init__(self, markerline_stemlines_baseline, **kwargs):
        """
        Parameters
        ----------
        markerline_stemlines_baseline : tuple
            Tuple of ``(markerline, stemlines, baseline)``.
            ``markerline`` contains the `.Line2D` of the markers,
            ``stemlines`` is a `.LineCollection` of the main lines,
            ``baseline`` is the `.Line2D` of the baseline.
        """
        # Unpack the triple directly onto the instance attributes.
        (self.markerline,
         self.stemlines,
         self.baseline) = markerline_stemlines_baseline
        super().__init__(markerline_stemlines_baseline, **kwargs)
# --- File boundary: venv\Lib\site-packages\matplotlib\contour.py ---
"""
Classes to support contour plotting and labelling for the Axes class.
"""
from contextlib import ExitStack
import functools
import math
from numbers import Integral
import numpy as np
from numpy import ma
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.backend_bases import MouseButton
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.text import Text
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
def _contour_labeler_event_handler(cs, inline, inline_spacing, event):
    """Handle one mouse/keyboard *event* during interactive label placement."""
    canvas = cs.axes.get_figure(root=True).canvas
    # Normalize the event into at most one of *button* / *key*.
    button = event.button if event.name == "button_press_event" else None
    key = event.key if event.name == "key_press_event" else None
    if button == MouseButton.MIDDLE or key in ("escape", "enter"):
        # Quit (even if not in infinite mode; this is consistent with
        # MATLAB and sometimes quite useful, but will require the user to
        # test how many points were actually returned before using data).
        canvas.stop_event_loop()
    elif button == MouseButton.RIGHT or key in ("backspace", "delete"):
        # Pop last click.  Unfortunately, if one is doing inline labels, then
        # there is currently no way to fix the broken contour - once
        # humpty-dumpty is broken, he can't be put back together.  In inline
        # mode, this does nothing.
        if not inline:
            cs.pop_label()
            canvas.draw()
    elif button == MouseButton.LEFT or key is not None:
        # Add new click.  (On macOS/gtk, some keys return None, which is
        # filtered out by the *key is not None* test above.)
        if cs.axes.contains(event)[0]:
            cs.add_label_near(event.x, event.y, transform=False,
                              inline=inline, inline_spacing=inline_spacing)
            canvas.draw()
class ContourLabeler:
    """Mixin to provide labelling capability to `.ContourSet`."""

    def clabel(self, levels=None, *,
               fontsize=None, inline=True, inline_spacing=5, fmt=None,
               colors=None, use_clabeltext=False, manual=False,
               rightside_up=True, zorder=None):
        """
        Label a contour plot.

        Adds labels to line contours in this `.ContourSet` (which inherits from
        this mixin class).

        Parameters
        ----------
        levels : array-like, optional
            A list of level values, that should be labeled. The list must be
            a subset of ``cs.levels``. If not given, all levels are labeled.

        fontsize : str or float, default: :rc:`font.size`
            Size in points or relative size e.g., 'smaller', 'x-large'.
            See `.Text.set_size` for accepted string values.

        colors : :mpltype:`color` or colors or None, default: None
            The label colors:

            - If *None*, the color of each label matches the color of
              the corresponding contour.

            - If one string color, e.g., *colors* = 'r' or *colors* =
              'red', all labels will be plotted in this color.

            - If a tuple of colors (string, float, RGB, etc), different labels
              will be plotted in different colors in the order specified.

        inline : bool, default: True
            If ``True`` the underlying contour is removed where the label is
            placed.

        inline_spacing : float, default: 5
            Space in pixels to leave on each side of label when placing inline.

            This spacing will be exact for labels at locations where the
            contour is straight, less so for labels on curved contours.

        fmt : `.Formatter` or str or callable or dict, optional
            How the levels are formatted:

            - If a `.Formatter`, it is used to format all levels at once, using
              its `.Formatter.format_ticks` method.
            - If a str, it is interpreted as a %-style format string.
            - If a callable, it is called with one level at a time and should
              return the corresponding label.
            - If a dict, it should directly map levels to labels.

            The default is to use a standard `.ScalarFormatter`.

        manual : bool or iterable, default: False
            If ``True``, contour labels will be placed manually using
            mouse clicks. Click the first button near a contour to
            add a label, click the second button (or potentially both
            mouse buttons at once) to finish adding labels. The third
            button can be used to remove the last label added, but
            only if labels are not inline. Alternatively, the keyboard
            can be used to select label locations (enter to end label
            placement, delete or backspace act like the third mouse button,
            and any other key will select a label location).

            *manual* can also be an iterable object of (x, y) tuples.
            Contour labels will be created as if mouse is clicked at each
            (x, y) position.

        rightside_up : bool, default: True
            If ``True``, label rotations will always be plus
            or minus 90 degrees from level.

        use_clabeltext : bool, default: False
            If ``True``, use `.Text.set_transform_rotates_text` to ensure that
            label rotation is updated whenever the Axes aspect changes.

        zorder : float or None, default: ``(2 + contour.get_zorder())``
            zorder of the contour labels.

        Returns
        -------
        labels
            A list of `.Text` instances for the labels.
        """
        # Based on the input arguments, clabel() adds a list of "label
        # specific" attributes to the ContourSet object.  These attributes are
        # all of the form label* and names should be fairly self explanatory.
        #
        # Once these attributes are set, clabel passes control to the labels()
        # method (for automatic label placement) or blocking_input_loop and
        # _contour_labeler_event_handler (for manual label placement).

        if fmt is None:
            fmt = ticker.ScalarFormatter(useOffset=False)
            fmt.create_dummy_axis()
        self.labelFmt = fmt
        self._use_clabeltext = use_clabeltext
        self.labelManual = manual
        self.rightside_up = rightside_up
        # Labels default to floating just above the contour artists.
        self._clabel_zorder = 2 + self.get_zorder() if zorder is None else zorder

        if levels is None:
            levels = self.levels
            indices = list(range(len(self.cvalues)))
        else:
            # Keep only the requested levels, preserving their index into
            # self.levels; reject any level that is not an existing one.
            levlabs = list(levels)
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                raise ValueError(f"Specified levels {levlabs} don't match "
                                 f"available levels {self.levels}")
        self.labelLevelList = levels
        self.labelIndiceList = indices

        self._label_font_props = font_manager.FontProperties(size=fontsize)

        if colors is None:
            # Reuse this ContourSet's own mapping so labels match the contours.
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            # Dedicated mappable cycling through the user-supplied colors.
            cmap = mcolors.ListedColormap(colors, N=len(self.labelLevelList))
            self.labelCValueList = list(range(len(self.labelLevelList)))
            self.labelMappable = cm.ScalarMappable(cmap=cmap,
                                                   norm=mcolors.NoNorm())

        self.labelXYs = []

        if np.iterable(manual):
            # Treat each (x, y) pair as if the user had clicked there.
            for x, y in manual:
                self.add_label_near(x, y, inline, inline_spacing)
        elif manual:
            print('Select label locations manually using first mouse button.')
            print('End manual selection with second mouse button.')
            if not inline:
                print('Remove last label by clicking third mouse button.')
            mpl._blocking_input.blocking_input_loop(
                self.axes.get_figure(root=True),
                ["button_press_event", "key_press_event"],
                timeout=-1, handler=functools.partial(
                    _contour_labeler_event_handler,
                    self, inline, inline_spacing))
        else:
            # Fully automatic placement.
            self.labels(inline, inline_spacing)

        return cbook.silent_list('text.Text', self.labelTexts)

    def print_label(self, linecontour, labelwidth):
        """Return whether a contour is long enough to hold a label."""
        return (len(linecontour) > 10 * labelwidth
                or (len(linecontour)
                    and (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any()))

    def too_close(self, x, y, lw):
        """Return whether a label is already near this location."""
        thresh = (1.2 * lw) ** 2
        return any((x - loc[0]) ** 2 + (y - loc[1]) ** 2 < thresh
                   for loc in self.labelXYs)

    def _get_nth_label_width(self, nth):
        """Return the width of the *nth* label, in pixels."""
        fig = self.axes.get_figure(root=False)
        renderer = fig.get_figure(root=True)._get_renderer()
        # Measure a throwaway Text with the same format and font properties.
        return (Text(0, 0,
                     self.get_text(self.labelLevelList[nth], self.labelFmt),
                     figure=fig, fontproperties=self._label_font_props)
                .get_window_extent(renderer).width)

    def get_text(self, lev, fmt):
        """
        Get the text of the label for level *lev*.

        *fmt* may be a dict (level -> label), an object with a
        ``format_ticks`` method (e.g. a `.Formatter`), a callable, or a
        %-style format string; strings in *lev* pass through unchanged.
        """
        if isinstance(lev, str):
            return lev
        elif isinstance(fmt, dict):
            return fmt.get(lev, '%1.3f')
        elif callable(getattr(fmt, "format_ticks", None)):
            # Format all levels together so the Formatter can pick consistent
            # precision; keep only the entry for *lev*.
            return fmt.format_ticks([*self.labelLevelList, lev])[-1]
        elif callable(fmt):
            return fmt(lev)
        else:
            return fmt % lev

    def locate_label(self, linecontour, labelwidth):
        """
        Find good place to draw a label (relatively flat part of the contour).

        Returns ``(x, y, index)`` where *index* is the vertex index of
        *linecontour* nearest the chosen position.
        """
        ctr_size = len(linecontour)
        n_blocks = int(np.ceil(ctr_size / labelwidth)) if labelwidth > 1 else 1
        block_size = ctr_size if n_blocks == 1 else int(labelwidth)
        # Split contour into blocks of length ``block_size``, filling the last
        # block by cycling the contour start (per `np.resize` semantics).  (Due
        # to cycling, the index returned is taken modulo ctr_size.)
        xx = np.resize(linecontour[:, 0], (n_blocks, block_size))
        yy = np.resize(linecontour[:, 1], (n_blocks, block_size))
        yfirst = yy[:, :1]
        ylast = yy[:, -1:]
        xfirst = xx[:, :1]
        xlast = xx[:, -1:]
        # Twice the signed area of the triangle (chord, vertex) for each
        # vertex; summed below as a straightness measure of each block.
        s = (yfirst - yy) * (xlast - xfirst) - (xfirst - xx) * (ylast - yfirst)
        l = np.hypot(xlast - xfirst, ylast - yfirst)
        # Ignore warning that divide by zero throws, as this is a valid option
        with np.errstate(divide='ignore', invalid='ignore'):
            distances = (abs(s) / l).sum(axis=-1)
        # Labels are drawn in the middle of the block (``hbsize``) where the
        # contour is the closest (per ``distances``) to a straight line, but
        # not `too_close()` to a preexisting label.
        hbsize = block_size // 2
        adist = np.argsort(distances)
        # If all candidates are `too_close()`, go back to the straightest part
        # (``adist[0]``).
        for idx in np.append(adist, adist[0]):
            x, y = xx[idx, hbsize], yy[idx, hbsize]
            if not self.too_close(x, y, labelwidth):
                break
        return x, y, (idx * block_size + hbsize) % ctr_size

    def _split_path_and_get_label_rotation(self, path, idx, screen_pos, lw, spacing=5):
        """
        Prepare for insertion of a label at index *idx* of *path*.

        Parameters
        ----------
        path : Path
            The path where the label will be inserted, in data space.
        idx : int
            The vertex index after which the label will be inserted.
        screen_pos : (float, float)
            The position where the label will be inserted, in screen space.
        lw : float
            The label width, in screen space.
        spacing : float
            Extra spacing around the label, in screen space.

        Returns
        -------
        path : Path
            The path, broken so that the label can be drawn over it.
        angle : float
            The rotation of the label.

        Notes
        -----
        Both tasks are done together to avoid calculating path lengths multiple times,
        which is relatively costly.

        The method used here involves computing the path length along the contour in
        pixel coordinates and then looking (label width / 2) away from central point to
        determine rotation and then to break contour if desired.  The extra spacing is
        taken into account when breaking the path, but not when computing the angle.
        """
        xys = path.vertices
        codes = path.codes

        # Insert a vertex at idx/pos (converting back to data space), if there isn't yet
        # a vertex there.  With infinite precision one could also always insert the
        # extra vertex (it will get masked out by the label below anyways), but floating
        # point inaccuracies (the point can have undergone a data->screen->data
        # transform loop) can slightly shift the point and e.g. shift the angle computed
        # below from exactly zero to nonzero.
        pos = self.get_transform().inverted().transform(screen_pos)
        if not np.allclose(pos, xys[idx]):
            xys = np.insert(xys, idx, pos, axis=0)
            codes = np.insert(codes, idx, Path.LINETO)

        # Find the connected component where the label will be inserted.  Note that a
        # path always starts with a MOVETO, and we consider there's an implicit
        # MOVETO (closing the last path) at the end.
        movetos = (codes == Path.MOVETO).nonzero()[0]
        start = movetos[movetos <= idx][-1]
        try:
            stop = movetos[movetos > idx][0]
        except IndexError:
            stop = len(codes)

        # Restrict ourselves to the connected component.
        cc_xys = xys[start:stop]
        idx -= start

        # If the path is closed, rotate it s.t. it starts at the label.
        is_closed_path = codes[stop - 1] == Path.CLOSEPOLY
        if is_closed_path:
            cc_xys = np.concatenate([cc_xys[idx:-1], cc_xys[:idx+1]])
            idx = 0

        # Like np.interp, but additionally vectorized over fp.
        def interp_vec(x, xp, fp): return [np.interp(x, xp, col) for col in fp.T]

        # Use cumulative path lengths ("cpl") as curvilinear coordinate along contour.
        screen_xys = self.get_transform().transform(cc_xys)
        path_cpls = np.insert(
            np.cumsum(np.hypot(*np.diff(screen_xys, axis=0).T)), 0, 0)
        path_cpls -= path_cpls[idx]

        # Use linear interpolation to get end coordinates of label.
        target_cpls = np.array([-lw/2, lw/2])
        if is_closed_path:  # For closed paths, target from the other end.
            target_cpls[0] += (path_cpls[-1] - path_cpls[0])
        (sx0, sx1), (sy0, sy1) = interp_vec(target_cpls, path_cpls, screen_xys)
        angle = np.rad2deg(np.arctan2(sy1 - sy0, sx1 - sx0))  # Screen space.
        if self.rightside_up:  # Fix angle so text is never upside-down
            angle = (angle + 90) % 180 - 90

        target_cpls += [-spacing, +spacing]  # Expand range by spacing.

        # Get indices near points of interest; use -1 as out of bounds marker.
        i0, i1 = np.interp(target_cpls, path_cpls, range(len(path_cpls)),
                           left=-1, right=-1)
        i0 = math.floor(i0)
        i1 = math.ceil(i1)
        (x0, x1), (y0, y1) = interp_vec(target_cpls, path_cpls, cc_xys)

        # Actually break contours (dropping zero-len parts).
        new_xy_blocks = []
        new_code_blocks = []
        if is_closed_path:
            if i0 != -1 and i1 != -1:
                # This is probably wrong in the case that the entire contour would
                # be discarded, but ensures that a valid path is returned and is
                # consistent with behavior of mpl <3.8
                points = cc_xys[i1:i0+1]
                new_xy_blocks.extend([[(x1, y1)], points, [(x0, y0)]])
                nlines = len(points) + 1
                new_code_blocks.extend([[Path.MOVETO], [Path.LINETO] * nlines])
        else:
            if i0 != -1:
                new_xy_blocks.extend([cc_xys[:i0 + 1], [(x0, y0)]])
                new_code_blocks.extend([[Path.MOVETO], [Path.LINETO] * (i0 + 1)])
            if i1 != -1:
                new_xy_blocks.extend([[(x1, y1)], cc_xys[i1:]])
                new_code_blocks.extend([
                    [Path.MOVETO], [Path.LINETO] * (len(cc_xys) - i1)])

        # Back to the full path.
        xys = np.concatenate([xys[:start], *new_xy_blocks, xys[stop:]])
        codes = np.concatenate([codes[:start], *new_code_blocks, codes[stop:]])

        return angle, Path(xys, codes)

    def add_label(self, x, y, rotation, lev, cvalue):
        """Add a contour label, respecting whether *use_clabeltext* was set."""
        data_x, data_y = self.axes.transData.inverted().transform((x, y))
        t = Text(
            data_x, data_y,
            text=self.get_text(lev, self.labelFmt),
            rotation=rotation,
            horizontalalignment='center', verticalalignment='center',
            zorder=self._clabel_zorder,
            color=self.labelMappable.to_rgba(cvalue, alpha=self.get_alpha()),
            fontproperties=self._label_font_props,
            clip_box=self.axes.bbox)
        if self._use_clabeltext:
            # Convert the screen-space rotation to data space so the label
            # re-rotates with the Axes aspect.
            data_rotation, = self.axes.transData.inverted().transform_angles(
                [rotation], [[x, y]])
            t.set(rotation=data_rotation, transform_rotates_text=True)
        self.labelTexts.append(t)
        self.labelCValues.append(cvalue)
        self.labelXYs.append((x, y))
        # Add label to plot here - useful for manual mode label selection
        self.axes.add_artist(t)

    def add_label_near(self, x, y, inline=True, inline_spacing=5,
                       transform=None):
        """
        Add a label near the point ``(x, y)``.

        Parameters
        ----------
        x, y : float
            The approximate location of the label.
        inline : bool, default: True
            If *True* remove the segment of the contour beneath the label.
        inline_spacing : int, default: 5
            Space in pixels to leave on each side of label when placing
            inline.  This spacing will be exact for labels at locations where
            the contour is straight, less so for labels on curved contours.
        transform : `.Transform` or `False`, default: ``self.axes.transData``
            A transform applied to ``(x, y)`` before labeling.  The default
            causes ``(x, y)`` to be interpreted as data coordinates.  `False`
            is a synonym for `.IdentityTransform`; i.e. ``(x, y)`` should be
            interpreted as display coordinates.
        """
        if transform is None:
            transform = self.axes.transData
        if transform:
            x, y = transform.transform((x, y))

        # Find the nearest label-able contour and the projection of (x, y)
        # onto it, restricted to the levels selected for labelling.
        idx_level_min, idx_vtx_min, proj = self._find_nearest_contour(
            (x, y), self.labelIndiceList)
        path = self._paths[idx_level_min]
        level = self.labelIndiceList.index(idx_level_min)
        label_width = self._get_nth_label_width(level)
        rotation, path = self._split_path_and_get_label_rotation(
            path, idx_vtx_min, proj, label_width, inline_spacing)
        self.add_label(*proj, rotation, self.labelLevelList[idx_level_min],
                       self.labelCValueList[idx_level_min])

        if inline:
            # Replace the original path by the broken one.
            self._paths[idx_level_min] = path

    def pop_label(self, index=-1):
        """Defaults to removing last label, but any index can be supplied"""
        self.labelCValues.pop(index)
        t = self.labelTexts.pop(index)
        t.remove()

    def labels(self, inline, inline_spacing):
        """Automatically place labels on all levels selected by `clabel`."""
        for idx, (icon, lev, cvalue) in enumerate(zip(
                self.labelIndiceList,
                self.labelLevelList,
                self.labelCValueList,
        )):
            trans = self.get_transform()
            label_width = self._get_nth_label_width(idx)
            additions = []
            for subpath in self._paths[icon]._iter_connected_components():
                screen_xys = trans.transform(subpath.vertices)
                # Check if long enough for a label
                if self.print_label(screen_xys, label_width):
                    # NOTE: reuses the name *idx* for the vertex index,
                    # shadowing the enumerate counter for the rest of this
                    # iteration.
                    x, y, idx = self.locate_label(screen_xys, label_width)
                    rotation, path = self._split_path_and_get_label_rotation(
                        subpath, idx, (x, y),
                        label_width, inline_spacing)
                    self.add_label(x, y, rotation, lev, cvalue)  # Really add label.
                    if inline:  # If inline, add new contours
                        additions.append(path)
                else:  # If not adding label, keep old path
                    additions.append(subpath)
            # After looping over all segments on a contour, replace old path by new one
            # if inlining.
            if inline:
                self._paths[icon] = Path.make_compound_path(*additions)

    def remove(self):
        """Remove this artist and all of its contour labels."""
        super().remove()
        for text in self.labelTexts:
            text.remove()
def _find_closest_point_on_path(xys, p):
"""
Parameters
----------
xys : (N, 2) array-like
Coordinates of vertices.
p : (float, float)
Coordinates of point.
Returns
-------
d2min : float
Minimum square distance of *p* to *xys*.
proj : (float, float)
Projection of *p* onto *xys*.
imin : (int, int)
Consecutive indices of vertices of segment in *xys* where *proj* is.
Segments are considered as including their end-points; i.e. if the
closest point on the path is a node in *xys* with index *i*, this
returns ``(i-1, i)``. For the special case where *xys* is a single
point, this returns ``(0, 0)``.
"""
if len(xys) == 1:
return (((p - xys[0]) ** 2).sum(), xys[0], (0, 0))
dxys = xys[1:] - xys[:-1] # Individual segment vectors.
norms = (dxys ** 2).sum(axis=1)
norms[norms == 0] = 1 # For zero-length segment, replace 0/0 by 0/1.
rel_projs = np.clip( # Project onto each segment in relative 0-1 coords.
((p - xys[:-1]) * dxys).sum(axis=1) / norms,
0, 1)[:, None]
projs = xys[:-1] + rel_projs * dxys # Projs. onto each segment, in (x, y).
d2s = ((projs - p) ** 2).sum(axis=1) # Squared distances.
imin = np.argmin(d2s)
return (d2s[imin], projs[imin], (imin, imin+1))
_docstring.interpd.register(contour_set_attributes=r"""
Attributes
----------
levels : array
The values of the contour levels.
layers : array
Same as levels for line contours; half-way between
levels for filled contours. See ``ContourSet._process_colors``.
""")
@_docstring.interpd
class ContourSet(ContourLabeler, mcoll.Collection):
"""
Store a set of contour lines or filled regions.
User-callable method: `~.Axes.clabel`
Parameters
----------
ax : `~matplotlib.axes.Axes`
levels : [level0, level1, ..., leveln]
A list of floating point numbers indicating the contour levels.
allsegs : [level0segs, level1segs, ...]
List of all the polygon segments for all the *levels*.
For contour lines ``len(allsegs) == len(levels)``, and for
filled contour regions ``len(allsegs) = len(levels)-1``. The lists
should look like ::
level0segs = [polygon0, polygon1, ...]
polygon0 = [[x0, y0], [x1, y1], ...]
allkinds : ``None`` or [level0kinds, level1kinds, ...]
Optional list of all the polygon vertex kinds (code types), as
described and used in Path. This is used to allow multiply-
connected paths such as holes within filled polygons.
If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
should look like ::
level0kinds = [polygon0kinds, ...]
polygon0kinds = [vertexcode0, vertexcode1, ...]
If *allkinds* is not ``None``, usually all polygons for a
particular contour level are grouped together so that
``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.
**kwargs
Keyword arguments are as described in the docstring of
`~.Axes.contour`.
%(contour_set_attributes)s
"""
    def __init__(self, ax, *args,
                 levels=None, filled=False, linewidths=None, linestyles=None,
                 hatches=(None,), alpha=None, origin=None, extent=None,
                 cmap=None, colors=None, norm=None, vmin=None, vmax=None,
                 colorizer=None, extend='neither', antialiased=None, nchunk=0,
                 locator=None, transform=None, negative_linestyles=None, clip_path=None,
                 **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg *filled* is ``False`` (default) or ``True``.

        Call signature::

            ContourSet(ax, levels, allsegs, [allkinds], **kwargs)

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The `~.axes.Axes` object to draw on.

        levels : [level0, level1, ..., leveln]
            A list of floating point numbers indicating the contour
            levels.

        allsegs : [level0segs, level1segs, ...]
            List of all the polygon segments for all the *levels*.
            For contour lines ``len(allsegs) == len(levels)``, and for
            filled contour regions ``len(allsegs) = len(levels)-1``. The lists
            should look like ::

                level0segs = [polygon0, polygon1, ...]
                polygon0 = [[x0, y0], [x1, y1], ...]

        allkinds : [level0kinds, level1kinds, ...], optional
            Optional list of all the polygon vertex kinds (code types), as
            described and used in Path. This is used to allow multiply-
            connected paths such as holes within filled polygons.
            If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
            should look like ::

                level0kinds = [polygon0kinds, ...]
                polygon0kinds = [vertexcode0, vertexcode1, ...]

            If *allkinds* is not ``None``, usually all polygons for a
            particular contour level are grouped together so that
            ``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.

        **kwargs
            Keyword arguments are as described in the docstring of
            `~.Axes.contour`.
        """
        if antialiased is None and filled:
            # Eliminate artifacts; we are not stroking the boundaries.
            antialiased = False
            # The default for line contours will be taken from the
            # LineCollection default, which uses :rc:`lines.antialiased`.
        super().__init__(
            antialiaseds=antialiased,
            alpha=alpha,
            clip_path=clip_path,
            transform=transform,
            colorizer=colorizer,
        )
        self.axes = ax
        self.levels = levels
        self.filled = filled
        self.hatches = hatches
        self.origin = origin
        self.extent = extent
        self.colors = colors
        self.extend = extend

        self.nchunk = nchunk
        self.locator = locator
        if colorizer:
            # A colorizer bundles cmap/norm; conflicting keywords are rejected
            # by the check below, then the bundled values take over.
            self._set_colorizer_check_keywords(colorizer, cmap=cmap,
                                               norm=norm, vmin=vmin,
                                               vmax=vmax, colors=colors)
            norm = colorizer.norm
            cmap = colorizer.cmap
        if (isinstance(norm, mcolors.LogNorm)
                or isinstance(self.locator, ticker.LogLocator)):
            self.logscale = True
            if norm is None:
                norm = mcolors.LogNorm()
        else:
            self.logscale = False

        _api.check_in_list([None, 'lower', 'upper', 'image'], origin=origin)
        if self.extent is not None and len(self.extent) != 4:
            raise ValueError(
                "If given, 'extent' must be None or (x0, x1, y0, y1)")
        if self.colors is not None and cmap is not None:
            raise ValueError('Either colors or cmap must be None')
        if self.origin == 'image':
            self.origin = mpl.rcParams['image.origin']

        self._orig_linestyles = linestyles  # Only kept for user access.
        self.negative_linestyles = negative_linestyles
        # If negative_linestyles was not defined as a keyword argument, define
        # negative_linestyles with rcParams
        if self.negative_linestyles is None:
            self.negative_linestyles = \
                mpl.rcParams['contour.negative_linestyle']

        kwargs = self._process_args(*args, **kwargs)
        self._process_levels()

        self._extend_min = self.extend in ['min', 'both']
        self._extend_max = self.extend in ['max', 'both']
        if self.colors is not None:
            if mcolors.is_color_like(self.colors):
                color_sequence = [self.colors]
            else:
                color_sequence = self.colors

            ncolors = len(self.levels)
            if self.filled:
                ncolors -= 1
            i0 = 0

            # Handle the case where colors are given for the extended
            # parts of the contour.

            use_set_under_over = False
            # if we are extending the lower end, and we've been given enough
            # colors then skip the first color in the resulting cmap. For the
            # extend_max case we don't need to worry about passing more colors
            # than ncolors as ListedColormap will clip.
            total_levels = (ncolors +
                            int(self._extend_min) +
                            int(self._extend_max))
            if (len(color_sequence) == total_levels and
                    (self._extend_min or self._extend_max)):
                use_set_under_over = True
                if self._extend_min:
                    i0 = 1

            cmap = mcolors.ListedColormap(color_sequence[i0:None], N=ncolors)

            if use_set_under_over:
                if self._extend_min:
                    cmap.set_under(color_sequence[0])
                if self._extend_max:
                    cmap.set_over(color_sequence[-1])

        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        self.set_cmap(cmap)
        if norm is not None:
            self.set_norm(norm)
        # Apply vmin/vmax with change-notifications suppressed, then fire a
        # single notification at the end.
        with self.norm.callbacks.blocked(signal="changed"):
            if vmin is not None:
                self.norm.vmin = vmin
            if vmax is not None:
                self.norm.vmax = vmax
            self.norm._changed()
        self._process_colors()

        if self._paths is None:
            self._paths = self._make_paths_from_contour_generator()

        if self.filled:
            if linewidths is not None:
                _api.warn_external('linewidths is ignored by contourf')
            # Lower and upper contour levels.  (Not referenced again in this
            # method.)
            lowers, uppers = self._get_lowers_and_uppers()
            self.set(
                edgecolor="none",
                # Default zorder taken from Collection
                zorder=kwargs.pop("zorder", 1),
            )
        else:
            self.set(
                facecolor="none",
                linewidths=self._process_linewidths(linewidths),
                linestyle=self._process_linestyles(linestyles),
                # Default zorder taken from LineCollection, which is higher
                # than for filled contours so that lines are displayed on top.
                zorder=kwargs.pop("zorder", 2),
                label="_nolegend_",
            )

        self.axes.add_collection(self, autolim=False)
        self.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
        self.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
        self.axes.update_datalim([self._mins, self._maxs])
        self.axes.autoscale_view(tight=True)

        self.changed()  # set the colors

        if kwargs:
            _api.warn_external(
                'The following kwargs were not used by contour: ' +
                ", ".join(map(repr, kwargs))
            )
allsegs = property(lambda self: [
[subp.vertices for subp in p._iter_connected_components()]
for p in self.get_paths()])
allkinds = property(lambda self: [
[subp.codes for subp in p._iter_connected_components()]
for p in self.get_paths()])
alpha = property(lambda self: self.get_alpha())
linestyles = property(lambda self: self._orig_linestyles)
def get_transform(self):
"""Return the `.Transform` instance used by this ContourSet."""
if self._transform is None:
self._transform = self.axes.transData
elif (not isinstance(self._transform, mtransforms.Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def __getstate__(self):
state = self.__dict__.copy()
# the C object _contour_generator cannot currently be pickled. This
# isn't a big issue as it is not actually used once the contour has
# been calculated.
state['_contour_generator'] = None
return state
def legend_elements(self, variable_name='x', str_format=str):
"""
Return a list of artists and labels suitable for passing through
to `~.Axes.legend` which represent this ContourSet.
The labels have the form "0 < x <= 1" stating the data ranges which
the artists represent.
Parameters
----------
variable_name : str
The string used inside the inequality used on the labels.
str_format : function: float -> str
Function used to format the numbers in the labels.
Returns
-------
artists : list[`.Artist`]
A list of the artists.
labels : list[str]
A list of the labels.
"""
artists = []
labels = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
n_levels = len(self._paths)
for idx in range(n_levels):
artists.append(mpatches.Rectangle(
(0, 0), 1, 1,
facecolor=self.get_facecolor()[idx],
hatch=self.hatches[idx % len(self.hatches)],
))
lower = str_format(lowers[idx])
upper = str_format(uppers[idx])
if idx == 0 and self.extend in ('min', 'both'):
labels.append(fr'${variable_name} \leq {lower}s$')
elif idx == n_levels - 1 and self.extend in ('max', 'both'):
labels.append(fr'${variable_name} > {upper}s$')
else:
labels.append(fr'${lower} < {variable_name} \leq {upper}$')
else:
for idx, level in enumerate(self.levels):
artists.append(Line2D(
[], [],
color=self.get_edgecolor()[idx],
linewidth=self.get_linewidths()[idx],
linestyle=self.get_linestyles()[idx],
))
labels.append(fr'${variable_name} = {str_format(level)}$')
return artists, labels
def _process_args(self, *args, **kwargs):
"""
Process *args* and *kwargs*; override in derived classes.
Must set self.levels, self.zmin and self.zmax, and update Axes limits.
"""
self.levels = args[0]
allsegs = args[1]
allkinds = args[2] if len(args) > 2 else None
self.zmax = np.max(self.levels)
self.zmin = np.min(self.levels)
if allkinds is None:
allkinds = [[None] * len(segs) for segs in allsegs]
# Check lengths of levels and allsegs.
if self.filled:
if len(allsegs) != len(self.levels) - 1:
raise ValueError('must be one less number of segments as '
'levels')
else:
if len(allsegs) != len(self.levels):
raise ValueError('must be same number of segments as levels')
# Check length of allkinds.
if len(allkinds) != len(allsegs):
raise ValueError('allkinds has different length to allsegs')
# Determine x, y bounds and update axes data limits.
flatseglist = [s for seg in allsegs for s in seg]
points = np.concatenate(flatseglist, axis=0)
self._mins = points.min(axis=0)
self._maxs = points.max(axis=0)
# Each entry in (allsegs, allkinds) is a list of (segs, kinds): segs is a list
# of (N, 2) arrays of xy coordinates, kinds is a list of arrays of corresponding
# pathcodes. However, kinds can also be None; in which case all paths in that
# list are codeless (this case is normalized above). These lists are used to
# construct paths, which then get concatenated.
self._paths = [Path.make_compound_path(*map(Path, segs, kinds))
for segs, kinds in zip(allsegs, allkinds)]
return kwargs
def _make_paths_from_contour_generator(self):
"""Compute ``paths`` using C extension."""
if self._paths is not None:
return self._paths
cg = self._contour_generator
empty_path = Path(np.empty((0, 2)))
vertices_and_codes = (
map(cg.create_filled_contour, *self._get_lowers_and_uppers())
if self.filled else
map(cg.create_contour, self.levels))
return [Path(np.concatenate(vs), np.concatenate(cs)) if len(vs) else empty_path
for vs, cs in vertices_and_codes]
def _get_lowers_and_uppers(self):
"""
Return ``(lowers, uppers)`` for filled contours.
"""
lowers = self._levels[:-1]
if self.zmin == lowers[0]:
# Include minimum values in lowest interval
lowers = lowers.copy() # so we don't change self._levels
if self.logscale:
lowers[0] = 0.99 * self.zmin
else:
lowers[0] -= 1
uppers = self._levels[1:]
return (lowers, uppers)
def changed(self):
    # Recompute artist and label colors from the current colormap/norm
    # state, then notify listeners via the base-class changed().
    if not hasattr(self, "cvalues"):
        self._process_colors()  # Sets cvalues.
    # Force an autoscale immediately because self.to_rgba() calls
    # autoscale_None() internally with the data passed to it,
    # so if vmin/vmax are not set yet, this would override them with
    # content from *cvalues* rather than levels like we want
    self.norm.autoscale_None(self.levels)
    self.set_array(self.cvalues)
    self.update_scalarmappable()
    # One alpha per labelled value; a scalar alpha is broadcast across all
    # labels.
    alphas = np.broadcast_to(self.get_alpha(), len(self.cvalues))
    for label, cv, alpha in zip(self.labelTexts, self.labelCValues, alphas):
        label.set_alpha(alpha)
        label.set_color(self.labelMappable.to_rgba(cv))
    super().changed()
def _autolev(self, N):
"""
Select contour levels to span the data.
The target number of levels, *N*, is used only when the
scale is not log and default locator is used.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)
lev = self.locator.tick_values(self.zmin, self.zmax)
try:
if self.locator._symmetric:
return lev
except AttributeError:
pass
# Trim excess levels the locator may have supplied.
under = np.nonzero(lev < self.zmin)[0]
i0 = under[-1] if len(under) else 0
over = np.nonzero(lev > self.zmax)[0]
i1 = over[0] + 1 if len(over) else len(lev)
if self.extend in ('min', 'both'):
i0 += 1
if self.extend in ('max', 'both'):
i1 -= 1
if i1 - i0 < 3:
i0, i1 = 0, len(lev)
return lev[i0:i1]
def _process_contour_level_args(self, args, z_dtype):
"""
Determine the contour levels and store in self.levels.
"""
if self.levels is None:
if args:
levels_arg = args[0]
elif np.issubdtype(z_dtype, bool):
if self.filled:
levels_arg = [0, .5, 1]
else:
levels_arg = [.5]
else:
levels_arg = 7 # Default, hard-wired.
else:
levels_arg = self.levels
if isinstance(levels_arg, Integral):
self.levels = self._autolev(levels_arg)
else:
self.levels = np.asarray(levels_arg, np.float64)
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
raise ValueError("Contour levels must be increasing")
def _process_levels(self):
"""
Assign values to :attr:`layers` based on :attr:`levels`,
adding extended layers as needed if contours are filled.
For line contours, layers simply coincide with levels;
a line is a thin layer. No extended levels are needed
with line contours.
"""
# Make a private _levels to include extended regions; we
# want to leave the original levels attribute unchanged.
# (Colorbar needs this even for line contours.)
self._levels = list(self.levels)
if self.logscale:
lower, upper = 1e-250, 1e250
else:
lower, upper = -1e250, 1e250
if self.extend in ('both', 'min'):
self._levels.insert(0, lower)
if self.extend in ('both', 'max'):
self._levels.append(upper)
self._levels = np.asarray(self._levels)
if not self.filled:
self.layers = self.levels
return
# Layer values are mid-way between levels in screen space.
if self.logscale:
# Avoid overflow by taking sqrt before multiplying.
self.layers = (np.sqrt(self._levels[:-1])
* np.sqrt(self._levels[1:]))
else:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
def _process_colors(self):
    """
    Color argument processing for contouring.

    Note that we base the colormapping on the contour levels
    and layers, not on the actual range of the Z values. This
    means we don't have to worry about bad values in Z, and we
    always have the full dynamic range available for the selected
    levels.

    The color is based on the midpoint of the layer, except for
    extended end layers. By default, the norm vmin and vmax
    are the extreme values of the non-extended levels. Hence,
    the layer color extremes are not the extreme values of
    the colormap itself, but approach those values as the number
    of levels increases. An advantage of this scheme is that
    line contours, when added to filled contours, take on
    colors that are consistent with those of the filled regions;
    for example, a contour line on the boundary between two
    regions will have a color intermediate between those
    of the regions.
    """
    self.monochrome = self.cmap.monochrome
    if self.colors is not None:
        # Generate integers for direct indexing.
        i0, i1 = 0, len(self.levels)
        if self.filled:
            # Filled plots have one region (one color) fewer than levels.
            i1 -= 1
        # Out of range indices for over and under:
        if self.extend in ('both', 'min'):
            i0 -= 1
        if self.extend in ('both', 'max'):
            i1 += 1
        self.cvalues = list(range(i0, i1))
        # NoNorm passes the integer cvalues through unscaled.
        self.set_norm(mcolors.NoNorm())
    else:
        # Colormap-driven coloring: map layer midpoints through the norm.
        self.cvalues = self.layers
        self.norm.autoscale_None(self.levels)
    self.set_array(self.cvalues)
    self.update_scalarmappable()
    if self.extend in ('both', 'max', 'min'):
        # Leave out-of-range values unclipped so they can map to the
        # colormap's over/under colors.
        self.norm.clip = False
def _process_linewidths(self, linewidths):
Nlev = len(self.levels)
if linewidths is None:
default_linewidth = mpl.rcParams['contour.linewidth']
if default_linewidth is None:
default_linewidth = mpl.rcParams['lines.linewidth']
return [default_linewidth] * Nlev
elif not np.iterable(linewidths):
return [linewidths] * Nlev
else:
linewidths = list(linewidths)
return (linewidths * math.ceil(Nlev / len(linewidths)))[:Nlev]
def _process_linestyles(self, linestyles):
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
eps = - (self.zmax - self.zmin) * 1e-15
for i, lev in enumerate(self.levels):
if lev < eps:
tlinestyles[i] = self.negative_linestyles
else:
if isinstance(linestyles, str):
tlinestyles = [linestyles] * Nlev
elif np.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev / len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def _find_nearest_contour(self, xy, indices=None):
    """
    Find the point in the unfilled contour plot that is closest (in screen
    space) to point *xy*.

    Parameters
    ----------
    xy : tuple[float, float]
        The reference point (in screen space).
    indices : list of int or None, default: None
        Indices of contour levels to consider. If None (the default), all levels
        are considered.

    Returns
    -------
    idx_level_min : int
        The index of the contour level closest to *xy*.
    idx_vtx_min : int
        The index of the `.Path` segment closest to *xy* (at that level).
    proj : (float, float)
        The point in the contour plot closest to *xy*.
    """
    # Convert each contour segment to pixel coordinates and then compare the given
    # point to those coordinates for each contour. This is fast enough in normal
    # cases, but speedups may be possible.
    if self.filled:
        raise ValueError("Method does not support filled contours")
    if indices is None:
        indices = range(len(self._paths))
    # Running minimum over all considered levels and components.
    d2min = np.inf
    idx_level_min = idx_vtx_min = proj_min = None
    for idx_level in indices:
        path = self._paths[idx_level]
        # Offset so the returned vertex index refers to the whole compound
        # path rather than the current connected component.
        idx_vtx_start = 0
        for subpath in path._iter_connected_components():
            if not len(subpath.vertices):
                continue
            # Compare in screen (pixel) coordinates.
            lc = self.get_transform().transform(subpath.vertices)
            d2, proj, leg = _find_closest_point_on_path(lc, xy)
            if d2 < d2min:
                d2min = d2
                idx_level_min = idx_level
                idx_vtx_min = leg[1] + idx_vtx_start
                proj_min = proj
            idx_vtx_start += len(subpath)
    return idx_level_min, idx_vtx_min, proj_min
def find_nearest_contour(self, x, y, indices=None, pixel=True):
    """
    Find the point in the contour plot that is closest to ``(x, y)``.

    This method does not support filled contours.

    Parameters
    ----------
    x, y : float
        The reference point.
    indices : list of int or None, default: None
        Indices of contour levels to consider. If None (the default), all
        levels are considered.
    pixel : bool, default: True
        If *True*, measure distance in pixel (screen) space, which is
        useful for manual contour labeling; else, measure distance in axes
        space.

    Returns
    -------
    path : int
        The index of the path that is closest to ``(x, y)``. Each path corresponds
        to one contour level.
    subpath : int
        The index within that closest path of the subpath that is closest to
        ``(x, y)``. Each subpath corresponds to one unbroken contour line.
    index : int
        The index of the vertices within that subpath that are closest to
        ``(x, y)``.
    xmin, ymin : float
        The point in the contour plot that is closest to ``(x, y)``.
    d2 : float
        The squared distance from ``(xmin, ymin)`` to ``(x, y)``.
    """
    segment = index = d2 = None
    with ExitStack() as stack:
        if not pixel:
            # _find_nearest_contour works in pixel space. We want axes space, so
            # effectively disable the transformation here by setting to identity.
            stack.enter_context(self._cm_set(
                transform=mtransforms.IdentityTransform()))
        i_level, i_vtx, (xmin, ymin) = self._find_nearest_contour((x, y), indices)
    if i_level is not None:
        # Convert the flat vertex index into (connected component, index
        # within that component) using cumulative component lengths.
        cc_cumlens = np.cumsum(
            [*map(len, self._paths[i_level]._iter_connected_components())])
        segment = cc_cumlens.searchsorted(i_vtx, "right")
        index = i_vtx if segment == 0 else i_vtx - cc_cumlens[segment - 1]
        d2 = (xmin-x)**2 + (ymin-y)**2
    return (i_level, segment, index, xmin, ymin, d2)
def draw(self, renderer):
    # Render the contour set.  Without hatching (or for line contours) all
    # paths can be drawn in a single batched call.
    paths = self._paths
    n_paths = len(paths)
    if not self.filled or all(hatch is None for hatch in self.hatches):
        super().draw(renderer)
        return
    # In presence of hatching, draw contours one at a time.
    edgecolors = self.get_edgecolors()
    if edgecolors.size == 0:
        edgecolors = ("none",)
    for idx in range(n_paths):
        # Temporarily swap in just this level's path and its per-level
        # properties; sequences shorter than n_paths are cycled with %.
        with cbook._setattr_cm(self, _paths=[paths[idx]]), self._cm_set(
            hatch=self.hatches[idx % len(self.hatches)],
            array=[self.get_array()[idx]],
            linewidths=[self.get_linewidths()[idx % len(self.get_linewidths())]],
            linestyles=[self.get_linestyles()[idx % len(self.get_linestyles())]],
            edgecolors=edgecolors[idx % len(edgecolors)],
        ):
            super().draw(renderer)
@_docstring.interpd
class QuadContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions.

    This class is typically not instantiated directly by the user but by
    `~.Axes.contour` and `~.Axes.contourf`.

    %(contour_set_attributes)s
    """

    def _process_args(self, *args, corner_mask=None, algorithm=None, **kwargs):
        """
        Process args and kwargs.
        """
        if args and isinstance(args[0], QuadContourSet):
            # Copy data, range and generator state from an existing set.
            if self.levels is None:
                self.levels = args[0].levels
            self.zmin = args[0].zmin
            self.zmax = args[0].zmax
            self._corner_mask = args[0]._corner_mask
            contour_generator = args[0]._contour_generator
            self._mins = args[0]._mins
            self._maxs = args[0]._maxs
            self._algorithm = args[0]._algorithm
        else:
            import contourpy
            if algorithm is None:
                algorithm = mpl.rcParams['contour.algorithm']
            # Validate even an explicitly passed algorithm name.
            mpl.rcParams.validate["contour.algorithm"](algorithm)
            self._algorithm = algorithm
            if corner_mask is None:
                if self._algorithm == "mpl2005":
                    # mpl2005 does not support corner_mask=True so if not
                    # specifically requested then disable it.
                    corner_mask = False
                else:
                    corner_mask = mpl.rcParams['contour.corner_mask']
            self._corner_mask = corner_mask
            x, y, z = self._contour_args(args, kwargs)
            contour_generator = contourpy.contour_generator(
                x, y, z, name=self._algorithm, corner_mask=self._corner_mask,
                line_type=contourpy.LineType.SeparateCode,
                fill_type=contourpy.FillType.OuterCode,
                chunk_size=self.nchunk)
            t = self.get_transform()
            # if the transform is not trans data, and some part of it
            # contains transData, transform the xs and ys to data coordinates
            # (NB: contains_branch_seperately is the actual API spelling).
            if (t != self.axes.transData and
                    any(t.contains_branch_seperately(self.axes.transData))):
                trans_to_data = t - self.axes.transData
                pts = np.vstack([x.flat, y.flat]).T
                transformed_pts = trans_to_data.transform(pts)
                x = transformed_pts[..., 0]
                y = transformed_pts[..., 1]
            self._mins = [ma.min(x), ma.min(y)]
            self._maxs = [ma.max(x), ma.max(y)]
        self._contour_generator = contour_generator
        return kwargs

    def _contour_args(self, args, kwargs):
        """
        Parse positional arguments into ``(x, y, z)``, computing the z
        range and the contour levels along the way.
        """
        if self.filled:
            fn = 'contourf'
        else:
            fn = 'contour'
        nargs = len(args)
        if 0 < nargs <= 2:
            # contour(Z[, levels]) -- synthesize x and y.
            z, *args = args
            z = ma.asarray(z)
            x, y = self._initialize_x_y(z)
        elif 2 < nargs <= 4:
            # contour(X, Y, Z[, levels])
            x, y, z_orig, *args = args
            x, y, z = self._check_xyz(x, y, z_orig, kwargs)
        else:
            raise _api.nargs_error(fn, takes="from 1 to 4", given=nargs)
        z = ma.masked_invalid(z, copy=False)
        self.zmax = z.max().astype(float)
        self.zmin = z.min().astype(float)
        if self.logscale and self.zmin <= 0:
            # Non-positive values cannot be log-scaled; mask them out.
            z = ma.masked_where(z <= 0, z)
            _api.warn_external('Log scale: values of z <= 0 have been masked')
            self.zmin = z.min().astype(float)
        self._process_contour_level_args(args, z.dtype)
        return (x, y, z)

    def _check_xyz(self, x, y, z, kwargs):
        """
        Check that the shapes of the input arrays match; if x and y are 1D,
        convert them to 2D using meshgrid.
        """
        x, y = self.axes._process_unit_info([("x", x), ("y", y)], kwargs)
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = ma.asarray(z)
        if z.ndim != 2:
            raise TypeError(f"Input z must be 2D, not {z.ndim}D")
        if z.shape[0] < 2 or z.shape[1] < 2:
            raise TypeError(f"Input z must be at least a (2, 2) shaped array, "
                            f"but has shape {z.shape}")
        Ny, Nx = z.shape
        if x.ndim != y.ndim:
            raise TypeError(f"Number of dimensions of x ({x.ndim}) and y "
                            f"({y.ndim}) do not match")
        if x.ndim == 1:
            # 1D coordinates must match z's columns/rows, then be gridded.
            nx, = x.shape
            ny, = y.shape
            if nx != Nx:
                raise TypeError(f"Length of x ({nx}) must match number of "
                                f"columns in z ({Nx})")
            if ny != Ny:
                raise TypeError(f"Length of y ({ny}) must match number of "
                                f"rows in z ({Ny})")
            x, y = np.meshgrid(x, y)
        elif x.ndim == 2:
            # 2D coordinates must match z exactly.
            if x.shape != z.shape:
                raise TypeError(
                    f"Shapes of x {x.shape} and z {z.shape} do not match")
            if y.shape != z.shape:
                raise TypeError(
                    f"Shapes of y {y.shape} and z {z.shape} do not match")
        else:
            raise TypeError(f"Inputs x and y must be 1D or 2D, not {x.ndim}D")
        return x, y, z

    def _initialize_x_y(self, z):
        """
        Return X, Y arrays such that contour(Z) will match imshow(Z)
        if origin is not None.

        The center of pixel Z[i, j] depends on origin:
        if origin is None, x = j, y = i;
        if origin is 'lower', x = j + 0.5, y = i + 0.5;
        if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5

        If extent is not None, x and y will be scaled to match,
        as in imshow.

        If origin is None and extent is not None, then extent
        will give the minimum and maximum values of x and y.
        """
        if z.ndim != 2:
            raise TypeError(f"Input z must be 2D, not {z.ndim}D")
        elif z.shape[0] < 2 or z.shape[1] < 2:
            raise TypeError(f"Input z must be at least a (2, 2) shaped array, "
                            f"but has shape {z.shape}")
        else:
            Ny, Nx = z.shape
        if self.origin is None:  # Not for image-matching.
            if self.extent is None:
                return np.meshgrid(np.arange(Nx), np.arange(Ny))
            else:
                # Spread the extent's corner values evenly over the grid.
                x0, x1, y0, y1 = self.extent
                x = np.linspace(x0, x1, Nx)
                y = np.linspace(y0, y1, Ny)
                return np.meshgrid(x, y)
        # Match image behavior:
        if self.extent is None:
            x0, x1, y0, y1 = (0, Nx, 0, Ny)
        else:
            x0, x1, y0, y1 = self.extent
        # Pixel centers: offset by half a pixel width from the boundary.
        dx = (x1 - x0) / Nx
        dy = (y1 - y0) / Ny
        x = x0 + (np.arange(Nx) + 0.5) * dx
        y = y0 + (np.arange(Ny) + 0.5) * dy
        if self.origin == 'upper':
            # Row 0 is at the top, so flip the y coordinates.
            y = y[::-1]
        return np.meshgrid(x, y)
# Shared docstring for `.Axes.contour` / `.Axes.contourf`.  The %-format
# pulls in the common colormapping parameter descriptions; the result is
# registered so other docstrings can interpolate %(contour_doc)s.
_docstring.interpd.register(contour_doc="""
`.contour` and `.contourf` draw contour lines and filled contours,
respectively. Except as noted, function signatures and return values
are the same for both versions.
Parameters
----------
X, Y : array-like, optional
The coordinates of the values in *Z*.
*X* and *Y* must both be 2D with the same shape as *Z* (e.g.
created via `numpy.meshgrid`), or they must both be 1-D such
that ``len(X) == N`` is the number of columns in *Z* and
``len(Y) == M`` is the number of rows in *Z*.
*X* and *Y* must both be ordered monotonically.
If not given, they are assumed to be integer indices, i.e.
``X = range(N)``, ``Y = range(M)``.
Z : (M, N) array-like
The height values over which the contour is drawn. Color-mapping is
controlled by *cmap*, *norm*, *vmin*, and *vmax*.
levels : int or array-like, optional
Determines the number and positions of the contour lines / regions.
If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries
to automatically choose no more than *n+1* "nice" contour levels
between minimum and maximum numeric values of *Z*.
If array-like, draw contour lines at the specified levels.
The values must be in increasing order.
Returns
-------
`~.contour.QuadContourSet`
Other Parameters
----------------
corner_mask : bool, default: :rc:`contour.corner_mask`
Enable/disable corner masking, which only has an effect if *Z* is
a masked array. If ``False``, any quad touching a masked point is
masked out. If ``True``, only the triangular corners of quads
nearest those points are always masked out, other triangular
corners comprising three unmasked points are contoured as usual.
colors : :mpltype:`color` or list of :mpltype:`color`, optional
The colors of the levels, i.e. the lines for `.contour` and the
areas for `.contourf`.
The sequence is cycled for the levels in ascending order. If the
sequence is shorter than the number of levels, it's repeated.
As a shortcut, a single color may be used in place of one-element lists, i.e.
``'red'`` instead of ``['red']`` to color all levels with the same color.
.. versionchanged:: 3.10
Previously a single color had to be expressed as a string, but now any
valid color format may be passed.
By default (value *None*), the colormap specified by *cmap*
will be used.
alpha : float, default: 1
The alpha blending value, between 0 (transparent) and 1 (opaque).
%(cmap_doc)s
This parameter is ignored if *colors* is set.
%(norm_doc)s
This parameter is ignored if *colors* is set.
%(vmin_vmax_doc)s
If *vmin* or *vmax* are not given, the default color scaling is based on
*levels*.
This parameter is ignored if *colors* is set.
%(colorizer_doc)s
This parameter is ignored if *colors* is set.
origin : {*None*, 'upper', 'lower', 'image'}, default: None
Determines the orientation and exact position of *Z* by specifying
the position of ``Z[0, 0]``. This is only relevant, if *X*, *Y*
are not given.
- *None*: ``Z[0, 0]`` is at X=0, Y=0 in the lower left corner.
- 'lower': ``Z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.
- 'upper': ``Z[0, 0]`` is at X=N+0.5, Y=0.5 in the upper left
corner.
- 'image': Use the value from :rc:`image.origin`.
extent : (x0, x1, y0, y1), optional
If *origin* is not *None*, then *extent* is interpreted as in
`.imshow`: it gives the outer pixel boundaries. In this case, the
position of Z[0, 0] is the center of the pixel, not a corner. If
*origin* is *None*, then (*x0*, *y0*) is the position of Z[0, 0],
and (*x1*, *y1*) is the position of Z[-1, -1].
This argument is ignored if *X* and *Y* are specified in the call
to contour.
locator : ticker.Locator subclass, optional
The locator is used to determine the contour levels if they
are not given explicitly via *levels*.
Defaults to `~.ticker.MaxNLocator`.
extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
Determines the ``contourf``-coloring of values that are outside the
*levels* range.
If 'neither', values outside the *levels* range are not colored.
If 'min', 'max' or 'both', color the values below, above or below
and above the *levels* range.
Values below ``min(levels)`` and above ``max(levels)`` are mapped
to the under/over values of the `.Colormap`. Note that most
colormaps do not have dedicated colors for these by default, so
that the over and under values are the edge values of the colormap.
You may want to set these values explicitly using
`.Colormap.set_under` and `.Colormap.set_over`.
.. note::
An existing `.QuadContourSet` does not get notified if
properties of its colormap are changed. Therefore, an explicit
call `~.ContourSet.changed()` is needed after modifying the
colormap. The explicit call can be left out, if a colorbar is
assigned to the `.QuadContourSet` because it internally calls
`~.ContourSet.changed()`.
Example::
x = np.arange(1, 10)
y = x.reshape(-1, 1)
h = x * y
cs = plt.contourf(h, levels=[10, 30, 50],
colors=['#808080', '#A0A0A0', '#C0C0C0'], extend='both')
cs.cmap.set_over('red')
cs.cmap.set_under('blue')
cs.changed()
xunits, yunits : registered units, optional
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
antialiased : bool, optional
Enable antialiasing, overriding the defaults. For
filled contours, the default is *False*. For line contours,
it is taken from :rc:`lines.antialiased`.
nchunk : int >= 0, optional
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of *nchunk* by *nchunk* quads.
Chunking reduces the maximum length of polygons generated by the
contouring algorithm which reduces the rendering workload passed
on to the backend and also requires slightly less RAM. It can
however introduce rendering artifacts at chunk boundaries depending
on the backend, the *antialiased* flag and value of *alpha*.
linewidths : float or array-like, default: :rc:`contour.linewidth`
*Only applies to* `.contour`.
The line width of the contour lines.
If a number, all levels will be plotted with this linewidth.
If a sequence, the levels in ascending order will be plotted with
the linewidths in the order specified.
If None, this falls back to :rc:`lines.linewidth`.
linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional
*Only applies to* `.contour`.
If *linestyles* is *None*, the default is 'solid' unless the lines are
monochrome. In that case, negative contours will instead take their
linestyle from the *negative_linestyles* argument.
*linestyles* can also be an iterable of the above strings specifying a set
of linestyles to be used. If this iterable is shorter than the number of
contour levels it will be repeated as necessary.
negative_linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, \
optional
*Only applies to* `.contour`.
If *linestyles* is *None* and the lines are monochrome, this argument
specifies the line style for negative contours.
If *negative_linestyles* is *None*, the default is taken from
:rc:`contour.negative_linestyle`.
*negative_linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this iterable is shorter than
the number of contour levels it will be repeated as necessary.
hatches : list[str], optional
*Only applies to* `.contourf`.
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour.
algorithm : {'mpl2005', 'mpl2014', 'serial', 'threaded'}, optional
Which contouring algorithm to use to calculate the contour lines and
polygons. The algorithms are implemented in
`ContourPy `_, consult the
`ContourPy documentation `_ for
further information.
The default is taken from :rc:`contour.algorithm`.
clip_path : `~matplotlib.patches.Patch` or `.Path` or `.TransformedPath`
Set the clip path. See `~matplotlib.artist.Artist.set_clip_path`.
.. versionadded:: 3.8
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
Notes
-----
1. `.contourf` differs from the MATLAB version in that it does not draw
the polygon edges. To draw edges, add line contours with calls to
`.contour`.
2. `.contourf` fills intervals that are closed at the top; that is, for
boundaries *z1* and *z2*, the filled region is::
z1 < Z <= z2
except for the lowest interval, which is closed on both sides (i.e.
it includes the lowest value).
3. `.contour` and `.contourf` use a `marching squares
`_ algorithm to
compute contour locations. More information can be found in
`ContourPy documentation `_.
""" % _docstring.interpd.params)
# ===== venv\Lib\site-packages\matplotlib\dates.py =====
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime` and the add-on module dateutil_.
By default, Matplotlib uses the units machinery described in
`~matplotlib.units` to convert `datetime.datetime`, and `numpy.datetime64`
objects when plotted on an x- or y-axis. The user does not
need to do anything for dates to be formatted, but dates often have strict
formatting needs, so this module provides many tick locators and formatters.
A basic example using `numpy.datetime64` is::
import numpy as np
times = np.arange(np.datetime64('2001-01-02'),
np.datetime64('2002-02-03'), np.timedelta64(75, 'm'))
y = np.random.randn(len(times))
fig, ax = plt.subplots()
ax.plot(times, y)
.. seealso::
- :doc:`/gallery/text_labels_and_annotations/date`
- :doc:`/gallery/ticks/date_concise_formatter`
- :doc:`/gallery/ticks/date_demo_convert`
.. _date-format:
Matplotlib date format
----------------------
Matplotlib represents dates using floating point numbers specifying the number
of days since a default epoch of 1970-01-01 UTC; for example,
1970-01-01, 06:00 is the floating point number 0.25. The formatters and
locators require the use of `datetime.datetime` objects, so only dates between
year 0001 and 9999 can be represented. Microsecond precision
is achievable for (approximately) 70 years on either side of the epoch, and
20 microseconds for the rest of the allowable range of dates (year 0001 to
9999). The epoch can be changed at import time via `.dates.set_epoch` or
:rc:`date.epoch` to other dates if necessary; see
:doc:`/gallery/ticks/date_precision_and_epochs` for a discussion.
.. note::
Before Matplotlib 3.3, the epoch was 0000-12-31 which lost modern
microsecond precision and also made the default axis limit of 0 an invalid
datetime. In 3.3 the epoch was changed as above. To convert old
ordinal floats to the new epoch, users can do::
new_ordinal = old_ordinal + mdates.date2num(np.datetime64('0000-12-31'))
There are a number of helper functions to convert between :mod:`datetime`
objects and Matplotlib dates:
.. currentmodule:: matplotlib.dates
.. autosummary::
:nosignatures:
datestr2num
date2num
num2date
num2timedelta
drange
set_epoch
get_epoch
.. note::
Like Python's `datetime.datetime`, Matplotlib uses the Gregorian calendar
for all conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and Matplotlib give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [1]: date(2006, 4, 1).toordinal() - date(1, 1, 1).toordinal()
Out[1]: 732401
All the Matplotlib date converters, locators and formatters are timezone aware.
If no explicit timezone is provided, :rc:`timezone` is assumed, provided as a
string. If you want to use a different timezone, pass the *tz* keyword
argument of `num2date` to any date tick locators or formatters you create. This
can be either a `datetime.tzinfo` instance or a string with the timezone name
that can be parsed by `~dateutil.tz.gettz`.
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
The dateutil_ module provides additional code to handle date ticking, making it
easy to place ticks on any kinds of dates. See examples below.
.. _dateutil: https://dateutil.readthedocs.io
.. _date-locators:
Date tick locators
------------------
Most of the date tick locators can locate single or multiple ticks. For example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on Mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on Mondays and Saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on Mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
The available date tick locators are:
* `MicrosecondLocator`: Locate microseconds.
* `SecondLocator`: Locate seconds.
* `MinuteLocator`: Locate minutes.
* `HourLocator`: Locate hours.
* `DayLocator`: Locate specified days of the month.
* `WeekdayLocator`: Locate days of the week, e.g., MO, TU.
* `MonthLocator`: Locate months, e.g., 7 for July.
* `YearLocator`: Locate years that are multiples of base.
* `RRuleLocator`: Locate using a `rrulewrapper`.
`rrulewrapper` is a simple wrapper around dateutil_'s `dateutil.rrule`
which allow almost arbitrary date tick specifications.
See :doc:`rrule example </gallery/ticks/date_demo_rrule>`.
* `AutoDateLocator`: On autoscale, this class picks the best `DateLocator`
(e.g., `RRuleLocator`) to set the view limits and the tick locations. If
called with ``interval_multiples=True`` it will make ticks line up with
sensible multiples of the tick intervals. For example, if the interval is
4 hours, it will pick hours 0, 4, 8, etc. as ticks. This behaviour is not
guaranteed by default.
.. _date-formatters:
Date formatters
---------------
The available date formatters are:
* `AutoDateFormatter`: attempts to figure out the best format to use. This is
most useful when used with the `AutoDateLocator`.
* `ConciseDateFormatter`: also attempts to figure out the best format to use,
and to make the format as compact as possible while still having complete
date information. This is most useful when used with the `AutoDateLocator`.
* `DateFormatter`: use `~datetime.datetime.strftime` format strings.
"""
import datetime
import functools
import logging
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
# Public API of matplotlib.dates: conversion helpers, formatters and
# locators, plus the dateutil frequency/weekday constants re-exported for
# convenience.
__all__ = ('datestr2num', 'date2num', 'num2date', 'num2timedelta', 'drange',
           'set_epoch', 'get_epoch', 'DateFormatter', 'ConciseDateFormatter',
           'AutoDateFormatter', 'DateLocator', 'RRuleLocator',
           'AutoDateLocator', 'YearLocator', 'MonthLocator', 'WeekdayLocator',
           'DayLocator', 'HourLocator', 'MinuteLocator',
           'SecondLocator', 'MicrosecondLocator',
           'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
           'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
           'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
           'DateConverter', 'ConciseDateConverter', 'rrulewrapper')

# Module-level logger.
_log = logging.getLogger(__name__)

# Short alias for the stdlib UTC timezone singleton.
UTC = datetime.timezone.utc
def _get_tzinfo(tz=None):
    """
    Resolve *tz* to a `~datetime.tzinfo` instance.

    Strings are parsed with `dateutil.tz.gettz` (``'UTC'`` short-circuits
    to the stdlib UTC singleton); ``None`` falls back to :rc:`timezone`;
    `~datetime.tzinfo` instances pass through unchanged.
    """
    tz = mpl._val_or_rc(tz, 'timezone')
    if tz == 'UTC':
        return UTC
    if isinstance(tz, datetime.tzinfo):
        return tz
    if isinstance(tz, str):
        parsed = dateutil.tz.gettz(tz)
        if parsed is None:
            raise ValueError(f"{tz} is not a valid timezone as parsed by"
                             " dateutil.tz.gettz.")
        return parsed
    raise TypeError(f"tz must be string or tzinfo subclass, not {tz!r}.")
# Time-related constants.
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
# EPOCH_OFFSET is not used by matplotlib
MICROSECONDLY = SECONDLY + 1  # frequency code one step finer than SECONDLY
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
# Derived conversion factors.
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
# Readable aliases for the dateutil weekday singletons.
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
    MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
# default epoch: passed to np.datetime64...
# None means "not yet latched"; resolved lazily by get_epoch().
_epoch = None
def _reset_epoch_test_example():
"""
Reset the Matplotlib date epoch so it can be set again.
Only for use in tests and examples.
"""
global _epoch
_epoch = None
def set_epoch(epoch):
    """
    Set the epoch (origin for dates) for datetime calculations.

    The default epoch is :rc:`date.epoch`.

    If microsecond accuracy is desired, the date being plotted needs to be
    within approximately 70 years of the epoch.  Matplotlib internally
    represents dates as days since the epoch, so floating point dynamic
    range needs to be within a factor of 2^52.

    `~.dates.set_epoch` must be called before any dates are converted
    (i.e. near the import section) or a RuntimeError will be raised.

    See also :doc:`/gallery/ticks/date_precision_and_epochs`.

    Parameters
    ----------
    epoch : str
        valid UTC date parsable by `numpy.datetime64` (do not include
        timezone).
    """
    global _epoch
    # Once an epoch is in effect, already-converted dates would silently
    # shift if it changed, so a second call is an error.
    if _epoch is None:
        _epoch = epoch
    else:
        raise RuntimeError('set_epoch must be called before dates plotted.')
def get_epoch():
    """
    Return the epoch used by `.dates`.

    Returns
    -------
    epoch : str
        String for the epoch (parsable by `numpy.datetime64`).
    """
    global _epoch
    # Lazily resolve from :rc:`date.epoch` on first use, then cache, which
    # also freezes the value against later rcParams changes.
    _epoch = mpl._val_or_rc(_epoch, 'date.epoch')
    return _epoch
def _dt64_to_ordinalf(d):
    """
    Convert `numpy.datetime64` or an `numpy.ndarray` of those types to
    Gregorian date as UTC float relative to the epoch (see `.get_epoch`).

    Roundoff is float64 precision.  Practically: microseconds for dates
    between 290301 BC, 294241 AD, milliseconds for larger dates
    (see `numpy.datetime64`).
    """
    # the "extra" ensures that we at least allow the dynamic range out to
    # seconds.  That should get out to +/-2e11 years.
    # Split into whole seconds plus a sub-second remainder so the float64
    # conversion keeps precision for both parts.
    dseconds = d.astype('datetime64[s]')
    extra = (d - dseconds).astype('timedelta64[ns]')
    t0 = np.datetime64(get_epoch(), 's')
    dt = (dseconds - t0).astype(np.float64)
    dt += extra.astype(np.float64) / 1.0e9  # add back the sub-second part
    dt = dt / SEC_PER_DAY
    # Map NaT inputs to NaN in the output (compare via int64 bit pattern,
    # since NaT != NaT under datetime64 comparison).
    NaT_int = np.datetime64('NaT').astype(np.int64)
    d_int = d.astype(np.int64)
    dt[d_int == NaT_int] = np.nan
    return dt
def _from_ordinalf(x, tz=None):
    """
    Convert Gregorian float of the date, preserving hours, minutes,
    seconds and microseconds.  Return value is a `.datetime`.

    The input date *x* is a float in ordinal days at UTC, and the output will
    be the specified `.datetime` object corresponding to that time in
    timezone *tz*, or if *tz* is ``None``, in the timezone specified in
    :rc:`timezone`.

    Raises
    ------
    ValueError
        If *x* falls outside the year range 0001-9999 supported by `datetime`.
    """
    tz = _get_tzinfo(tz)

    # Work in integer microseconds relative to the epoch.
    dt = (np.datetime64(get_epoch()) +
          np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us'))
    if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'):
        raise ValueError(f'Date ordinal {x} converts to {dt} (using '
                         f'epoch {get_epoch()}), but Matplotlib dates must be '
                         'between year 0001 and 9999.')
    # convert from datetime64 to datetime:
    dt = dt.tolist()

    # datetime64 is always UTC:
    dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC'))
    # but maybe we are working in a different timezone so move.
    dt = dt.astimezone(tz)
    # fix round off errors
    if np.abs(x) > 70 * 365:
        # if x is big, round off to nearest twenty microseconds.
        # This avoids floating point roundoff error
        ms = round(dt.microsecond / 20) * 20
        if ms == 1000000:
            # rounding carried into the next second
            dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1)
        else:
            dt = dt.replace(microsecond=ms)
    return dt
# a version of _from_ordinalf that can operate on numpy arrays
# (otypes="O": result is an object array of datetime instances)
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes="O")

# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
    """
    Convert a date string to a datenum using `dateutil.parser.parse`.

    Parameters
    ----------
    d : str or sequence of str
        The dates to convert.
    default : datetime.datetime, optional
        The default date to use when fields are missing in *d*.
    """
    if isinstance(d, str):
        return date2num(dateutil.parser.parse(d, default=default))
    if default is not None:
        # Parse entry by entry so *default* can fill in missing fields.
        return np.asarray([date2num(dateutil.parser.parse(s, default=default))
                           for s in d])
    d = np.asarray(d)
    # An empty array round-trips unchanged; otherwise parse in bulk.
    return date2num(_dateutil_parser_parse_np_vectorized(d)) if d.size else d
def date2num(d):
    """
    Convert datetime objects to Matplotlib dates.

    Parameters
    ----------
    d : `datetime.datetime` or `numpy.datetime64` or sequences of these

    Returns
    -------
    float or sequence of floats
        Number of days since the epoch.  See `.get_epoch` for the
        epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`.  If
        the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970
        ("1970-01-01T12:00:00") returns 0.5.

    Notes
    -----
    The Gregorian calendar is assumed; this is not universal practice.
    For details see the module docstring.
    """
    # Unpack in case of e.g. Pandas or xarray object
    d = cbook._unpack_to_numpy(d)

    # make an iterable, but save state to unpack later:
    iterable = np.iterable(d)
    if not iterable:
        d = [d]

    # Remember any mask so it can be re-applied after conversion.
    masked = np.ma.is_masked(d)
    mask = np.ma.getmask(d)
    d = np.asarray(d)

    # convert to datetime64 arrays, if not already:
    if not np.issubdtype(d.dtype, np.datetime64):
        # datetime arrays
        if not d.size:
            # deals with an empty array...
            return d
        # Only the first element is inspected for tz-awareness; presumably
        # the sequence is homogeneous in that respect.
        tzi = getattr(d[0], 'tzinfo', None)
        if tzi is not None:
            # make datetime naive:
            d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]
            d = np.asarray(d)
        d = d.astype('datetime64[us]')

    d = np.ma.masked_array(d, mask=mask) if masked else d
    d = _dt64_to_ordinalf(d)

    return d if iterable else d[0]
def num2date(x, tz=None):
    """
    Convert Matplotlib dates to `~datetime.datetime` objects.

    Parameters
    ----------
    x : float or sequence of floats
        Number of days (fraction part represents hours, minutes, seconds)
        since the epoch.  See `.get_epoch` for the
        epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`.
    tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
        Timezone of *x*. If a string, *tz* is passed to `dateutil.tz`.

    Returns
    -------
    `~datetime.datetime` or sequence of `~datetime.datetime`
        Dates are returned in timezone *tz*.

        If *x* is a sequence, a sequence of `~datetime.datetime` objects will
        be returned.

    Notes
    -----
    The Gregorian calendar is assumed; this is not universal practice.
    For details, see the module docstring.
    """
    tzinfo = _get_tzinfo(tz)
    # The vectorized helper yields an object array; tolist() turns it back
    # into plain datetimes (a bare datetime for scalar input).
    return _from_ordinalf_np_vectorized(x, tzinfo).tolist()
# a version of datetime.timedelta construction that can operate on
# numpy arrays of day counts (otypes="O": object array of timedeltas)
_ordinalf_to_timedelta_np_vectorized = np.vectorize(
    lambda x: datetime.timedelta(days=x), otypes="O")
def num2timedelta(x):
    """
    Convert number of days to a `~datetime.timedelta` object.

    If *x* is a sequence, a sequence of `~datetime.timedelta` objects will
    be returned.

    Parameters
    ----------
    x : float, sequence of floats
        Number of days.  The fraction part represents hours, minutes, seconds.

    Returns
    -------
    `datetime.timedelta` or list[`datetime.timedelta`]
    """
    # tolist() unpacks a 0-d result back into a plain timedelta.
    return _ordinalf_to_timedelta_np_vectorized(x).tolist()
def drange(dstart, dend, delta):
    """
    Return a sequence of equally spaced Matplotlib dates.

    The dates start at *dstart* and reach up to, but not including *dend*.
    They are spaced by *delta*.

    Parameters
    ----------
    dstart, dend : `~datetime.datetime`
        The date limits.
    delta : `datetime.timedelta`
        Spacing of the dates.

    Returns
    -------
    `numpy.array`
        A list of floats representing Matplotlib dates.
    """
    fstart = date2num(dstart)
    fstop = date2num(dend)
    step = delta.total_seconds() / SEC_PER_DAY

    # How many delta-sized steps are needed to cover [dstart, dend).
    num = int(np.ceil((fstop - fstart) / step))

    # The last date the generated interval would reach.
    interval_end = dstart + num * delta

    if interval_end >= dend:
        # Keep the interval half-open: drop the step that hit (or passed)
        # dend.
        interval_end -= delta
        num -= 1

    fstop = date2num(interval_end)  # float endpoint actually generated
    return np.linspace(fstart, fstop, num + 1)
def _wrap_in_tex(text):
    """Wrap *text* for TeX rendering, keeping alphabetic runs upright."""
    # Close math mode around alphabetic runs; everything else stays wrapped
    # in \mathdefault.
    wrapped = re.sub(r'([a-zA-Z]+)', r'}$\1$\\mathdefault{', text)
    # Braces ensure symbols are not spaced like binary operators.
    for symbol, braced in (('-', '{-}'), (':', '{:}')):
        wrapped = wrapped.replace(symbol, braced)
    # To not concatenate space between numbers.
    wrapped = wrapped.replace(' ', r'\;')
    # Strip the empty math groups that appear at the string edges.
    return ('$\\mathdefault{' + wrapped + '}$').replace(
        '$\\mathdefault{}$', '')
## date tick locators and formatters ###
class DateFormatter(ticker.Formatter):
    """
    Format a tick (in days since the epoch) with a
    `~datetime.datetime.strftime` format string.
    """

    def __init__(self, fmt, tz=None, *, usetex=None):
        """
        Parameters
        ----------
        fmt : str
            `~datetime.datetime.strftime` format string
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        usetex : bool, default: :rc:`text.usetex`
            To enable/disable the use of TeX's math mode for rendering the
            results of the formatter.
        """
        self.fmt = fmt
        self.tz = _get_tzinfo(tz)
        self._usetex = mpl._val_or_rc(usetex, 'text.usetex')

    def __call__(self, x, pos=0):
        label = num2date(x, self.tz).strftime(self.fmt)
        if self._usetex:
            label = _wrap_in_tex(label)
        return label

    def set_tzinfo(self, tz):
        self.tz = _get_tzinfo(tz)
class ConciseDateFormatter(ticker.Formatter):
    """
    A `.Formatter` which attempts to figure out the best format to use for the
    date, and to make it as compact as possible, but still be complete. This is
    most useful when used with the `AutoDateLocator`::

    >>> locator = AutoDateLocator()
    >>> formatter = ConciseDateFormatter(locator)

    Parameters
    ----------
    locator : `.ticker.Locator`
        Locator that this axis is using.

    tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
        Ticks timezone, passed to `.dates.num2date`.

    formats : list of 6 strings, optional
        Format strings for 6 levels of tick labelling: mostly years,
        months, days, hours, minutes, and seconds. Strings use
        the same format codes as `~datetime.datetime.strftime`. Default is
        ``['%Y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f']``

    zero_formats : list of 6 strings, optional
        Format strings for tick labels that are "zeros" for a given tick
        level. For instance, if most ticks are months, ticks around 1 Jan 2005
        will be labeled "Dec", "2005", "Feb". The default is
        ``['', '%Y', '%b', '%b-%d', '%H:%M', '%H:%M']``

    offset_formats : list of 6 strings, optional
        Format strings for the 6 levels that is applied to the "offset"
        string found on the right side of an x-axis, or top of a y-axis.
        Combined with the tick labels this should completely specify the
        date. The default is::

            ['', '%Y', '%Y-%b', '%Y-%b-%d', '%Y-%b-%d', '%Y-%b-%d %H:%M']

    show_offset : bool, default: True
        Whether to show the offset or not.

    usetex : bool, default: :rc:`text.usetex`
        To enable/disable the use of TeX's math mode for rendering the results
        of the formatter.

    Examples
    --------
    See :doc:`/gallery/ticks/date_concise_formatter`

    .. plot::

        import datetime
        import matplotlib.dates as mdates

        base = datetime.datetime(2005, 2, 1)
        dates = np.array([base + datetime.timedelta(hours=(2 * i))
                          for i in range(732)])
        N = len(dates)
        np.random.seed(19680801)
        y = np.cumsum(np.random.randn(N))

        fig, ax = plt.subplots(constrained_layout=True)
        locator = mdates.AutoDateLocator()
        formatter = mdates.ConciseDateFormatter(locator)
        ax.xaxis.set_major_locator(locator)
        ax.xaxis.set_major_formatter(formatter)

        ax.plot(dates, y)
        ax.set_title('Concise Date Formatter')
    """

    def __init__(self, locator, tz=None, formats=None, offset_formats=None,
                 zero_formats=None, show_offset=True, *, usetex=None):
        """
        Autoformat the date labels.  The default format is used to form an
        initial string, and then redundant elements are removed.
        """
        self._locator = locator
        self._tz = tz
        self.defaultfmt = '%Y'
        # there are 6 levels with each level getting a specific format
        # 0: mostly years,  1: months,  2: days,
        # 3: hours, 4: minutes, 5: seconds
        if formats:
            if len(formats) != 6:
                raise ValueError('formats argument must be a list of '
                                 '6 format strings (or None)')
            self.formats = formats
        else:
            self.formats = ['%Y',  # ticks are mostly years
                            '%b',  # ticks are mostly months
                            '%d',  # ticks are mostly days
                            '%H:%M',  # hrs
                            '%H:%M',  # min
                            '%S.%f',  # secs
                            ]
        # fmt for zeros ticks at this level.  These are
        # ticks that should be labeled w/ info the level above.
        # like 1 Jan can just be labelled "Jan".  02:02:00 can
        # just be labeled 02:02.
        if zero_formats:
            if len(zero_formats) != 6:
                raise ValueError('zero_formats argument must be a list of '
                                 '6 format strings (or None)')
            self.zero_formats = zero_formats
        elif formats:
            # use the users formats for the zero tick formats
            self.zero_formats = [''] + self.formats[:-1]
        else:
            # make the defaults a bit nicer:
            self.zero_formats = [''] + self.formats[:-1]
            self.zero_formats[3] = '%b-%d'
        if offset_formats:
            if len(offset_formats) != 6:
                raise ValueError('offset_formats argument must be a list of '
                                 '6 format strings (or None)')
            self.offset_formats = offset_formats
        else:
            self.offset_formats = ['',
                                   '%Y',
                                   '%Y-%b',
                                   '%Y-%b-%d',
                                   '%Y-%b-%d',
                                   '%Y-%b-%d %H:%M']
        self.offset_string = ''
        self.show_offset = show_offset
        self._usetex = mpl._val_or_rc(usetex, 'text.usetex')

    def __call__(self, x, pos=None):
        # For a single value, fall back to the default (coarsest) format;
        # the concise logic lives in format_ticks, which needs all ticks.
        formatter = DateFormatter(self.defaultfmt, self._tz,
                                  usetex=self._usetex)
        return formatter(x, pos=pos)

    def format_ticks(self, values):
        # docstring inherited
        tickdatetime = [num2date(value, tz=self._tz) for value in values]
        # (year, month, day, hour, minute, second) per tick as an int array.
        tickdate = np.array([tdt.timetuple()[:6] for tdt in tickdatetime])

        # basic algorithm:
        # 1) only display a part of the date if it changes over the ticks.
        # 2) don't display the smaller part of the date if:
        #    it is always the same or if it is the start of the
        #    year, month, day etc.
        # fmt for most ticks at this level
        fmts = self.formats
        # format beginnings of days, months, years, etc.
        zerofmts = self.zero_formats
        # offset fmt are for the offset in the upper left of the
        # or lower right of the axis.
        offsetfmts = self.offset_formats
        show_offset = self.show_offset

        # determine the level we will label at:
        # mostly 0: years,  1: months,  2: days,
        # 3: hours, 4: minutes, 5: seconds, 6: microseconds
        for level in range(5, -1, -1):
            unique = np.unique(tickdate[:, level])
            if len(unique) > 1:
                # if 1 is included in unique, the year is shown in ticks
                if level < 2 and np.any(unique == 1):
                    show_offset = False
                break
            elif level == 0:
                # all tickdate are the same, so only micros might be different
                # set to the most precise (6: microseconds doesn't exist...)
                level = 5

        # level is the basic level we will label at.
        # now loop through and decide the actual ticklabels
        zerovals = [0, 1, 1, 0, 0, 0, 0]
        labels = [''] * len(tickdate)
        for nn in range(len(tickdate)):
            if level < 5:
                if tickdate[nn][level] == zerovals[level]:
                    fmt = zerofmts[level]
                else:
                    fmt = fmts[level]
            else:
                # special handling for seconds + microseconds
                if (tickdatetime[nn].second == tickdatetime[nn].microsecond
                        == 0):
                    fmt = zerofmts[level]
                else:
                    fmt = fmts[level]
            labels[nn] = tickdatetime[nn].strftime(fmt)

        # special handling of seconds and microseconds:
        # strip extra zeros and decimal if possible.
        # this is complicated by two factors.  1) we have some level-4 strings
        # here (i.e. 03:00, '0.50000', '1.000') 2) we would like to have the
        # same number of decimals for each string (i.e. 0.5 and 1.0).
        if level >= 5:
            trailing_zeros = min(
                (len(s) - len(s.rstrip('0')) for s in labels if '.' in s),
                default=None)
            if trailing_zeros:
                for nn in range(len(labels)):
                    if '.' in labels[nn]:
                        labels[nn] = labels[nn][:-trailing_zeros].rstrip('.')

        if show_offset:
            # set the offset string:
            # use the first tick for inverted axes so the offset matches the
            # visually-last (rightmost/topmost) tick, otherwise the last one.
            if (self._locator.axis and
                    self._locator.axis.__name__ in ('xaxis', 'yaxis')
                    and self._locator.axis.get_inverted()):
                self.offset_string = tickdatetime[0].strftime(offsetfmts[level])
            else:
                self.offset_string = tickdatetime[-1].strftime(offsetfmts[level])
            if self._usetex:
                self.offset_string = _wrap_in_tex(self.offset_string)
        else:
            self.offset_string = ''

        if self._usetex:
            return [_wrap_in_tex(l) for l in labels]
        else:
            return labels

    def get_offset(self):
        # docstring inherited
        return self.offset_string

    def format_data_short(self, value):
        # docstring inherited
        return num2date(value, tz=self._tz).strftime('%Y-%m-%d %H:%M:%S')
class AutoDateFormatter(ticker.Formatter):
    """
    A `.Formatter` which attempts to figure out the best format to use.  This
    is most useful when used with the `AutoDateLocator`.

    `.AutoDateFormatter` has a ``.scale`` dictionary that maps tick scales (the
    interval in days between one major tick) to format strings; this dictionary
    defaults to ::

        self.scaled = {
            DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
            DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
            1: rcParams['date.autoformatter.day'],
            1 / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
            1 / MINUTES_PER_DAY: rcParams['date.autoformatter.minute'],
            1 / SEC_PER_DAY: rcParams['date.autoformatter.second'],
            1 / MUSECONDS_PER_DAY: rcParams['date.autoformatter.microsecond'],
        }

    The formatter uses the format string corresponding to the lowest key in
    the dictionary that is greater or equal to the current scale.  Dictionary
    entries can be customized::

        locator = AutoDateLocator()
        formatter = AutoDateFormatter(locator)
        formatter.scaled[1/(24*60)] = '%M:%S' # only show min and sec

    Custom callables can also be used instead of format strings.  The following
    example shows how to use a custom format function to strip trailing zeros
    from decimal seconds and adds the date to the first ticklabel::

        def my_format_function(x, pos=None):
            x = matplotlib.dates.num2date(x)
            if pos == 0:
                fmt = '%D %H:%M:%S.%f'
            else:
                fmt = '%H:%M:%S.%f'
            label = x.strftime(fmt)
            label = label.rstrip("0")
            label = label.rstrip(".")
            return label

        formatter.scaled[1/(24*60)] = my_format_function
    """

    # This can be improved by providing some user-level direction on
    # how to choose the best format (precedence, etc.).

    # Perhaps a 'struct' that has a field for each time-type where a
    # zero would indicate "don't show" and a number would indicate
    # "show" with some sort of priority.  Same priorities could mean
    # show all with the same priority.

    # Or more simply, perhaps just a format string for each
    # possibility...

    def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d', *,
                 usetex=None):
        """
        Autoformat the date labels.

        Parameters
        ----------
        locator : `.ticker.Locator`
            Locator that this axis is using.

        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.

        defaultfmt : str
            The default format to use if none of the values in ``self.scaled``
            are greater than the unit returned by ``locator._get_unit()``.

        usetex : bool, default: :rc:`text.usetex`
            To enable/disable the use of TeX's math mode for rendering the
            results of the formatter. If any entries in ``self.scaled`` are set
            as functions, then it is up to the customized function to enable or
            disable TeX's math mode itself.
        """
        self._locator = locator
        self._tz = tz
        self.defaultfmt = defaultfmt
        self._formatter = DateFormatter(self.defaultfmt, tz)
        rcParams = mpl.rcParams
        self._usetex = mpl._val_or_rc(usetex, 'text.usetex')
        self.scaled = {
            DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
            DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
            1: rcParams['date.autoformatter.day'],
            1 / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
            1 / MINUTES_PER_DAY: rcParams['date.autoformatter.minute'],
            1 / SEC_PER_DAY: rcParams['date.autoformatter.second'],
            1 / MUSECONDS_PER_DAY: rcParams['date.autoformatter.microsecond']
        }

    def _set_locator(self, locator):
        # Internal hook used to swap in a new locator after construction.
        self._locator = locator

    def __call__(self, x, pos=None):
        try:
            locator_unit_scale = float(self._locator._get_unit())
        except AttributeError:
            # Locators without _get_unit (non-date locators) default to days.
            locator_unit_scale = 1
        # Pick the first scale which is greater than the locator unit.
        fmt = next((fmt for scale, fmt in sorted(self.scaled.items())
                    if scale >= locator_unit_scale),
                   self.defaultfmt)

        if isinstance(fmt, str):
            self._formatter = DateFormatter(fmt, self._tz, usetex=self._usetex)
            result = self._formatter(x, pos)
        elif callable(fmt):
            result = fmt(x, pos)
        else:
            raise TypeError(f'Unexpected type passed to {self!r}.')

        return result
class rrulewrapper:
    """
    A simple wrapper around a `dateutil.rrule` allowing flexible
    date tick specifications.
    """

    def __init__(self, freq, tzinfo=None, **kwargs):
        """
        Parameters
        ----------
        freq : {YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY}
            Tick frequency. These constants are defined in `dateutil.rrule`,
            but they are accessible from `matplotlib.dates` as well.
        tzinfo : `datetime.tzinfo`, optional
            Time zone information. The default is None.
        **kwargs
            Additional keyword arguments are passed to the `dateutil.rrule`.
        """
        kwargs['freq'] = freq
        self._base_tzinfo = tzinfo

        self._update_rrule(**kwargs)

    def set(self, **kwargs):
        """Set parameters for an existing wrapper."""
        self._construct.update(kwargs)

        self._update_rrule(**self._construct)

    def _update_rrule(self, **kwargs):
        """Rebuild the underlying rrule from *kwargs*, normalizing timezones."""
        tzinfo = self._base_tzinfo

        # rrule does not play nicely with timezones - especially pytz time
        # zones, it's best to use naive zones and attach timezones once the
        # datetimes are returned
        if 'dtstart' in kwargs:
            dtstart = kwargs['dtstart']
            if dtstart.tzinfo is not None:
                if tzinfo is None:
                    # Adopt dtstart's zone as the wrapper's zone.
                    tzinfo = dtstart.tzinfo
                else:
                    dtstart = dtstart.astimezone(tzinfo)

                kwargs['dtstart'] = dtstart.replace(tzinfo=None)

        if 'until' in kwargs:
            until = kwargs['until']
            if until.tzinfo is not None:
                if tzinfo is not None:
                    until = until.astimezone(tzinfo)
                else:
                    # dtstart was naive (or absent) with no tzinfo to convert
                    # into, so an aware `until` would be ambiguous.
                    raise ValueError('until cannot be aware if dtstart '
                                     'is naive and tzinfo is None')

                kwargs['until'] = until.replace(tzinfo=None)

        self._construct = kwargs.copy()
        self._tzinfo = tzinfo
        self._rrule = rrule(**self._construct)

    def _attach_tzinfo(self, dt, tzinfo):
        """Attach *tzinfo* to the naive datetime *dt*."""
        # pytz zones are attached by "localizing" the datetime
        if hasattr(tzinfo, 'localize'):
            return tzinfo.localize(dt, is_dst=True)

        return dt.replace(tzinfo=tzinfo)

    def _aware_return_wrapper(self, f, returns_list=False):
        """Decorator function that allows rrule methods to handle tzinfo."""
        # This is only necessary if we're actually attaching a tzinfo
        if self._tzinfo is None:
            return f

        # All datetime arguments must be naive. If they are not naive, they are
        # converted to the _tzinfo zone before dropping the zone.
        def normalize_arg(arg):
            if isinstance(arg, datetime.datetime) and arg.tzinfo is not None:
                if arg.tzinfo is not self._tzinfo:
                    arg = arg.astimezone(self._tzinfo)

                return arg.replace(tzinfo=None)

            return arg

        def normalize_args(args, kwargs):
            args = tuple(normalize_arg(arg) for arg in args)
            kwargs = {kw: normalize_arg(arg) for kw, arg in kwargs.items()}

            return args, kwargs

        # There are two kinds of functions we care about - ones that return
        # dates and ones that return lists of dates.
        if not returns_list:
            def inner_func(*args, **kwargs):
                args, kwargs = normalize_args(args, kwargs)
                dt = f(*args, **kwargs)
                return self._attach_tzinfo(dt, self._tzinfo)
        else:
            def inner_func(*args, **kwargs):
                args, kwargs = normalize_args(args, kwargs)
                dts = f(*args, **kwargs)
                return [self._attach_tzinfo(dt, self._tzinfo) for dt in dts]

        return functools.wraps(f)(inner_func)

    def __getattr__(self, name):
        # NOTE(review): __getattr__ is only invoked when normal lookup fails;
        # the __dict__ check presumably guards against recursion while the
        # instance is only partially initialized (e.g. during unpickling).
        if name in self.__dict__:
            return self.__dict__[name]

        f = getattr(self._rrule, name)

        # Wrap date-returning rrule methods so they honor our tzinfo.
        if name in {'after', 'before'}:
            return self._aware_return_wrapper(f)
        elif name in {'xafter', 'xbefore', 'between'}:
            return self._aware_return_wrapper(f, returns_list=True)
        else:
            return f

    def __setstate__(self, state):
        self.__dict__.update(state)
class DateLocator(ticker.Locator):
    """
    Determines the tick locations when plotting dates.

    This class is subclassed by other Locators and
    is not meant to be used on its own.
    """
    # rrule kwargs that pin hour/minute/second to zero (midnight ticks).
    hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}

    def __init__(self, tz=None):
        """
        Parameters
        ----------
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        self.tz = _get_tzinfo(tz)

    def set_tzinfo(self, tz):
        """
        Set timezone info.

        Parameters
        ----------
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        self.tz = _get_tzinfo(tz)

    def datalim_to_dt(self):
        """Convert axis data interval to datetime objects."""
        dmin, dmax = self.axis.get_data_interval()
        if dmin > dmax:
            dmin, dmax = dmax, dmin

        return num2date(dmin, self.tz), num2date(dmax, self.tz)

    def viewlim_to_dt(self):
        """Convert the view interval to datetime objects."""
        vmin, vmax = self.axis.get_view_interval()
        if vmin > vmax:
            vmin, vmax = vmax, vmin
        return num2date(vmin, self.tz), num2date(vmax, self.tz)

    def _get_unit(self):
        """
        Return how many days a unit of the locator is; used for
        intelligent autoscaling.
        """
        return 1

    def _get_interval(self):
        """
        Return the number of units for each tick.
        """
        return 1

    def nonsingular(self, vmin, vmax):
        """
        Given the proposed upper and lower extent, adjust the range
        if it is too close to being singular (i.e. a range of ~0).
        """
        if not np.isfinite(vmin) or not np.isfinite(vmax):
            # Except if there is no data, then use 1970 as default.
            return (date2num(datetime.date(1970, 1, 1)),
                    date2num(datetime.date(1970, 1, 2)))
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        unit = self._get_unit()
        interval = self._get_interval()
        # 1e-6 days is ~86 ms; spans smaller than that are treated as
        # singular and padded by two tick units on each side.
        if abs(vmax - vmin) < 1e-6:
            vmin -= 2 * unit * interval
            vmax += 2 * unit * interval
        return vmin, vmax
class RRuleLocator(DateLocator):
    # use the dateutil rrule instance

    def __init__(self, o, tz=None):
        super().__init__(tz)
        self.rule = o

    def __call__(self):
        # With no data set, viewlim_to_dt tanks with a ValueError; in that
        # case return no ticks.
        try:
            dmin, dmax = self.viewlim_to_dt()
        except ValueError:
            return []
        return self.tick_values(dmin, dmax)

    def tick_values(self, vmin, vmax):
        start, stop = self._create_rrule(vmin, vmax)
        dates = self.rule.between(start, stop, True)
        if not dates:
            # No occurrence falls in range; fall back to the endpoints.
            return date2num([vmin, vmax])
        return self.raise_if_exceeds(date2num(dates))

    def _create_rrule(self, vmin, vmax):
        # Set the rrule's dtstart/until to the view range padded by the span
        # itself, capped to the endpoints of the valid datetime range, and
        # return the original limits.
        delta = relativedelta(vmax, vmin)

        try:
            start = vmin - delta
        except (ValueError, OverflowError):
            # cap at the earliest representable datetime
            start = datetime.datetime(1, 1, 1, 0, 0, 0,
                                      tzinfo=datetime.timezone.utc)

        try:
            stop = vmax + delta
        except (ValueError, OverflowError):
            # cap at the latest representable datetime
            stop = datetime.datetime(9999, 12, 31, 23, 59, 59,
                                     tzinfo=datetime.timezone.utc)

        self.rule.set(dtstart=start, until=stop)
        return vmin, vmax

    def _get_unit(self):
        # docstring inherited
        return self.get_unit_generic(self.rule._rrule._freq)

    @staticmethod
    def get_unit_generic(freq):
        # Days spanned by one unit of *freq*; -1 flags an unknown frequency.
        freq_to_days = {
            YEARLY: DAYS_PER_YEAR,
            MONTHLY: DAYS_PER_MONTH,
            WEEKLY: DAYS_PER_WEEK,
            DAILY: 1.0,
            HOURLY: 1.0 / HOURS_PER_DAY,
            MINUTELY: 1.0 / MINUTES_PER_DAY,
            SECONDLY: 1.0 / SEC_PER_DAY,
        }
        return freq_to_days.get(freq, -1)

    def _get_interval(self):
        return self.rule._rrule._interval
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best `DateLocator` to set the view
limits and the tick locations.
Attributes
----------
intervald : dict
Mapping of tick frequencies to multiples allowed for that ticking.
The default is ::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14, 21],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500,
1000, 2000, 5000, 10000, 20000, 50000,
100000, 200000, 500000, 1000000],
}
where the keys are defined in `dateutil.rrule`.
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
When customizing, you should only modify the values for the existing
keys. You should not add or delete entries.
Example for forcing ticks every 3 hours::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
    def __init__(self, tz=None, minticks=5, maxticks=None,
                 interval_multiples=True):
        """
        Parameters
        ----------
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        minticks : int
            The minimum number of ticks desired; controls whether ticks occur
            yearly, monthly, etc.
        maxticks : int
            The maximum number of ticks desired; controls the interval between
            ticks (ticking every other, every 3, etc.).  For fine-grained
            control, this can be a dictionary mapping individual rrule
            frequency constants (YEARLY, MONTHLY, etc.) to their own maximum
            number of ticks.  This can be used to keep the number of ticks
            appropriate to the format chosen in `AutoDateFormatter`. Any
            frequency not specified in this dictionary is given a default
            value.
        interval_multiples : bool, default: True
            Whether ticks should be chosen to be multiple of the interval,
            locking them to 'nicer' locations.  For example, this will force
            the ticks to be at hours 0, 6, 12, 18 when hourly ticking is done
            at 6 hour intervals.
        """
        super().__init__(tz=tz)
        # Chosen frequency; updated when get_locator picks the best scale.
        self._freq = YEARLY
        # Candidate frequencies, coarsest to finest.
        self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
                       SECONDLY, MICROSECONDLY]
        self.minticks = minticks

        self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
                         MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
        if maxticks is not None:
            try:
                self.maxticks.update(maxticks)
            except TypeError:
                # Assume we were given an integer. Use this as the maximum
                # number of ticks for every frequency and create a
                # dictionary for this
                self.maxticks = dict.fromkeys(self._freqs, maxticks)
        self.interval_multiples = interval_multiples
        self.intervald = {
            YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
                     1000, 2000, 4000, 5000, 10000],
            MONTHLY: [1, 2, 3, 4, 6],
            DAILY: [1, 2, 3, 7, 14, 21],
            HOURLY: [1, 2, 3, 4, 6, 12],
            MINUTELY: [1, 5, 10, 15, 30],
            SECONDLY: [1, 5, 10, 15, 30],
            MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
                            5000, 10000, 20000, 50000, 100000, 200000, 500000,
                            1000000],
        }
        if interval_multiples:
            # Swap "3" for "4" in the DAILY list; If we use 3 we get bad
            # tick loc for months w/ 31 days: 1, 4, ..., 28, 31, 1
            # If we use 4 then we get: 1, 5, ... 25, 29, 1
            self.intervald[DAILY] = [1, 2, 4, 7, 14]

        # by-ranges passed to rrulewrapper per frequency
        # [unused (years), bymonth, bymonthday, byhour, byminute, bysecond,
        #  unused (microseconds)]
        self._byranges = [None, range(1, 13), range(1, 32),
                          range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
# docstring inherited
dmin, dmax = self.viewlim_to_dt()
locator = self.get_locator(dmin, dmax)
return locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if not np.isfinite(vmin) or not np.isfinite(vmax):
# Except if there is no data, then use 1970 as default.
return (date2num(datetime.date(1970, 1, 1)),
date2num(datetime.date(1970, 1, 2)))
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def get_locator(self, dmin, dmax):
    """
    Pick the best locator based on a distance.

    Returns a `YearLocator`, `RRuleLocator` or `MicrosecondLocator`
    configured for the span *dmin* to *dmax* (datetimes).
    """
    delta = relativedelta(dmax, dmin)
    tdelta = dmax - dmin
    # take absolute difference
    if dmin > dmax:
        delta = -delta
        tdelta = -tdelta
    # The following uses a mix of calls to relativedelta and timedelta
    # methods because there is incomplete overlap in the functionality of
    # these similar functions, and it's best to avoid doing our own math
    # whenever possible.
    numYears = float(delta.years)
    numMonths = numYears * MONTHS_PER_YEAR + delta.months
    numDays = tdelta.days  # Avoids estimates of days/month, days/year.
    numHours = numDays * HOURS_PER_DAY + delta.hours
    numMinutes = numHours * MIN_PER_HOUR + delta.minutes
    numSeconds = np.floor(tdelta.total_seconds())
    numMicroseconds = np.floor(tdelta.total_seconds() * 1e6)
    nums = [numYears, numMonths, numDays, numHours, numMinutes,
            numSeconds, numMicroseconds]
    # Only the microsecond frequency is handled outside of rrule.
    use_rrule_locator = [True] * 6 + [False]
    # Default setting of bymonth, etc. to pass to rrule
    # [unused (for year), bymonth, bymonthday, byhour, byminute,
    #  bysecond, unused (for microseconds)]
    byranges = [None, 1, 1, 0, 0, 0, None]
    # Loop over all the frequencies and try to find one that gives at
    # least a minticks tick positions.  Once this is found, look for
    # an interval from a list specific to that frequency that gives no
    # more than maxticks tick positions. Also, set up some ranges
    # (bymonth, etc.) as appropriate to be passed to rrulewrapper.
    for i, (freq, num) in enumerate(zip(self._freqs, nums)):
        # If this particular frequency doesn't give enough ticks, continue
        if num < self.minticks:
            # Since we're not using this particular frequency, set
            # the corresponding by_ to None so the rrule can act as
            # appropriate
            byranges[i] = None
            continue
        # Find the first available interval that doesn't give too many
        # ticks
        for interval in self.intervald[freq]:
            if num <= interval * (self.maxticks[freq] - 1):
                break
        else:
            # No listed interval was coarse enough; keep the largest one
            # (except for DAILY with interval_multiples, which handles
            # large spans via the [1, 15] / weekly special cases below).
            if not (self.interval_multiples and freq == DAILY):
                _api.warn_external(
                    f"AutoDateLocator was unable to pick an appropriate "
                    f"interval for this date range. It may be necessary "
                    f"to add an interval value to the AutoDateLocator's "
                    f"intervald dictionary. Defaulting to {interval}.")
        # Set some parameters as appropriate
        self._freq = freq
        if self._byranges[i] and self.interval_multiples:
            byranges[i] = self._byranges[i][::interval]
            if i in (DAILY, WEEKLY):
                if interval == 14:
                    # just make first and 15th.  Avoids 30th.
                    byranges[i] = [1, 15]
                elif interval == 7:
                    byranges[i] = [1, 8, 15, 22]
                # The by* list now encodes the stride, so reset interval.
                interval = 1
        else:
            byranges[i] = self._byranges[i]
        break
    else:
        # Even microseconds were too few; fall through with interval 1.
        interval = 1
    if (freq == YEARLY) and self.interval_multiples:
        locator = YearLocator(interval, tz=self.tz)
    elif use_rrule_locator[i]:
        _, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
        rrule = rrulewrapper(self._freq, interval=interval,
                             dtstart=dmin, until=dmax,
                             bymonth=bymonth, bymonthday=bymonthday,
                             byhour=byhour, byminute=byminute,
                             bysecond=bysecond)
        locator = RRuleLocator(rrule, tz=self.tz)
    else:
        locator = MicrosecondLocator(interval, tz=self.tz)
        if date2num(dmin) > 70 * 365 and interval < 1000:
            _api.warn_external(
                'Plotting microsecond time intervals for dates far from '
                f'the epoch (time origin: {get_epoch()}) is not well-'
                'supported. See matplotlib.dates.set_epoch to change the '
                'epoch.')
    locator.set_axis(self.axis)
    return locator
class YearLocator(RRuleLocator):
    """
    Make ticks on a given day of each year that is a multiple of base.

    Examples::

      # Tick every year on Jan 1st
      locator = YearLocator()

      # Tick every 5 years on July 4th
      locator = YearLocator(5, month=7, day=4)
    """
    def __init__(self, base=1, month=1, day=1, tz=None):
        """
        Parameters
        ----------
        base : int, default: 1
            Mark ticks every *base* years.
        month : int, default: 1
            The month on which to place the ticks, starting from 1. Default is
            January.
        day : int, default: 1
            The day on which to place the ticks.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        rule = rrulewrapper(YEARLY, interval=base, bymonth=month,
                            bymonthday=day, **self.hms0d)
        super().__init__(rule, tz=tz)
        # Used by _create_rrule to snap tick years to multiples of *base*.
        self.base = ticker._Edge_integer(base, 0)

    def _create_rrule(self, vmin, vmax):
        # 'start' needs to be a multiple of the interval to create ticks on
        # interval multiples when the tick frequency is YEARLY
        # (le/ge round vmin down / vmax up to the nearest multiple; the
        # results are clamped to datetime's valid year range [1, 9999]).
        ymin = max(self.base.le(vmin.year) * self.base.step, 1)
        ymax = min(self.base.ge(vmax.year) * self.base.step, 9999)
        # Rebuild the rule's start from the configured month/day with the
        # time-of-day zeroed out.
        c = self.rule._construct
        replace = {'year': ymin,
                   'month': c.get('bymonth', 1),
                   'day': c.get('bymonthday', 1),
                   'hour': 0, 'minute': 0, 'second': 0}
        start = vmin.replace(**replace)
        stop = start.replace(year=ymax)
        self.rule.set(dtstart=start, until=stop)
        return start, stop
class MonthLocator(RRuleLocator):
    """Place ticks on occurrences of each month, e.g., 1, 3, 12."""

    def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
        """
        Parameters
        ----------
        bymonth : int or list of int, default: all months
            Ticks will be placed on every month in *bymonth*; by default
            ``range(1, 13)``, i.e. every month.
        bymonthday : int, default: 1
            The day on which to place the ticks.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        months = range(1, 13) if bymonth is None else bymonth
        rule = rrulewrapper(MONTHLY, interval=interval, bymonth=months,
                            bymonthday=bymonthday, **self.hms0d)
        super().__init__(rule, tz=tz)
class WeekdayLocator(RRuleLocator):
    """Place ticks on occurrences of each weekday."""

    def __init__(self, byweekday=1, interval=1, tz=None):
        """
        Parameters
        ----------
        byweekday : int or list of int, default: all days
            Ticks will be placed on every weekday in *byweekday*; by default
            every day.

            Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
            SU, the constants from :mod:`dateutil.rrule`, which have been
            imported into the :mod:`matplotlib.dates` namespace.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        rule = rrulewrapper(DAILY, interval=interval, byweekday=byweekday,
                            **self.hms0d)
        super().__init__(rule, tz=tz)
class DayLocator(RRuleLocator):
    """
    Place ticks on occurrences of each day of the month, e.g., 1, 15, 30.
    """

    def __init__(self, bymonthday=None, interval=1, tz=None):
        """
        Parameters
        ----------
        bymonthday : int or list of int, default: all days
            Ticks will be placed on every day in *bymonthday*; by default
            ``range(1, 32)``, i.e. every day of the month.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.

        Raises
        ------
        ValueError
            If *interval* is not a positive integer.
        """
        # Validate the stride before handing it to rrule.
        if interval != int(interval) or interval < 1:
            raise ValueError("interval must be an integer greater than 0")
        days = range(1, 32) if bymonthday is None else bymonthday
        rule = rrulewrapper(DAILY, interval=interval, bymonthday=days,
                            **self.hms0d)
        super().__init__(rule, tz=tz)
class HourLocator(RRuleLocator):
    """Place ticks on occurrences of each hour."""

    def __init__(self, byhour=None, interval=1, tz=None):
        """
        Parameters
        ----------
        byhour : int or list of int, default: all hours
            Ticks will be placed on every hour in *byhour*; by default
            ``range(24)``, i.e. every hour.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        hours = range(24) if byhour is None else byhour
        rule = rrulewrapper(HOURLY, interval=interval, byhour=hours,
                            byminute=0, bysecond=0)
        super().__init__(rule, tz=tz)
class MinuteLocator(RRuleLocator):
    """Place ticks on occurrences of each minute."""

    def __init__(self, byminute=None, interval=1, tz=None):
        """
        Parameters
        ----------
        byminute : int or list of int, default: all minutes
            Ticks will be placed on every minute in *byminute*; by default
            ``range(60)``, i.e. every minute.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        minutes = range(60) if byminute is None else byminute
        rule = rrulewrapper(MINUTELY, interval=interval, byminute=minutes,
                            bysecond=0)
        super().__init__(rule, tz=tz)
class SecondLocator(RRuleLocator):
    """Place ticks on occurrences of each second."""

    def __init__(self, bysecond=None, interval=1, tz=None):
        """
        Parameters
        ----------
        bysecond : int or list of int, default: all seconds
            Ticks will be placed on every second in *bysecond*; by default
            ``range(60)``, i.e. every second.
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        seconds = range(60) if bysecond is None else bysecond
        rule = rrulewrapper(SECONDLY, interval=interval, bysecond=seconds)
        super().__init__(rule, tz=tz)
class MicrosecondLocator(DateLocator):
    """
    Place ticks on regular intervals of one or more microsecond(s).

    .. note::

        By default, Matplotlib uses a floating point representation of time
        in days since the epoch, so plotting data with microsecond time
        resolution does not work well for dates that are far (about 70
        years) from the epoch (check with `~.dates.get_epoch`).

        If you want sub-microsecond resolution time plots, it is strongly
        recommended to use floating point seconds, not datetime-like time
        representation.

        If you really must use datetime.datetime() or similar and still
        need microsecond precision, change the time origin via
        `.dates.set_epoch` to something closer to the dates being plotted.
        See :doc:`/gallery/ticks/date_precision_and_epochs`.
    """

    def __init__(self, interval=1, tz=None):
        """
        Parameters
        ----------
        interval : int, default: 1
            The interval between each iteration; e.g. ``interval=2`` marks
            every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        super().__init__(tz=tz)
        self._interval = interval
        # Do the actual tick picking in microsecond units via MultipleLocator.
        self._wrapped_locator = ticker.MultipleLocator(interval)

    def set_axis(self, axis):
        # Keep the wrapped locator attached to the same axis.
        self._wrapped_locator.set_axis(axis)
        return super().set_axis(axis)

    def __call__(self):
        try:
            dmin, dmax = self.viewlim_to_dt()
        except ValueError:
            # No data have been set on the axis yet.
            return []
        return self.tick_values(dmin, dmax)

    def tick_values(self, vmin, vmax):
        nmin, nmax = date2num((vmin, vmax))
        # Work relative to the start of the first day to retain precision,
        # converting the span to microseconds for the wrapped locator.
        t0 = np.floor(nmin)
        nmax = nmax - t0
        nmin = nmin - t0
        nmin *= MUSECONDS_PER_DAY
        nmax *= MUSECONDS_PER_DAY
        micro_ticks = self._wrapped_locator.tick_values(nmin, nmax)
        # Map back to days-since-epoch units.
        return micro_ticks / MUSECONDS_PER_DAY + t0

    def _get_unit(self):
        # docstring inherited
        return 1. / MUSECONDS_PER_DAY

    def _get_interval(self):
        # docstring inherited
        return self._interval
class DateConverter(units.ConversionInterface):
    """
    Converter for `datetime.date` and `datetime.datetime` data, or for
    date/time data represented as it would be converted by `date2num`.

    The 'unit' tag for such data is None or a `~datetime.tzinfo` instance.
    """

    def __init__(self, *, interval_multiples=True):
        self._interval_multiples = interval_multiples
        super().__init__()

    def axisinfo(self, unit, axis):
        """
        Return the `~matplotlib.units.AxisInfo` for *unit*.

        *unit* is a `~datetime.tzinfo` instance or None.
        The *axis* argument is required but not used.
        """
        majloc = AutoDateLocator(tz=unit,
                                 interval_multiples=self._interval_multiples)
        majfmt = AutoDateFormatter(majloc, tz=unit)
        return units.AxisInfo(
            majloc=majloc, majfmt=majfmt, label='',
            default_limits=(datetime.date(1970, 1, 1),
                            datetime.date(1970, 1, 2)))

    @staticmethod
    def convert(value, unit, axis):
        """
        If *value* is not already a number or sequence of numbers, convert it
        with `date2num`.

        The *unit* and *axis* arguments are not used.
        """
        return date2num(value)

    @staticmethod
    def default_units(x, axis):
        """
        Return the `~datetime.tzinfo` instance of *x* or of its first element,
        or None
        """
        if isinstance(x, np.ndarray):
            x = x.ravel()
        try:
            x = cbook._safe_first_finite(x)
        except (TypeError, StopIteration):
            # Not a sequence, or no finite element; inspect x itself.
            pass
        return getattr(x, 'tzinfo', None)
class ConciseDateConverter(DateConverter):
    # docstring inherited

    def __init__(self, formats=None, zero_formats=None, offset_formats=None,
                 show_offset=True, *, interval_multiples=True):
        # Format options are stored verbatim and forwarded to the
        # ConciseDateFormatter constructed in axisinfo().
        self._formats = formats
        self._zero_formats = zero_formats
        self._offset_formats = offset_formats
        self._show_offset = show_offset
        self._interval_multiples = interval_multiples
        super().__init__()

    def axisinfo(self, unit, axis):
        # docstring inherited
        majloc = AutoDateLocator(tz=unit,
                                 interval_multiples=self._interval_multiples)
        majfmt = ConciseDateFormatter(majloc, tz=unit,
                                      formats=self._formats,
                                      zero_formats=self._zero_formats,
                                      offset_formats=self._offset_formats,
                                      show_offset=self._show_offset)
        return units.AxisInfo(
            majloc=majloc, majfmt=majfmt, label='',
            default_limits=(datetime.date(1970, 1, 1),
                            datetime.date(1970, 1, 2)))
class _SwitchableDateConverter:
    """
    Helper converter-like object that generates and dispatches to
    temporary ConciseDateConverter or DateConverter instances based on
    :rc:`date.converter` and :rc:`date.interval_multiples`.
    """

    @staticmethod
    def _get_converter():
        # Re-read rcParams on every dispatch so runtime changes take effect.
        # The dict lookup intentionally raises KeyError for invalid values.
        cls = {"concise": ConciseDateConverter,
               "auto": DateConverter}[mpl.rcParams["date.converter"]]
        return cls(interval_multiples=mpl.rcParams["date.interval_multiples"])

    def axisinfo(self, *args, **kwargs):
        return self._get_converter().axisinfo(*args, **kwargs)

    def default_units(self, *args, **kwargs):
        return self._get_converter().default_units(*args, **kwargs)

    def convert(self, *args, **kwargs):
        return self._get_converter().convert(*args, **kwargs)
# Register a single switchable converter for NumPy datetime64 and the stdlib
# date/datetime types, so the axes' unit machinery picks it up automatically.
units.registry[np.datetime64] = \
    units.registry[datetime.date] = \
    units.registry[datetime.datetime] = \
    _SwitchableDateConverter()
venv\Lib\site-packages\matplotlib\dviread.py
"""
A module for reading dvi files output by TeX. Several limitations make
this not (currently) useful as a general-purpose dvi preprocessor, but
it is currently used by the pdf backend for processing usetex text.
Interface::
with Dvi(filename, 72) as dvi:
# iterate over pages:
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x, y, font, glyph, width in page.text:
fontname = font.texname
pointsize = font.size
...
for x, y, height, width in page.boxes:
...
"""
from collections import namedtuple
import enum
from functools import lru_cache, partial, wraps
import logging
import os
from pathlib import Path
import re
import struct
import subprocess
import sys
import numpy as np
from matplotlib import _api, cbook
_log = logging.getLogger(__name__)
# Many dvi related files are looked for by external processes, require
# additional parsing, and are used many times per rendering, which is why they
# are cached using lru_cache().
# Dvi is a bytecode format documented in
# https://ctan.org/pkg/dvitype
# https://texdoc.org/serve/dvitype.pdf/0
#
# The file consists of a preamble, some number of pages, a postamble,
# and a finale. Different opcodes are allowed in different contexts,
# so the Dvi object has a parser state:
#
# pre: expecting the preamble
# outer: between pages (followed by a page or the postamble,
# also e.g. font definitions are allowed)
# page: processing a page
# post_post: state after the postamble (our current implementation
# just stops reading)
# finale: the finale (unimplemented in our current implementation)
# Parser states for the dvi bytecode (see the state-machine notes above).
_dvistate = enum.Enum('DviState', 'pre outer inpage post_post finale')

# The marks on a page consist of text and boxes. A page also has dimensions.
Page = namedtuple('Page', 'text boxes height width descent')
Box = namedtuple('Box', 'x y height width')
# Also a namedtuple, for backcompat.
class Text(namedtuple('Text', 'x y font glyph width')):
    """
    A glyph in the dvi file.

    The *x* and *y* attributes directly position the glyph.  The *font*,
    *glyph*, and *width* attributes are kept public for back-compatibility,
    but users wanting to draw the glyph themselves are encouraged to instead
    load the font specified by `font_path` at `font_size`, warp it with the
    effects specified by `font_effects`, and load the glyph specified by
    `glyph_name_or_index`.
    """

    def _get_pdftexmap_entry(self):
        # Look up this glyph's font in pdftex.map via PsfontsMap.
        return PsfontsMap(find_tex_file("pdftex.map"))[self.font.texname]

    @property
    def font_path(self):
        """The `~pathlib.Path` to the font for this glyph."""
        psfont = self._get_pdftexmap_entry()
        if psfont.filename is None:
            raise ValueError("No usable font file found for {} ({}); "
                             "the font may lack a Type-1 version"
                             .format(psfont.psname.decode("ascii"),
                                     psfont.texname.decode("ascii")))
        return Path(psfont.filename)

    @property
    def font_size(self):
        """The font size."""
        return self.font.size

    @property
    def font_effects(self):
        """
        The "font effects" dict for this glyph.

        This dict contains the values for this glyph of SlantFont and
        ExtendFont (if any), read off :file:`pdftex.map`.
        """
        return self._get_pdftexmap_entry().effects

    @property
    def glyph_name_or_index(self):
        """
        Either the glyph name or the native charmap glyph index.

        If :file:`pdftex.map` specifies an encoding for this glyph's font, that
        is a mapping of glyph indices to Adobe glyph names; use it to convert
        dvi indices to glyph names.  Callers can then convert glyph names to
        glyph indices (with FT_Get_Name_Index/get_name_index), and load the
        glyph using FT_Load_Glyph/load_glyph.

        If :file:`pdftex.map` specifies no encoding, the indices directly map
        to the font's "native" charmap; glyphs should directly load using
        FT_Load_Char/load_char after selecting the native charmap.
        """
        entry = self._get_pdftexmap_entry()
        return (_parse_enc(entry.encoding)[self.glyph]
                if entry.encoding is not None else self.glyph)
# Opcode argument parsing
#
# Each of the following functions takes a Dvi object and delta, which is the
# difference between the opcode and the minimum opcode with the same meaning.
# Dvi opcodes often encode the number of argument bytes in this delta.
# The keys below are the specification strings accepted by _dispatch's *args*.
_arg_mapping = dict(
    # raw: Return delta as is.
    raw=lambda dvi, delta: delta,
    # u1: Read 1 byte as an unsigned number.
    u1=lambda dvi, delta: dvi._read_arg(1, signed=False),
    # u4: Read 4 bytes as an unsigned number.
    u4=lambda dvi, delta: dvi._read_arg(4, signed=False),
    # s4: Read 4 bytes as a signed number.
    s4=lambda dvi, delta: dvi._read_arg(4, signed=True),
    # slen: Read delta bytes as a signed number, or None if delta is None.
    slen=lambda dvi, delta: dvi._read_arg(delta, signed=True) if delta else None,
    # slen1: Read (delta + 1) bytes as a signed number.
    slen1=lambda dvi, delta: dvi._read_arg(delta + 1, signed=True),
    # ulen1: Read (delta + 1) bytes as an unsigned number.
    ulen1=lambda dvi, delta: dvi._read_arg(delta + 1, signed=False),
    # olen1: Read (delta + 1) bytes as an unsigned number if less than 4 bytes,
    # as a signed number if 4 bytes.
    olen1=lambda dvi, delta: dvi._read_arg(delta + 1, signed=(delta == 3)),
    )
def _dispatch(table, min, max=None, state=None, args=('raw',)):
    """
    Decorator for dispatch by opcode. Sets the values in *table*
    from *min* to *max* to this method, adds a check that the Dvi state
    matches *state* if not None, reads arguments from the file according
    to *args*.

    Parameters
    ----------
    table : dict[int, callable]
        The dispatch table to be filled in.
    min, max : int
        Range of opcodes that calls the registered function; *max* defaults to
        *min*.
    state : _dvistate, optional
        State of the Dvi object in which these opcodes are allowed.
    args : list[str], default: ['raw']
        Sequence of argument specifications:

        - 'raw': opcode minus minimum
        - 'u1': read one unsigned byte
        - 'u4': read four bytes, treat as an unsigned number
        - 's4': read four bytes, treat as a signed number
        - 'slen': read (opcode - minimum) bytes, treat as signed
        - 'slen1': read (opcode - minimum + 1) bytes, treat as signed
        - 'ulen1': read (opcode - minimum + 1) bytes, treat as unsigned
        - 'olen1': read (opcode - minimum + 1) bytes, treat as unsigned
          if under four bytes, signed if four bytes
    """
    def decorate(method):
        # Resolve the argument-reader functions once, at decoration time.
        get_args = [_arg_mapping[x] for x in args]

        @wraps(method)
        def wrapper(self, byte):
            if state is not None and self.state != state:
                raise ValueError("state precondition failed")
            return method(self, *[f(self, byte-min) for f in get_args])
        if max is None:
            table[min] = wrapper
        else:
            for i in range(min, max+1):
                # Guard against two handlers claiming the same opcode.
                assert table[i] is None
                table[i] = wrapper
        return wrapper
    return decorate
class Dvi:
    """
    A reader for a dvi ("device-independent") file, as produced by TeX.

    The current implementation can only iterate through pages in order,
    and does not even attempt to verify the postamble.

    This class can be used as a context manager to close the underlying
    file upon exit. Pages can be read via iteration. Here is an overly
    simple way to extract text without trying to detect whitespace::

        >>> with matplotlib.dviread.Dvi('input.dvi', 72) as dvi:
        ...     for page in dvi:
        ...         print(''.join(chr(t.glyph) for t in page.text))
    """
    # dispatch table
    _dtable = [None] * 256
    _dispatch = partial(_dispatch, _dtable)

    def __init__(self, filename, dpi):
        """
        Read the data from the file named *filename* and convert
        TeX's internal units to units of *dpi* per inch.
        *dpi* only sets the units and does not limit the resolution.
        Use None to return TeX's internal units.
        """
        _log.debug('Dvi: %s', filename)
        self.file = open(filename, 'rb')
        self.dpi = dpi
        # Maps the dvi font number to a DviFont (or an _ExceptionInfo for
        # fonts that failed to load; see _fnt_def_real).
        self.fonts = {}
        self.state = _dvistate.pre
        # Set when a glyph from a missing font is used; raised in _read.
        self._missing_font = None

    def __enter__(self):
        """Context manager enter method, does nothing."""
        return self

    def __exit__(self, etype, evalue, etrace):
        """
        Context manager exit method, closes the underlying file if it is open.
        """
        self.close()

    def __iter__(self):
        """
        Iterate through the pages of the file.

        Yields
        ------
        Page
            Details of all the text and box objects on the page.
            The Page tuple contains lists of Text and Box tuples and
            the page dimensions, and the Text and Box tuples contain
            coordinates transformed into a standard Cartesian
            coordinate system at the dpi value given when initializing.
            The coordinates are floating point numbers, but otherwise
            precision is not lost and coordinate values are not clipped to
            integers.
        """
        while self._read():
            yield self._output()

    def close(self):
        """Close the underlying file if it is open."""
        if not self.file.closed:
            self.file.close()

    def _output(self):
        """
        Output the text and boxes belonging to the most recent page.
        page = dvi._output()
        """
        # Compute the bounding box of all marks on the page; maxy_pure
        # tracks the lowest baseline (ignoring glyph depth below it).
        minx = miny = np.inf
        maxx = maxy = -np.inf
        maxy_pure = -np.inf
        for elt in self.text + self.boxes:
            if isinstance(elt, Box):
                x, y, h, w = elt
                e = 0  # zero depth
            else:  # glyph
                x, y, font, g, w = elt
                h, e = font._height_depth_of(g)
            minx = min(minx, x)
            miny = min(miny, y - h)
            maxx = max(maxx, x + w)
            maxy = max(maxy, y + e)
            maxy_pure = max(maxy_pure, y)
        if self._baseline_v is not None:
            maxy_pure = self._baseline_v  # This should normally be the case.
            self._baseline_v = None

        if not self.text and not self.boxes:  # Avoid infs/nans from inf+/-inf.
            return Page(text=[], boxes=[], width=0, height=0, descent=0)

        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates
            return Page(text=self.text, boxes=self.boxes,
                        width=maxx-minx, height=maxy_pure-miny,
                        descent=maxy-maxy_pure)

        # convert from TeX's "scaled points" to dpi units
        d = self.dpi / (72.27 * 2**16)
        descent = (maxy - maxy_pure) * d

        text = [Text((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
                for (x, y, f, g, w) in self.text]
        boxes = [Box((x-minx)*d, (maxy-y)*d - descent, h*d, w*d)
                 for (x, y, h, w) in self.boxes]

        return Page(text=text, boxes=boxes, width=(maxx-minx)*d,
                    height=(maxy_pure-miny)*d, descent=descent)

    def _read(self):
        """
        Read one page from the file. Return True if successful,
        False if there were no more pages.
        """
        # Pages appear to start with the sequence
        #   bop (begin of page)
        #   xxx comment
        #   # if using chemformula
        #   down
        #   push
        #     down
        #   # if using xcolor
        #   down
        #   push
        #     down (possibly multiple)
        #     push  <= here, v is the baseline position.
        #     etc.
        # (dviasm is useful to explore this structure.)
        # Thus, we use the vertical position at the first time the stack depth
        # reaches 3, while at least three "downs" have been executed (excluding
        # those popped out (corresponding to the chemformula preamble)), as the
        # baseline (the "down" count is necessary to handle xcolor).
        down_stack = [0]
        self._baseline_v = None
        while True:
            byte = self.file.read(1)[0]
            # Dispatch the opcode to its registered handler.
            self._dtable[byte](self, byte)
            if self._missing_font:
                raise self._missing_font.to_exception()
            name = self._dtable[byte].__name__
            if name == "_push":
                down_stack.append(down_stack[-1])
            elif name == "_pop":
                down_stack.pop()
            elif name == "_down":
                down_stack[-1] += 1
            if (self._baseline_v is None
                    and len(getattr(self, "stack", [])) == 3
                    and down_stack[-1] >= 4):
                self._baseline_v = self.v
            if byte == 140:  # end of page
                return True
            if self.state is _dvistate.post_post:  # end of file
                self.close()
                return False

    def _read_arg(self, nbytes, signed=False):
        """
        Read and return a big-endian integer *nbytes* long.
        Signedness is determined by the *signed* keyword.
        """
        return int.from_bytes(self.file.read(nbytes), "big", signed=signed)

    # Opcodes 0-127: typeset the character and move right by its width.
    @_dispatch(min=0, max=127, state=_dvistate.inpage)
    def _set_char_immediate(self, char):
        self._put_char_real(char)
        if isinstance(self.fonts[self.f], cbook._ExceptionInfo):
            # Missing font: width is unknown; _read will raise.
            return
        self.h += self.fonts[self.f]._width_of(char)

    @_dispatch(min=128, max=131, state=_dvistate.inpage, args=('olen1',))
    def _set_char(self, char):
        self._put_char_real(char)
        if isinstance(self.fonts[self.f], cbook._ExceptionInfo):
            return
        self.h += self.fonts[self.f]._width_of(char)

    @_dispatch(132, state=_dvistate.inpage, args=('s4', 's4'))
    def _set_rule(self, a, b):
        self._put_rule_real(a, b)
        self.h += b

    @_dispatch(min=133, max=136, state=_dvistate.inpage, args=('olen1',))
    def _put_char(self, char):
        # "put" typesets without moving the reference point.
        self._put_char_real(char)

    def _put_char_real(self, char):
        font = self.fonts[self.f]
        if isinstance(font, cbook._ExceptionInfo):
            self._missing_font = font
        elif font._vf is None:
            self.text.append(Text(self.h, self.v, font, char,
                                  font._width_of(char)))
        else:
            # Virtual font: expand the glyph's packet of sub-glyphs/boxes,
            # rescaled into this font's coordinate system.
            scale = font._scale
            for x, y, f, g, w in font._vf[char].text:
                newf = DviFont(scale=_mul2012(scale, f._scale),
                               tfm=f._tfm, texname=f.texname, vf=f._vf)
                self.text.append(Text(self.h + _mul2012(x, scale),
                                      self.v + _mul2012(y, scale),
                                      newf, g, newf._width_of(g)))
            self.boxes.extend([Box(self.h + _mul2012(x, scale),
                                   self.v + _mul2012(y, scale),
                                   _mul2012(a, scale), _mul2012(b, scale))
                               for x, y, a, b in font._vf[char].boxes])

    @_dispatch(137, state=_dvistate.inpage, args=('s4', 's4'))
    def _put_rule(self, a, b):
        self._put_rule_real(a, b)

    def _put_rule_real(self, a, b):
        # Rules with non-positive height or width are not drawn (dvi spec).
        if a > 0 and b > 0:
            self.boxes.append(Box(self.h, self.v, a, b))

    @_dispatch(138)
    def _nop(self, _):
        pass

    @_dispatch(139, state=_dvistate.outer, args=('s4',)*11)
    def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
        # Begin a page: reset registers and start fresh mark lists.
        self.state = _dvistate.inpage
        self.h = self.v = self.w = self.x = self.y = self.z = 0
        self.stack = []
        self.text = []          # list of Text objects
        self.boxes = []         # list of Box objects

    @_dispatch(140, state=_dvistate.inpage)
    def _eop(self, _):
        # End a page: discard the per-page registers.
        self.state = _dvistate.outer
        del self.h, self.v, self.w, self.x, self.y, self.z, self.stack

    @_dispatch(141, state=_dvistate.inpage)
    def _push(self, _):
        self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))

    @_dispatch(142, state=_dvistate.inpage)
    def _pop(self, _):
        self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()

    @_dispatch(min=143, max=146, state=_dvistate.inpage, args=('slen1',))
    def _right(self, b):
        self.h += b

    @_dispatch(min=147, max=151, state=_dvistate.inpage, args=('slen',))
    def _right_w(self, new_w):
        # w0 (no argument) repeats the previous w movement.
        if new_w is not None:
            self.w = new_w
        self.h += self.w

    @_dispatch(min=152, max=156, state=_dvistate.inpage, args=('slen',))
    def _right_x(self, new_x):
        if new_x is not None:
            self.x = new_x
        self.h += self.x

    @_dispatch(min=157, max=160, state=_dvistate.inpage, args=('slen1',))
    def _down(self, a):
        self.v += a

    @_dispatch(min=161, max=165, state=_dvistate.inpage, args=('slen',))
    def _down_y(self, new_y):
        if new_y is not None:
            self.y = new_y
        self.v += self.y

    @_dispatch(min=166, max=170, state=_dvistate.inpage, args=('slen',))
    def _down_z(self, new_z):
        if new_z is not None:
            self.z = new_z
        self.v += self.z

    # Opcodes 171-234 select fonts 0-63 directly.
    @_dispatch(min=171, max=234, state=_dvistate.inpage)
    def _fnt_num_immediate(self, k):
        self.f = k

    @_dispatch(min=235, max=238, state=_dvistate.inpage, args=('olen1',))
    def _fnt_num(self, new_f):
        self.f = new_f

    @_dispatch(min=239, max=242, args=('ulen1',))
    def _xxx(self, datalen):
        # \special{} payloads are logged but otherwise ignored.
        special = self.file.read(datalen)
        _log.debug(
            'Dvi._xxx: encountered special: %s',
            ''.join([chr(ch) if 32 <= ch < 127 else '<%02x>' % ch
                     for ch in special]))

    @_dispatch(min=243, max=246, args=('olen1', 'u4', 'u4', 'u4', 'u1', 'u1'))
    def _fnt_def(self, k, c, s, d, a, l):
        self._fnt_def_real(k, c, s, d, a, l)

    def _fnt_def_real(self, k, c, s, d, a, l):
        # The font name is the last *l* bytes of the (a + l)-byte name field.
        n = self.file.read(a + l)
        fontname = n[-l:].decode('ascii')
        try:
            tfm = _tfmfile(fontname)
        except FileNotFoundError as exc:
            # Explicitly allow defining missing fonts for Vf support; we only
            # register an error when trying to load a glyph from a missing font
            # and throw that error in Dvi._read. For Vf, _finalize_packet
            # checks whether a missing glyph has been used, and in that case
            # skips the glyph definition.
            self.fonts[k] = cbook._ExceptionInfo.from_exception(exc)
            return
        if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
            raise ValueError(f'tfm checksum mismatch: {n}')
        try:
            vf = _vffile(fontname)
        except FileNotFoundError:
            vf = None
        self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)

    @_dispatch(247, state=_dvistate.pre, args=('u1', 'u4', 'u4', 'u4', 'u1'))
    def _pre(self, i, num, den, mag, k):
        self.file.read(k)  # comment in the dvi file
        if i != 2:
            raise ValueError(f"Unknown dvi format {i}")
        if num != 25400000 or den != 7227 * 2**16:
            raise ValueError("Nonstandard units in dvi file")
            # meaning: TeX always uses those exact values, so it
            # should be enough for us to support those
            # (There are 72.27 pt to an inch so 7227 pt =
            # 7227 * 2**16 sp to 100 in. The numerator is multiplied
            # by 10^5 to get units of 10**-7 meters.)
        if mag != 1000:
            raise ValueError("Nonstandard magnification in dvi file")
            # meaning: LaTeX seems to frown on setting \mag, so
            # I think we can assume this is constant
        self.state = _dvistate.outer

    @_dispatch(248, state=_dvistate.outer)
    def _post(self, _):
        self.state = _dvistate.post_post
        # TODO: actually read the postamble and finale?
        # currently post_post just triggers closing the file

    @_dispatch(249)
    def _post_post(self, _):
        raise NotImplementedError

    @_dispatch(min=250, max=255)
    def _malformed(self, offset):
        raise ValueError(f"unknown command: byte {250 + offset}")
class DviFont:
    """
    Encapsulation of a font that a DVI file can refer to.

    This class holds a font's texname and size, supports comparison,
    and knows the widths of glyphs in the same units as the AFM file.
    There are also internal attributes (for use by dviread.py) that
    are *not* used for comparison.

    The size is in Adobe points (converted from TeX points).

    Parameters
    ----------
    scale : float
        Factor by which the font is scaled from its natural size.
    tfm : Tfm
        TeX font metrics for this font
    texname : bytes
        Name of the font as used internally by TeX and friends, as an ASCII
        bytestring. This is usually very different from any external font
        names; `PsfontsMap` can be used to find the external name of the font.
    vf : Vf
        A TeX "virtual font" file, or None if this font is not virtual.

    Attributes
    ----------
    texname : bytes
    size : float
        Size of the font in Adobe points, converted from the slightly
        smaller TeX points.
    widths : list
        Widths of glyphs in glyph-space units, typically 1/1000ths of
        the point size.
    """
    __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')

    def __init__(self, scale, tfm, texname, vf):
        _api.check_isinstance(bytes, texname=texname)
        self._scale = scale
        self._tfm = tfm
        self.texname = texname
        self._vf = vf
        # Convert from TeX points (72.27/inch) to Adobe points (72/inch);
        # the 2**16 divisor undoes the scaled-point fixed-point encoding.
        self.size = scale * (72.0 / (72.27 * 2**16))
        try:
            nchars = max(tfm.width) + 1
        except ValueError:
            # Empty width table.
            nchars = 0
        # tfm widths appear to be 20-bit fixed-point fractions of the design
        # size (hence the >> 20); scaled to 1/1000ths of the point size.
        self.widths = [(1000*tfm.width.get(char, 0)) >> 20
                       for char in range(nchars)]

    def __eq__(self, other):
        # Comparison uses only the public identity (name and size), not the
        # internal metrics.
        return (type(self) is type(other)
                and self.texname == other.texname and self.size == other.size)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return f"<{type(self).__name__}: {self.texname}>"

    def _width_of(self, char):
        """Width of char in dvi units."""
        width = self._tfm.width.get(char, None)
        if width is not None:
            return _mul2012(width, self._scale)
        _log.debug('No width for char %d in font %s.', char, self.texname)
        return 0

    def _height_depth_of(self, char):
        """Height and depth of char in dvi units."""
        result = []
        for metric, name in ((self._tfm.height, "height"),
                             (self._tfm.depth, "depth")):
            value = metric.get(char, None)
            if value is None:
                _log.debug('No %s for char %d in font %s',
                           name, char, self.texname)
                result.append(0)
            else:
                result.append(_mul2012(value, self._scale))
        # cmsyXX (symbols font) glyph 0 ("minus") has a nonzero descent
        # so that TeX aligns equations properly
        # (https://tex.stackexchange.com/q/526103/)
        # but we actually care about the rasterization depth to align
        # the dvipng-generated images.
        if re.match(br'^cmsy\d+$', self.texname) and char == 0:
            result[-1] = 0
        return result
class Vf(Dvi):
    r"""
    A virtual font (\*.vf file) containing subroutines for dvi files.

    Parameters
    ----------
    filename : str or path-like

    Notes
    -----
    The virtual font format is a derivative of dvi:
    http://mirrors.ctan.org/info/knuth/virtual-fonts
    This class reuses some of the machinery of `Dvi`
    but replaces the `_read` loop and dispatch mechanism.

    Examples
    --------
    ::

        vf = Vf(filename)
        glyph = vf[code]
        glyph.text, glyph.boxes, glyph.width
    """

    def __init__(self, filename):
        # dpi=0: vf files carry no physical-size information.
        super().__init__(filename, 0)
        try:
            self._first_font = None
            self._chars = {}
            self._read()
        finally:
            # The whole file is parsed eagerly, so close it immediately.
            self.close()

    def __getitem__(self, code):
        return self._chars[code]

    def _read(self):
        """
        Read all character packets from the file, populating ``self._chars``.
        """
        packet_char = packet_ends = None
        packet_len = packet_width = None
        while True:
            byte = self.file.read(1)[0]
            # If we are in a packet, execute the dvi instructions
            if self.state is _dvistate.inpage:
                byte_at = self.file.tell()-1
                if byte_at == packet_ends:
                    self._finalize_packet(packet_char, packet_width)
                    packet_len = packet_char = packet_width = None
                    # fall through to out-of-packet code
                elif byte_at > packet_ends:
                    raise ValueError("Packet length mismatch in vf file")
                else:
                    # bop/eop (139, 140) and font definitions (>= 243) are
                    # not allowed inside a packet.
                    if byte in (139, 140) or byte >= 243:
                        raise ValueError(f"Inappropriate opcode {byte} in vf file")
                    Dvi._dtable[byte](self, byte)
                    continue
            # We are outside a packet
            if byte < 242:          # a short packet (length given by byte)
                packet_len = byte
                packet_char = self._read_arg(1)
                packet_width = self._read_arg(3)
                packet_ends = self._init_packet(byte)
                self.state = _dvistate.inpage
            elif byte == 242:       # a long packet
                packet_len = self._read_arg(4)
                packet_char = self._read_arg(4)
                packet_width = self._read_arg(4)
                # BUGFIX: previously the return value of _init_packet was
                # discarded and the state was never switched to inpage, so
                # long packets were misparsed as a stream of stray opcodes.
                # Handle them exactly like short packets, per the vf spec.
                packet_ends = self._init_packet(packet_len)
                self.state = _dvistate.inpage
            elif 243 <= byte <= 246:
                # fnt_def1..4: font number k (signed only for fnt_def4),
                # checksum, scale, design size, then a+l bytes of font name.
                k = self._read_arg(byte - 242, byte == 246)
                c = self._read_arg(4)
                s = self._read_arg(4)
                d = self._read_arg(4)
                a = self._read_arg(1)
                l = self._read_arg(1)
                self._fnt_def_real(k, c, s, d, a, l)
                if self._first_font is None:
                    # The first font defined becomes the default font of
                    # every packet (see _init_packet).
                    self._first_font = k
            elif byte == 247:       # preamble
                i = self._read_arg(1)
                k = self._read_arg(1)
                x = self.file.read(k)
                cs = self._read_arg(4)
                ds = self._read_arg(4)
                self._pre(i, x, cs, ds)
            elif byte == 248:       # postamble (just some number of 248s)
                break
            else:
                raise ValueError(f"Unknown vf opcode {byte}")

    def _init_packet(self, pl):
        # Reset the DVI registers and stack for a new glyph packet; return
        # the file offset at which the packet ends (*pl* bytes from here).
        if self.state != _dvistate.outer:
            raise ValueError("Misplaced packet in vf file")
        self.h = self.v = self.w = self.x = self.y = self.z = 0
        self.stack = []
        self.text = []
        self.boxes = []
        self.f = self._first_font
        self._missing_font = None
        return self.file.tell() + pl

    def _finalize_packet(self, packet_char, packet_width):
        if not self._missing_font:  # Otherwise we don't have full glyph definition.
            self._chars[packet_char] = Page(
                text=self.text, boxes=self.boxes, width=packet_width,
                height=None, descent=None)
        self.state = _dvistate.outer

    def _pre(self, i, x, cs, ds):
        if self.state is not _dvistate.pre:
            raise ValueError("pre command in middle of vf file")
        if i != 202:
            # 202 is the only defined vf format identifier.
            raise ValueError(f"Unknown vf format {i}")
        if len(x):
            _log.debug('vf file comment: %s', x)
        self.state = _dvistate.outer
        # cs = checksum, ds = design size
def _mul2012(num1, num2):
"""Multiply two numbers in 20.12 fixed point format."""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
class Tfm:
    """
    A TeX Font Metric file.

    This implementation covers only the bare minimum needed by the Dvi class.

    Parameters
    ----------
    filename : str or path-like

    Attributes
    ----------
    checksum : int
        Used for verifying against the dvi file.
    design_size : int
        Design size of the font (unknown units)
    width, height, depth : dict
        Dimensions of each character, need to be scaled by the factor
        specified in the dvi file. These are dicts because indexing may
        not start from 0.
    """
    __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')

    def __init__(self, filename):
        _log.debug('opening tfm file %s', filename)
        with open(filename, 'rb') as file:
            # The first 24 bytes hold twelve big-endian 16-bit fields; we use
            # fields 2-7: header length lh (in 4-byte words), smallest (bc)
            # and largest (ec) char code, and the sizes of the width, height
            # and depth tables.  All reads below are sequential and
            # order-critical: the tables follow each other in the file.
            header1 = file.read(24)
            lh, bc, ec, nw, nh, nd = struct.unpack('!6H', header1[2:14])
            _log.debug('lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d',
                       lh, bc, ec, nw, nh, nd)
            header2 = file.read(4*lh)
            self.checksum, self.design_size = struct.unpack('!2I', header2[:8])
            # there is also encoding information etc.
            char_info = file.read(4*(ec-bc+1))
            widths = struct.unpack(f'!{nw}i', file.read(4*nw))
            heights = struct.unpack(f'!{nh}i', file.read(4*nh))
            depths = struct.unpack(f'!{nd}i', file.read(4*nd))
        self.width = {}
        self.height = {}
        self.depth = {}
        for idx, char in enumerate(range(bc, ec+1)):
            # Per char: byte 0 indexes the width table; byte 1 packs the
            # height index (high nibble) and depth index (low nibble).
            byte0 = char_info[4*idx]
            byte1 = char_info[4*idx+1]
            self.width[char] = widths[byte0]
            self.height[char] = heights[byte1 >> 4]
            self.depth[char] = depths[byte1 & 0xf]
# One parsed psfonts.map entry: the TeX name, the PostScript name,
# SlantFont/ExtendFont effects, and the encoding/font file paths (either of
# which may be None).  See PsfontsMap._parse_and_cache_line.
PsFont = namedtuple('PsFont', 'texname psname effects encoding filename')
class PsfontsMap:
    """
    A psfonts.map formatted file, mapping TeX fonts to PS fonts.

    Parameters
    ----------
    filename : str or path-like

    Notes
    -----
    For historical reasons, TeX knows many Type-1 fonts by different
    names than the outside world. (For one thing, the names have to
    fit in eight characters.) Also, TeX's native fonts are not Type-1
    but Metafont, which is nontrivial to convert to PostScript except
    as a bitmap. While high-quality conversions to Type-1 format exist
    and are shipped with modern TeX distributions, we need to know
    which Type-1 fonts are the counterparts of which native fonts. For
    these reasons a mapping is needed from internal font names to font
    file names.

    A texmf tree typically includes mapping files called e.g.
    :file:`psfonts.map`, :file:`pdftex.map`, or :file:`dvipdfm.map`.
    The file :file:`psfonts.map` is used by :program:`dvips`,
    :file:`pdftex.map` by :program:`pdfTeX`, and :file:`dvipdfm.map`
    by :program:`dvipdfm`. :file:`psfonts.map` might avoid embedding
    the 35 PostScript fonts (i.e., have no filename for them, as in
    the Times-Bold example above), while the pdf-related files perhaps
    only avoid the "Base 14" pdf fonts. But the user may have
    configured these files differently.

    Examples
    --------
    >>> map = PsfontsMap(find_tex_file('pdftex.map'))
    >>> entry = map[b'ptmbo8r']
    >>> entry.texname
    b'ptmbo8r'
    >>> entry.psname
    b'Times-Bold'
    >>> entry.encoding
    '/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
    >>> entry.effects
    {'slant': 0.16700000000000001}
    >>> entry.filename
    """
    __slots__ = ('_filename', '_unparsed', '_parsed')

    # Create a filename -> PsfontsMap cache, so that calling
    # `PsfontsMap(filename)` with the same filename a second time immediately
    # returns the same object.
    @lru_cache
    def __new__(cls, filename):
        self = object.__new__(cls)
        self._filename = os.fsdecode(filename)
        # Some TeX distributions have enormous pdftex.map files which would
        # take hundreds of milliseconds to parse, but it is easy enough to just
        # store the unparsed lines (keyed by the first word, which is the
        # texname) and parse them on-demand.
        with open(filename, 'rb') as file:
            self._unparsed = {}
            for line in file:
                tfmname = line.split(b' ', 1)[0]
                self._unparsed.setdefault(tfmname, []).append(line)
        self._parsed = {}
        return self

    def __getitem__(self, texname):
        assert isinstance(texname, bytes)
        if texname in self._unparsed:
            # Parse lazily.  A texname can appear on several lines; the
            # first one that parses successfully wins.
            for line in self._unparsed.pop(texname):
                if self._parse_and_cache_line(line):
                    break
        try:
            return self._parsed[texname]
        except KeyError:
            raise LookupError(
                f"An associated PostScript font (required by Matplotlib) "
                f"could not be found for TeX font {texname.decode('ascii')!r} "
                f"in {self._filename!r}; this problem can often be solved by "
                f"installing a suitable PostScript font package in your TeX "
                f"package manager") from None

    def _parse_and_cache_line(self, line):
        """
        Parse a line in the font mapping file.

        The format is (partially) documented at
        http://mirrors.ctan.org/systems/doc/pdftex/manual/pdftex-a.pdf
        https://tug.org/texinfohtml/dvips.html#psfonts_002emap
        Each line can have the following fields:

        - tfmname (first, only required field),
        - psname (defaults to tfmname, must come immediately after tfmname if
          present),
        - fontflags (integer, must come immediately after psname if present,
          ignored by us),
        - special (SlantFont and ExtendFont, only field that is double-quoted),
        - fontfile, encodingfile (optional, prefixed by <, <<, or <[; << always
          precedes a font, <[ always precedes an encoding, < can precede either
          but then an encoding file must have extension .enc; < and << also
          request different font subsetting behaviors but we ignore that; < can
          be separated from the filename by whitespace).

        special, fontfile, and encodingfile can appear in any order.

        Returns True if the line was parsed and cached, None otherwise.
        """
        # If the map file specifies multiple encodings for a font, we
        # follow pdfTeX in choosing the last one specified. Such
        # entries are probably mistakes but they have occurred.
        # https://tex.stackexchange.com/q/10826/
        if not line or line.startswith((b" ", b"%", b"*", b";", b"#")):
            return
        tfmname = basename = special = encodingfile = fontfile = None
        is_subsetted = is_t1 = is_truetype = False
        # Each token is either a double-quoted string (the "special" field)
        # or a run of non-whitespace.
        matches = re.finditer(br'"([^"]*)(?:"|$)|(\S+)', line)
        for match in matches:
            quoted, unquoted = match.groups()
            if unquoted:
                if unquoted.startswith(b"<<"):  # font
                    fontfile = unquoted[2:]
                elif unquoted.startswith(b"<["):  # encoding
                    encodingfile = unquoted[2:]
                elif unquoted.startswith(b"<"):  # font or encoding
                    word = (
                        # foo
                        unquoted[1:]
                        # < by itself => read the next word
                        # (this advances *matches* in-place, consuming the
                        # following token from the surrounding for loop)
                        or next(filter(None, next(matches).groups())))
                    if word.endswith(b".enc"):
                        encodingfile = word
                    else:
                        fontfile = word
                        is_subsetted = True
                elif tfmname is None:
                    tfmname = unquoted
                elif basename is None:
                    basename = unquoted
            elif quoted:
                special = quoted
        effects = {}
        if special:
            # Scan right-to-left: each effect keyword is preceded by its
            # value, so next(words) after the keyword yields that value.
            words = reversed(special.split())
            for word in words:
                if word == b"SlantFont":
                    effects["slant"] = float(next(words))
                elif word == b"ExtendFont":
                    effects["extend"] = float(next(words))
        # Verify some properties of the line that would cause it to be ignored
        # otherwise.
        if fontfile is not None:
            if fontfile.endswith((b".ttf", b".ttc")):
                is_truetype = True
            elif not fontfile.endswith(b".otf"):
                is_t1 = True
        elif basename is not None:
            is_t1 = True
        if is_truetype and is_subsetted and encodingfile is None:
            return
        if not is_t1 and ("slant" in effects or "extend" in effects):
            return
        if abs(effects.get("slant", 0)) > 1:
            return
        if abs(effects.get("extend", 0)) > 2:
            return
        if basename is None:
            basename = tfmname
        if encodingfile is not None:
            encodingfile = find_tex_file(encodingfile)
        if fontfile is not None:
            fontfile = find_tex_file(fontfile)
        self._parsed[tfmname] = PsFont(
            texname=tfmname, psname=basename, effects=effects,
            encoding=encodingfile, filename=fontfile)
        return True
def _parse_enc(path):
r"""
Parse a \*.enc file referenced from a psfonts.map style file.
The format supported by this function is a tiny subset of PostScript.
Parameters
----------
path : `os.PathLike`
Returns
-------
list
The nth entry of the list is the PostScript glyph name of the nth
glyph.
"""
no_comments = re.sub("%.*", "", Path(path).read_text(encoding="ascii"))
array = re.search(r"(?s)\[(.*)\]", no_comments).group(1)
lines = [line for line in array.split() if line]
if all(line.startswith("/") for line in lines):
return [line[1:] for line in lines]
else:
raise ValueError(f"Failed to parse {path} as Postscript encoding")
class _LuatexKpsewhich:
    # Wrapper around a single long-lived luatex process that answers
    # kpsewhich-style file lookups; much faster than spawning kpsewhich once
    # per query.

    @lru_cache  # A singleton.
    def __new__(cls):
        self = object.__new__(cls)
        self._proc = self._new_proc()
        return self

    def _new_proc(self):
        # kpsewhich.lua reads one query per line on stdin and answers one
        # line per query on stdout.
        return subprocess.Popen(
            ["luatex", "--luaonly",
             str(cbook._get_data_path("kpsewhich.lua"))],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def search(self, filename):
        """Return the path to *filename*, or None if not found."""
        if self._proc.poll() is not None:  # Dead, restart it.
            self._proc = self._new_proc()
        self._proc.stdin.write(os.fsencode(filename) + b"\n")
        self._proc.stdin.flush()
        out = self._proc.stdout.readline().rstrip()
        # The lua helper prints "nil" when the lookup fails.
        return None if out == b"nil" else os.fsdecode(out)
@lru_cache
def find_tex_file(filename):
    """
    Find a file in the texmf tree using kpathsea_.

    The kpathsea library, provided by most existing TeX distributions, both
    on Unix-like systems and on Windows (MikTeX), is invoked via a long-lived
    luatex process if luatex is installed, or via kpsewhich otherwise.

    .. _kpathsea: https://www.tug.org/kpathsea/

    Parameters
    ----------
    filename : str or path-like

    Raises
    ------
    FileNotFoundError
        If the file is not found.
    """
    # we expect these to always be ascii encoded, but use utf-8
    # out of caution
    if isinstance(filename, bytes):
        filename = filename.decode('utf-8', errors='replace')
    try:
        lk = _LuatexKpsewhich()
    except FileNotFoundError:
        lk = None  # Fallback to directly calling kpsewhich, as below.
    if lk:
        path = lk.search(filename)
    else:
        if sys.platform == 'win32':
            # On Windows only, kpathsea can use utf-8 for cmd args and output.
            # The `command_line_encoding` environment variable is set to force
            # it to always use utf-8 encoding. See Matplotlib issue #11848.
            kwargs = {'env': {**os.environ, 'command_line_encoding': 'utf-8'},
                      'encoding': 'utf-8'}
        else:  # On POSIX, run through the equivalent of os.fsdecode().
            kwargs = {'encoding': sys.getfilesystemencoding(),
                      'errors': 'surrogateescape'}
        try:
            path = (cbook._check_and_log_subprocess(['kpsewhich', filename],
                                                    _log, **kwargs)
                    .rstrip('\n'))
        except (FileNotFoundError, RuntimeError):
            # kpsewhich itself missing, or the lookup failed.
            path = None
    if path:
        return path
    else:
        raise FileNotFoundError(
            f"Matplotlib's TeX implementation searched for a file named "
            f"{filename!r} in your texmf tree, but could not find it")
@lru_cache
def _fontfile(cls, suffix, texname):
    # Locate *texname* + *suffix* in the texmf tree and parse it with *cls*
    # (Tfm or Vf); cached so each font file is read only once.
    return cls(find_tex_file(texname + suffix))
# Cached loaders for TeX font metric (*.tfm) and virtual font (*.vf) files.
_tfmfile = partial(_fontfile, Tfm, ".tfm")
_vffile = partial(_fontfile, Vf, ".vf")
if __name__ == '__main__':
    # Debug helper: dump the glyphs and boxes of every page of a dvi file.
    # Usage: python -m matplotlib.dviread filename [dpi]
    from argparse import ArgumentParser
    import itertools

    parser = ArgumentParser()
    parser.add_argument("filename")
    parser.add_argument("dpi", nargs="?", type=float, default=None)
    args = parser.parse_args()
    with Dvi(args.filename, args.dpi) as dvi:
        # NOTE(review): fontmap is never used below -- possibly kept for its
        # construction-time validation side effect; confirm before removing.
        fontmap = PsfontsMap(find_tex_file('pdftex.map'))
        for page in dvi:
            print(f"=== new page === "
                  f"(w: {page.width}, h: {page.height}, d: {page.descent})")
            for font, group in itertools.groupby(
                    page.text, lambda text: text.font):
                print(f"font: {font.texname.decode('latin-1')!r}\t"
                      f"scale: {font._scale / 2 ** 20}")
                print("x", "y", "glyph", "chr", "w", "(glyphs)", sep="\t")
                for text in group:
                    print(text.x, text.y, text.glyph,
                          chr(text.glyph) if chr(text.glyph).isprintable()
                          else ".",
                          text.width, sep="\t")
            if page.boxes:
                print("x", "y", "h", "w", "", "(boxes)", sep="\t")
                for box in page.boxes:
                    print(box.x, box.y, box.height, box.width, sep="\t")
venv\Lib\site-packages\matplotlib\figure.py
"""
`matplotlib.figure` implements the following classes:
`Figure`
Top level `~matplotlib.artist.Artist`, which holds all plot elements.
Many methods are implemented in `FigureBase`.
`SubFigure`
A logical figure inside a figure, usually added to a figure (or parent `SubFigure`)
with `Figure.add_subfigure` or `Figure.subfigures` methods.
Figures are typically created using pyplot methods `~.pyplot.figure`,
`~.pyplot.subplots`, and `~.pyplot.subplot_mosaic`.
.. plot::
:include-source:
fig, ax = plt.subplots(figsize=(2, 2), facecolor='lightskyblue',
layout='constrained')
fig.suptitle('Figure')
ax.set_title('Axes', loc='left', fontstyle='oblique', fontsize='medium')
Some situations call for directly instantiating a `~.figure.Figure` class,
usually inside an application of some sort (see :ref:`user_interfaces` for a
list of examples) . More information about Figures can be found at
:ref:`figure-intro`.
"""
from contextlib import ExitStack
import inspect
import itertools
import functools
import logging
from numbers import Integral
import threading
import numpy as np
import matplotlib as mpl
from matplotlib import _blocking_input, backend_bases, _docstring, projections
from matplotlib.artist import (
Artist, allow_rasterization, _finalize_rasterization)
from matplotlib.backend_bases import (
DrawEvent, FigureCanvasBase, NonGuiException, MouseButton, _get_renderer)
import matplotlib._api as _api
import matplotlib.cbook as cbook
import matplotlib.colorbar as cbar
import matplotlib.image as mimage
from matplotlib.axes import Axes
from matplotlib.gridspec import GridSpec, SubplotParams
from matplotlib.layout_engine import (
ConstrainedLayoutEngine, TightLayoutEngine, LayoutEngine,
PlaceHolderLayoutEngine
)
import matplotlib.legend as mlegend
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
_log = logging.getLogger(__name__)
def _stale_figure_callback(self, val):
if (fig := self.get_figure(root=False)) is not None:
fig.stale = val
class _AxesStack:
"""
Helper class to track Axes in a figure.
Axes are tracked both in the order in which they have been added
(``self._axes`` insertion/iteration order) and in the separate "gca" stack
(which is the index to which they map in the ``self._axes`` dict).
"""
def __init__(self):
self._axes = {} # Mapping of Axes to "gca" order.
self._counter = itertools.count()
def as_list(self):
"""List the Axes that have been added to the figure."""
return [*self._axes] # This relies on dict preserving order.
def remove(self, a):
"""Remove the Axes from the stack."""
self._axes.pop(a)
def bubble(self, a):
"""Move an Axes, which must already exist in the stack, to the top."""
if a not in self._axes:
raise ValueError("Axes has not been added yet")
self._axes[a] = next(self._counter)
def add(self, a):
"""Add an Axes to the stack, ignoring it if already present."""
if a not in self._axes:
self._axes[a] = next(self._counter)
def current(self):
"""Return the active Axes, or None if the stack is empty."""
return max(self._axes, key=self._axes.__getitem__, default=None)
def __getstate__(self):
return {
**vars(self),
"_counter": max(self._axes.values(), default=0)
}
def __setstate__(self, state):
next_counter = state.pop('_counter')
vars(self).update(state)
self._counter = itertools.count(next_counter)
class FigureBase(Artist):
"""
Base class for `.Figure` and `.SubFigure` containing the methods that add
artists to the figure or subfigure, create Axes, etc.
"""
    def __init__(self, **kwargs):
        super().__init__()
        # remove the non-figure artist _axes property
        # as it makes no sense for a figure to be _in_ an Axes
        # this is used by the property methods in the artist base class
        # which are over-ridden in this class
        del self._axes
        # The "super" labels; created lazily by suptitle/supxlabel/supylabel.
        self._suptitle = None
        self._supxlabel = None
        self._supylabel = None
        # groupers to keep track of x, y labels and title we want to align.
        # see self.align_xlabels, self.align_ylabels,
        # self.align_titles, and axis._get_tick_boxes_siblings
        self._align_label_groups = {
            "x": cbook.Grouper(),
            "y": cbook.Grouper(),
            "title": cbook.Grouper()
        }
        self._localaxes = []  # track all Axes
        # Per-type artist containers, in the order used by get_children.
        self.artists = []
        self.lines = []
        self.patches = []
        self.texts = []
        self.images = []
        self.legends = []
        self.subfigs = []
        self.stale = True
        self.suppressComposite = None
        self.set(**kwargs)
def _get_draw_artists(self, renderer):
"""Also runs apply_aspect"""
artists = self.get_children()
artists.remove(self.patch)
artists = sorted(
(artist for artist in artists if not artist.get_animated()),
key=lambda artist: artist.get_zorder())
for ax in self._localaxes:
locator = ax.get_axes_locator()
ax.apply_aspect(locator(ax, renderer) if locator else None)
for child in ax.get_children():
if hasattr(child, 'apply_aspect'):
locator = child.get_axes_locator()
child.apply_aspect(
locator(child, renderer) if locator else None)
return artists
    def autofmt_xdate(
            self, bottom=0.2, rotation=30, ha='right', which='major'):
        """
        Date ticklabels often overlap, so it is useful to rotate them
        and right align them. Also, a common use case is a number of
        subplots with shared x-axis where the x-axis is date data. The
        ticklabels are often long, and it helps to rotate them on the
        bottom subplot and turn them off on other subplots, as well as
        turn off xlabels.

        Parameters
        ----------
        bottom : float, default: 0.2
            The bottom of the subplots for `subplots_adjust`.
        rotation : float, default: 30 degrees
            The rotation angle of the xtick labels in degrees.
        ha : {'left', 'center', 'right'}, default: 'right'
            The horizontal alignment of the xticklabels.
        which : {'major', 'minor', 'both'}, default: 'major'
            Selects which ticklabels to rotate.
        """
        _api.check_in_list(['major', 'minor', 'both'], which=which)
        # Only consider labeled Axes.
        axes = [ax for ax in self.axes if ax._label != '']
        allsubplots = all(ax.get_subplotspec() for ax in axes)
        if len(axes) == 1:
            # NOTE(review): this rotates the labels of self.axes[0], not of
            # the single *labeled* axes in `axes` -- confirm intended.
            for label in self.axes[0].get_xticklabels(which=which):
                label.set_ha(ha)
                label.set_rotation(rotation)
        else:
            if allsubplots:
                for ax in axes:
                    if ax.get_subplotspec().is_last_row():
                        # Bottom row keeps (rotated, re-aligned) labels.
                        for label in ax.get_xticklabels(which=which):
                            label.set_ha(ha)
                            label.set_rotation(rotation)
                    else:
                        # Other rows: hide tick labels and the x label.
                        for label in ax.get_xticklabels(which=which):
                            label.set_visible(False)
                        ax.set_xlabel('')
        engine = self.get_layout_engine()
        # Only adjust margins when no layout engine would fight the change.
        if allsubplots and (engine is None or engine.adjust_compatible):
            self.subplots_adjust(bottom=bottom)
        self.stale = True
def get_children(self):
"""Get a list of artists contained in the figure."""
return [self.patch,
*self.artists,
*self._localaxes,
*self.lines,
*self.patches,
*self.texts,
*self.images,
*self.legends,
*self.subfigs]
    def get_figure(self, root=None):
        """
        Return the `.Figure` or `.SubFigure` instance the (Sub)Figure belongs to.

        Parameters
        ----------
        root : bool, default=True
            If False, return the (Sub)Figure this artist is on. If True,
            return the root Figure for a nested tree of SubFigures.

            .. deprecated:: 3.10
                From version 3.12 *root* will default to False.
        """
        if self._root_figure is self:
            # Top level Figure
            return self
        if self._parent is self._root_figure:
            # Return early to prevent the deprecation warning when *root* does not
            # matter
            return self._parent
        if root is None:
            # Deeply nested SubFigure with unspecified *root*: warn, then keep
            # the historical (root=True) behavior.
            # When deprecation expires, consider removing the docstring and just
            # inheriting the one from Artist.
            message = ('From Matplotlib 3.12 SubFigure.get_figure will by default '
                       'return the direct parent figure, which may be a SubFigure. '
                       'To suppress this warning, pass the root parameter. Pass '
                       '`True` to maintain the old behavior and `False` to opt-in to '
                       'the future behavior.')
            _api.warn_deprecated('3.10', message=message)
            root = True
        if root:
            return self._root_figure
        return self._parent
def set_figure(self, fig):
"""
.. deprecated:: 3.10
Currently this method will raise an exception if *fig* is anything other
than the root `.Figure` this (Sub)Figure is on. In future it will always
raise an exception.
"""
no_switch = ("The parent and root figures of a (Sub)Figure are set at "
"instantiation and cannot be changed.")
if fig is self._root_figure:
_api.warn_deprecated(
"3.10",
message=(f"{no_switch} From Matplotlib 3.12 this operation will raise "
"an exception."))
return
raise ValueError(no_switch)
figure = property(functools.partial(get_figure, root=True), set_figure,
doc=("The root `Figure`. To get the parent of a `SubFigure`, "
"use the `get_figure` method."))
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns
-------
bool, {}
"""
if self._different_canvas(mouseevent):
return False, {}
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
    def get_window_extent(self, renderer=None):
        # docstring inherited
        # The figure bbox *is* its window extent; *renderer* is accepted only
        # for signature compatibility and is ignored.
        return self.bbox
    def _suplabels(self, t, info, **kwargs):
        """
        Add a centered %(name)s to the figure.

        Parameters
        ----------
        t : str
            The %(name)s text.
        x : float, default: %(x0)s
            The x location of the text in figure coordinates.
        y : float, default: %(y0)s
            The y location of the text in figure coordinates.
        horizontalalignment, ha : {'center', 'left', 'right'}, default: %(ha)s
            The horizontal alignment of the text relative to (*x*, *y*).
        verticalalignment, va : {'top', 'center', 'bottom', 'baseline'}, \
default: %(va)s
            The vertical alignment of the text relative to (*x*, *y*).
        fontsize, size : default: :rc:`figure.%(rc)ssize`
            The font size of the text. See `.Text.set_size` for possible
            values.
        fontweight, weight : default: :rc:`figure.%(rc)sweight`
            The font weight of the text. See `.Text.set_weight` for possible
            values.

        Returns
        -------
        text
            The `.Text` instance of the %(name)s.

        Other Parameters
        ----------------
        fontproperties : None or dict, optional
            A dict of font properties. If *fontproperties* is given the
            default values for font size and weight are taken from the
            `.FontProperties` defaults. :rc:`figure.%(rc)ssize` and
            :rc:`figure.%(rc)sweight` are ignored in this case.
        **kwargs
            Additional kwargs are `matplotlib.text.Text` properties.
        """
        x = kwargs.pop('x', None)
        y = kwargs.pop('y', None)
        # autopos: layout engines may reposition the label only if the user
        # did not pin the coordinate that the engine would move.
        if info['name'] in ['_supxlabel', '_suptitle']:
            autopos = y is None
        elif info['name'] == '_supylabel':
            autopos = x is None
        # NOTE(review): any other info['name'] would leave autopos unbound;
        # all current callers pass one of the three names above.
        if x is None:
            x = info['x0']
        if y is None:
            y = info['y0']
        kwargs = cbook.normalize_kwargs(kwargs, Text)
        kwargs.setdefault('horizontalalignment', info['ha'])
        kwargs.setdefault('verticalalignment', info['va'])
        kwargs.setdefault('rotation', info['rotation'])
        # An explicit fontproperties dict takes full control of size/weight.
        if 'fontproperties' not in kwargs:
            kwargs.setdefault('fontsize', mpl.rcParams[info['size']])
            kwargs.setdefault('fontweight', mpl.rcParams[info['weight']])
        suplab = getattr(self, info['name'])
        if suplab is not None:
            # Update the existing Text artist in place.
            suplab.set_text(t)
            suplab.set_position((x, y))
            suplab.set(**kwargs)
        else:
            suplab = self.text(x, y, t, **kwargs)
            setattr(self, info['name'], suplab)
        suplab._autopos = autopos
        self.stale = True
        return suplab
    @_docstring.Substitution(x0=0.5, y0=0.98, name='super title', ha='center',
                             va='top', rc='title')
    @_docstring.copy(_suplabels)
    def suptitle(self, t, **kwargs):
        # docstring from _suplabels...
        # Defaults for the figure super title; semantics in _suplabels.
        info = {'name': '_suptitle', 'x0': 0.5, 'y0': 0.98,
                'ha': 'center', 'va': 'top', 'rotation': 0,
                'size': 'figure.titlesize', 'weight': 'figure.titleweight'}
        return self._suplabels(t, info, **kwargs)
def get_suptitle(self):
"""Return the suptitle as string or an empty string if not set."""
text_obj = self._suptitle
return "" if text_obj is None else text_obj.get_text()
    @_docstring.Substitution(x0=0.5, y0=0.01, name='super xlabel', ha='center',
                             va='bottom', rc='label')
    @_docstring.copy(_suplabels)
    def supxlabel(self, t, **kwargs):
        # docstring from _suplabels...
        # Defaults for the shared x label; semantics in _suplabels.
        info = {'name': '_supxlabel', 'x0': 0.5, 'y0': 0.01,
                'ha': 'center', 'va': 'bottom', 'rotation': 0,
                'size': 'figure.labelsize', 'weight': 'figure.labelweight'}
        return self._suplabels(t, info, **kwargs)
def get_supxlabel(self):
"""Return the supxlabel as string or an empty string if not set."""
text_obj = self._supxlabel
return "" if text_obj is None else text_obj.get_text()
    @_docstring.Substitution(x0=0.02, y0=0.5, name='super ylabel', ha='left',
                             va='center', rc='label')
    @_docstring.copy(_suplabels)
    def supylabel(self, t, **kwargs):
        # docstring from _suplabels...
        # Defaults for the shared y label (drawn vertically, anchored
        # rotation); semantics in _suplabels.
        info = {'name': '_supylabel', 'x0': 0.02, 'y0': 0.5,
                'ha': 'left', 'va': 'center', 'rotation': 'vertical',
                'rotation_mode': 'anchor', 'size': 'figure.labelsize',
                'weight': 'figure.labelweight'}
        return self._suplabels(t, info, **kwargs)
def get_supylabel(self):
"""Return the supylabel as string or an empty string if not set."""
text_obj = self._supylabel
return "" if text_obj is None else text_obj.get_text()
    def get_edgecolor(self):
        """Get the edge color of the Figure rectangle."""
        # All frame styling is stored on the background patch.
        return self.patch.get_edgecolor()

    def get_facecolor(self):
        """Get the face color of the Figure rectangle."""
        return self.patch.get_facecolor()

    def get_frameon(self):
        """
        Return the figure's background patch visibility, i.e.
        whether the figure background will be drawn. Equivalent to
        ``Figure.patch.get_visible()``.
        """
        return self.patch.get_visible()
    def set_linewidth(self, linewidth):
        """
        Set the line width of the Figure rectangle.

        Parameters
        ----------
        linewidth : number
        """
        # Delegates to the background patch, which draws the frame.
        self.patch.set_linewidth(linewidth)

    def get_linewidth(self):
        """
        Get the line width of the Figure rectangle.
        """
        return self.patch.get_linewidth()
    def set_edgecolor(self, color):
        """
        Set the edge color of the Figure rectangle.

        Parameters
        ----------
        color : :mpltype:`color`
        """
        self.patch.set_edgecolor(color)

    def set_facecolor(self, color):
        """
        Set the face color of the Figure rectangle.

        Parameters
        ----------
        color : :mpltype:`color`
        """
        self.patch.set_facecolor(color)

    def set_frameon(self, b):
        """
        Set the figure's background patch visibility, i.e.
        whether the figure background will be drawn. Equivalent to
        ``Figure.patch.set_visible()``.

        Parameters
        ----------
        b : bool
        """
        self.patch.set_visible(b)
        self.stale = True

    # Property alias: ``fig.frameon`` <-> get_frameon/set_frameon.
    frameon = property(get_frameon, set_frameon)
    def add_artist(self, artist, clip=False):
        """
        Add an `.Artist` to the figure.

        Usually artists are added to `~.axes.Axes` objects using
        `.Axes.add_artist`; this method can be used in the rare cases where
        one needs to add artists directly to the figure instead.

        Parameters
        ----------
        artist : `~matplotlib.artist.Artist`
            The artist to add to the figure. If the added artist has no
            transform previously set, its transform will be set to
            ``figure.transSubfigure``.
        clip : bool, default: False
            Whether the added artist should be clipped by the figure patch.

        Returns
        -------
        `~matplotlib.artist.Artist`
            The added artist.
        """
        artist.set_figure(self)
        self.artists.append(artist)
        # Let artist.remove() detach itself from this container.
        artist._remove_method = self.artists.remove
        if not artist.is_transform_set():
            artist.set_transform(self.transSubfigure)
        if clip and artist.get_clip_path() is None:
            artist.set_clip_path(self.patch)
        self.stale = True
        return artist
@_docstring.interpd
def add_axes(self, *args, **kwargs):
    """
    Add an `~.axes.Axes` to the figure.

    Call signatures::

        add_axes(rect, projection=None, polar=False, **kwargs)
        add_axes(ax)

    Parameters
    ----------
    rect : tuple (left, bottom, width, height)
        The dimensions (left, bottom, width, height) of the new
        `~.axes.Axes`. All quantities are in fractions of figure width and
        height.
    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the `~.axes.Axes`. *str* is the name of
        a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.
    polar : bool, default: False
        If True, equivalent to projection='polar'.
    axes_class : subclass type of `~.axes.Axes`, optional
        The `.axes.Axes` subclass that is instantiated. This parameter
        is incompatible with *projection* and *polar*. See
        :ref:`axisartist_users-guide-index` for examples.
    sharex, sharey : `~matplotlib.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared Axes.
    label : str
        A label for the returned Axes.

    Returns
    -------
    `~.axes.Axes`, or a subclass of `~.axes.Axes`
        The returned Axes class depends on the projection used. It is
        `~.axes.Axes` if rectilinear projection is used and
        `.projections.polar.PolarAxes` if polar projection is used.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for
        the returned Axes class. The keyword arguments for the
        rectilinear Axes class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used, see the actual Axes
        class.

        %(Axes:kwdoc)s

    Notes
    -----
    In rare circumstances, `.add_axes` may be called with a single
    argument, an Axes instance already created in the present figure but
    not in the figure's list of Axes.

    See Also
    --------
    .Figure.add_subplot
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    Some simple examples::

        rect = l, b, w, h
        fig = plt.figure()
        fig.add_axes(rect)
        fig.add_axes(rect, frameon=False, facecolor='g')
        fig.add_axes(rect, polar=True)
        ax = fig.add_axes(rect, projection='polar')
        fig.delaxes(ax)
        fig.add_axes(ax)
    """
    # Accept the rect/Axes either positionally or as the keyword
    # ``rect=...``, but never both at once.
    if not len(args) and 'rect' not in kwargs:
        raise TypeError("add_axes() missing 1 required positional argument: 'rect'")
    elif 'rect' in kwargs:
        if len(args):
            raise TypeError("add_axes() got multiple values for argument 'rect'")
        args = (kwargs.pop('rect'), )
    if len(args) != 1:
        raise _api.nargs_error("add_axes", 1, len(args))
    if isinstance(args[0], Axes):
        # Re-adding an existing Axes (e.g. one previously removed with
        # delaxes); reuse its recorded projection key.
        a, = args
        key = a._projection_init
        if a.get_figure(root=False) is not self:
            raise ValueError(
                "The Axes must have been created in the present figure")
    else:
        rect, = args
        if not np.isfinite(rect).all():
            raise ValueError(f'all entries in rect must be finite not {rect}')
        projection_class, pkw = self._process_projection_requirements(**kwargs)
        # create the new Axes using the Axes class given
        a = projection_class(self, rect, **pkw)
        key = (projection_class, pkw)
    return self._add_axes_internal(a, key)
@_docstring.interpd
def add_subplot(self, *args, **kwargs):
    """
    Add an `~.axes.Axes` to the figure as part of a subplot arrangement.

    Call signatures::

        add_subplot(nrows, ncols, index, **kwargs)
        add_subplot(pos, **kwargs)
        add_subplot(ax)
        add_subplot()

    Parameters
    ----------
    *args : int, (int, int, *index*), or `.SubplotSpec`, default: (1, 1, 1)
        The position of the subplot described by one of

        - Three integers (*nrows*, *ncols*, *index*). The subplot will
          take the *index* position on a grid with *nrows* rows and
          *ncols* columns. *index* starts at 1 in the upper left corner
          and increases to the right. *index* can also be a two-tuple
          specifying the (*first*, *last*) indices (1-based, and including
          *last*) of the subplot, e.g., ``fig.add_subplot(3, 1, (1, 2))``
          makes a subplot that spans the upper 2/3 of the figure.
        - A 3-digit integer. The digits are interpreted as if given
          separately as three single-digit integers, i.e.
          ``fig.add_subplot(235)`` is the same as
          ``fig.add_subplot(2, 3, 5)``. Note that this can only be used
          if there are no more than 9 subplots.
        - A `.SubplotSpec`.

        In rare circumstances, `.add_subplot` may be called with a single
        argument, a subplot Axes instance already created in the
        present figure but not in the figure's list of Axes.
    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the subplot (`~.axes.Axes`). *str* is the
        name of a custom projection, see `~matplotlib.projections`. The
        default None results in a 'rectilinear' projection.
    polar : bool, default: False
        If True, equivalent to projection='polar'.
    axes_class : subclass type of `~.axes.Axes`, optional
        The `.axes.Axes` subclass that is instantiated. This parameter
        is incompatible with *projection* and *polar*. See
        :ref:`axisartist_users-guide-index` for examples.
    sharex, sharey : `~matplotlib.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared Axes.
    label : str
        A label for the returned Axes.

    Returns
    -------
    `~.axes.Axes`
        The Axes of the subplot. The returned Axes can actually be an
        instance of a subclass, such as `.projections.polar.PolarAxes` for
        polar projections.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for the returned Axes
        base class; except for the *figure* argument. The keyword arguments
        for the rectilinear base class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used.

        %(Axes:kwdoc)s

    See Also
    --------
    .Figure.add_axes
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    ::

        fig = plt.figure()

        fig.add_subplot(231)
        ax1 = fig.add_subplot(2, 3, 1)  # equivalent but more general

        fig.add_subplot(232, frameon=False)  # subplot with no frame
        fig.add_subplot(233, projection='polar')  # polar subplot
        fig.add_subplot(234, sharex=ax1)  # subplot sharing x-axis with ax1
        fig.add_subplot(235, facecolor="red")  # red subplot

        ax1.remove()  # delete ax1 from the figure
        fig.add_subplot(ax1)  # add ax1 back to the figure
    """
    if 'figure' in kwargs:
        # Axes itself allows for a 'figure' kwarg, but since we want to
        # bind the created Axes to self, it is not allowed here.
        raise _api.kwarg_error("add_subplot", "figure")
    if (len(args) == 1
            and isinstance(args[0], mpl.axes._base._AxesBase)
            and args[0].get_subplotspec()):
        # Single-argument form: re-adding an existing subplot Axes.
        ax = args[0]
        key = ax._projection_init
        if ax.get_figure(root=False) is not self:
            raise ValueError("The Axes must have been created in "
                             "the present figure")
    else:
        if not args:
            args = (1, 1, 1)
        # Normalize correct ijk values to (i, j, k) here so that
        # add_subplot(211) == add_subplot(2, 1, 1). Invalid values will
        # trigger errors later (via SubplotSpec._from_subplot_args).
        if (len(args) == 1 and isinstance(args[0], Integral)
                and 100 <= args[0] <= 999):
            args = tuple(map(int, str(args[0])))
        projection_class, pkw = self._process_projection_requirements(**kwargs)
        ax = projection_class(self, *args, **pkw)
        key = (projection_class, pkw)
    return self._add_axes_internal(ax, key)
def _add_axes_internal(self, ax, key):
    """Private helper for `add_axes` and `add_subplot`.

    Registers *ax* with the figure (Axes stack and local list), makes it
    the current Axes, and records *key* — the projection signature —
    so that pyplot can later re-select an equivalent subplot.
    """
    self._axstack.add(ax)
    if ax not in self._localaxes:
        self._localaxes.append(ax)
    # Make the new Axes current and wire up its removal hook.
    self.sca(ax)
    ax._remove_method = self.delaxes
    # this is to support plt.subplot's re-selection logic
    ax._projection_init = key
    self.stale = True
    ax.stale_callback = _stale_figure_callback
    return ax
def subplots(self, nrows=1, ncols=1, *, sharex=False, sharey=False,
             squeeze=True, width_ratios=None, height_ratios=None,
             subplot_kw=None, gridspec_kw=None):
    """
    Add a set of subplots to this figure.

    This utility wrapper makes it convenient to create common layouts of
    subplots in a single call.

    Parameters
    ----------
    nrows, ncols : int, default: 1
        Number of rows/columns of the subplot grid.
    sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
        Controls sharing of x-axis (*sharex*) or y-axis (*sharey*):

        - True or 'all': x- or y-axis will be shared among all subplots.
        - False or 'none': each subplot x- or y-axis will be independent.
        - 'row': each subplot row will share an x- or y-axis.
        - 'col': each subplot column will share an x- or y-axis.

        When subplots have a shared x-axis along a column, only the x tick
        labels of the bottom subplot are created. Similarly, when subplots
        have a shared y-axis along a row, only the y tick labels of the
        first column subplot are created. To later turn other subplots'
        ticklabels on, use `~matplotlib.axes.Axes.tick_params`.

        When subplots have a shared axis that has units, calling
        `.Axis.set_units` will update each axis with the new units.

        Note that it is not possible to unshare axes.
    squeeze : bool, default: True
        - If True, extra dimensions are squeezed out from the returned
          array of Axes: a 1x1 grid yields a scalar Axes, an Nx1 or 1xM
          grid a 1D object array, and an NxM grid (N>1 and M>1) a 2D
          array.
        - If False, no squeezing is done: the result is always a 2D
          array of Axes, even if it ends up being 1x1.
    width_ratios : array-like of length *ncols*, optional
        Relative widths of the columns; column *i* gets width
        ``width_ratios[i] / sum(width_ratios)``. Equivalent to
        ``gridspec_kw={'width_ratios': [...]}``.
    height_ratios : array-like of length *nrows*, optional
        Relative heights of the rows; row *i* gets height
        ``height_ratios[i] / sum(height_ratios)``. Equivalent to
        ``gridspec_kw={'height_ratios': [...]}``.
    subplot_kw : dict, optional
        Dict with keywords passed to the `.Figure.add_subplot` call used
        to create each subplot.
    gridspec_kw : dict, optional
        Dict with keywords passed to the
        `~matplotlib.gridspec.GridSpec` constructor used to create the
        grid the subplots are placed on.

    Returns
    -------
    `~.axes.Axes` or array of Axes
        Either a single `~matplotlib.axes.Axes` object or an array of
        Axes objects if more than one subplot was created; see the
        *squeeze* parameter.

    See Also
    --------
    .pyplot.subplots
    .Figure.add_subplot
    .pyplot.subplot
    """
    # Copy so we never mutate the caller's dict.
    gridspec_kw = dict(gridspec_kw or {})
    # Fold the two ratio conveniences into gridspec_kw, rejecting
    # double specification (height first, matching historical order).
    for key, ratios in (('height_ratios', height_ratios),
                        ('width_ratios', width_ratios)):
        if ratios is None:
            continue
        if key in gridspec_kw:
            raise ValueError(f"'{key}' must not be defined both as "
                             "parameter and as key in 'gridspec_kw'")
        gridspec_kw[key] = ratios

    grid = self.add_gridspec(nrows, ncols, figure=self, **gridspec_kw)
    return grid.subplots(sharex=sharex, sharey=sharey, squeeze=squeeze,
                         subplot_kw=subplot_kw)
def delaxes(self, ax):
    """
    Remove the `~.axes.Axes` *ax* from the figure; update the current Axes.
    """
    # Standard Axes are owned by both the Axes stack and the local list;
    # _remove_axes detaches *ax* from each and fires the change event.
    self._remove_axes(ax, owners=[self._axstack, self._localaxes])
def _remove_axes(self, ax, owners):
    """
    Common helper for removal of standard Axes (via delaxes) and of child Axes.

    Parameters
    ----------
    ax : `~.AxesBase`
        The Axes to remove.
    owners
        List of objects (list or _AxesStack) "owning" the Axes, from which the Axes
        will be remove()d.
    """
    for owner in owners:
        owner.remove(ax)

    self._axobservers.process("_axes_change_event", self)
    self.stale = True
    # Drop any active mouse grab tied to the removed Axes.
    self._root_figure.canvas.release_mouse(ax)

    for name in ax._axis_names:  # Break link between any shared Axes
        grouper = ax._shared_axes[name]
        siblings = [other for other in grouper.get_siblings(ax) if other is not ax]
        if not siblings:  # Axes was not shared along this axis; we're done.
            continue
        grouper.remove(ax)
        # Formatters and locators may previously have been associated with the now
        # removed axis. Update them to point to an axis still there (we can pick
        # any of them, and use the first sibling).
        remaining_axis = siblings[0]._axis_map[name]
        remaining_axis.get_major_formatter().set_axis(remaining_axis)
        remaining_axis.get_major_locator().set_axis(remaining_axis)
        remaining_axis.get_minor_formatter().set_axis(remaining_axis)
        remaining_axis.get_minor_locator().set_axis(remaining_axis)

    ax._twinned_axes.remove(ax)  # Break link between any twinned Axes.
def clear(self, keep_observers=False):
    """
    Clear the figure.

    Removes all Axes (including those in subfigures) and all
    figure-level artists, resetting the figure to an empty state.

    Parameters
    ----------
    keep_observers : bool, default: False
        Set *keep_observers* to True if, for example,
        a gui widget is tracking the Axes in the figure.
    """
    self.suppressComposite = None

    # Clear nested subfigures first, then drop them entirely.
    for subfig in self.subfigs:
        subfig.clear(keep_observers=keep_observers)
    self.subfigs = []

    # Iterate over a snapshot: delaxes mutates the underlying stack.
    for ax in tuple(self.axes):
        ax.clear()
        self.delaxes(ax)  # Remove ax from self._axstack.

    # Reset every figure-level artist container to a fresh list.
    for container in ('artists', 'lines', 'patches', 'texts',
                      'images', 'legends'):
        setattr(self, container, [])

    if not keep_observers:
        self._axobservers = cbook.CallbackRegistry()
    self._suptitle = None
    self._supxlabel = None
    self._supylabel = None

    self.stale = True
# Synonym kept for backwards compatibility with the MATLAB-style API.
def clf(self, keep_observers=False):
    """
    [*Discouraged*] Alias for the `clear()` method.

    .. admonition:: Discouraged

        The use of ``clf()`` is discouraged. Use ``clear()`` instead.

    Parameters
    ----------
    keep_observers : bool, default: False
        Set *keep_observers* to True if, for example,
        a gui widget is tracking the Axes in the figure.
    """
    # Pure delegation; all the work happens in clear().
    return self.clear(keep_observers=keep_observers)
# Note: the docstring below is modified with replace for the pyplot
# version of this function because the method name differs (plt.figlegend)
# the replacements are:
#    " legend(" -> " figlegend(" for the signatures
#    "fig.legend(" -> "plt.figlegend" for the code examples
#    "ax.plot" -> "plt.plot" for consistency in using pyplot when able
@_docstring.interpd
def legend(self, *args, **kwargs):
    """
    Place a legend on the figure.

    Call signatures::

        legend()
        legend(handles, labels)
        legend(handles=handles)
        legend(labels)

    The call signatures correspond to the following different ways to use
    this method:

    **1. Automatic detection of elements to be shown in the legend**

    The elements to be added to the legend are automatically determined,
    when you do not pass in any extra arguments.

    In this case, the labels are taken from the artist. You can specify
    them either at artist creation or by calling the
    :meth:`~.Artist.set_label` method on the artist::

        ax.plot([1, 2, 3], label='Inline label')
        fig.legend()

    or::

        line, = ax.plot([1, 2, 3])
        line.set_label('Label via method')
        fig.legend()

    Specific lines can be excluded from the automatic legend element
    selection by defining a label starting with an underscore.
    This is default for all artists, so calling `.Figure.legend` without
    any arguments and without setting the labels manually will result in
    no legend being drawn.

    **2. Explicitly listing the artists and labels in the legend**

    For full control of which artists have a legend entry, it is possible
    to pass an iterable of legend artists followed by an iterable of
    legend labels respectively::

        fig.legend([line1, line2, line3], ['label1', 'label2', 'label3'])

    **3. Explicitly listing the artists in the legend**

    This is similar to 2, but the labels are taken from the artists'
    label properties. Example::

        line1, = ax1.plot([1, 2, 3], label='label1')
        line2, = ax2.plot([1, 2, 3], label='label2')
        fig.legend(handles=[line1, line2])

    **4. Labeling existing plot elements**

    .. admonition:: Discouraged

        This call signature is discouraged, because the relation between
        plot elements and labels is only implicit by their order and can
        easily be mixed up.

    To make a legend for all artists on all Axes, call this function with
    an iterable of strings, one for each legend item. For example::

        fig, (ax1, ax2) = plt.subplots(1, 2)
        ax1.plot([1, 3, 5], color='blue')
        ax2.plot([2, 4, 6], color='red')
        fig.legend(['the blues', 'the reds'])

    Parameters
    ----------
    handles : list of `.Artist`, optional
        A list of Artists (lines, patches) to be added to the legend.
        Use this together with *labels*, if you need full control on what
        is shown in the legend and the automatic mechanism described above
        is not sufficient.

        The length of handles and labels should be the same in this
        case. If they are not, they are truncated to the smaller length.
    labels : list of str, optional
        A list of labels to show next to the artists.
        Use this together with *handles*, if you need full control on what
        is shown in the legend and the automatic mechanism described above
        is not sufficient.

    Returns
    -------
    `~matplotlib.legend.Legend`

    Other Parameters
    ----------------
    %(_legend_kw_figure)s

    See Also
    --------
    .Axes.legend

    Notes
    -----
    Some artists are not supported by this function. See
    :ref:`legend_guide` for details.
    """
    # Resolve the four call signatures into explicit handles/labels.
    handles, labels, kwargs = mlegend._parse_legend_args(self.axes, *args, **kwargs)
    # explicitly set the bbox transform if the user hasn't.
    kwargs.setdefault("bbox_transform", self.transSubfigure)
    l = mlegend.Legend(self, handles, labels, **kwargs)
    self.legends.append(l)
    l._remove_method = self.legends.remove
    self.stale = True
    return l
@_docstring.interpd
def text(self, x, y, s, fontdict=None, **kwargs):
    """
    Add text to the figure.

    Parameters
    ----------
    x, y : float
        The position to place the text. By default, this is in figure
        coordinates, floats in [0, 1]. The coordinate system can be
        changed using the *transform* keyword.
    s : str
        The text string.
    fontdict : dict, optional
        A dictionary to override the default text properties. If not
        given, the defaults are determined by :rc:`font.*`. Properties
        passed as *kwargs* override the corresponding ones given in
        *fontdict*.

    Returns
    -------
    `~.text.Text`

    Other Parameters
    ----------------
    **kwargs : `~matplotlib.text.Text` properties
        Other miscellaneous text parameters.

        %(Text:kwdoc)s

    See Also
    --------
    .Axes.text
    .pyplot.text
    """
    # Property precedence, lowest to highest: subfigure-coordinate
    # default transform, *fontdict*, explicit keyword arguments.
    props = {'transform': self.transSubfigure}
    if fontdict is not None:
        props.update(fontdict)
    props.update(kwargs)

    txt = Text(x=x, y=y, text=s, **props)
    txt.set_figure(self)
    txt.stale_callback = _stale_figure_callback
    self.texts.append(txt)
    txt._remove_method = self.texts.remove
    self.stale = True
    return txt
@_docstring.interpd
def colorbar(
        self, mappable, cax=None, ax=None, use_gridspec=True, **kwargs):
    """
    Add a colorbar to a plot.

    Parameters
    ----------
    mappable
        The `matplotlib.cm.ScalarMappable` (i.e., `.AxesImage`,
        `.ContourSet`, etc.) described by this colorbar. This argument is
        mandatory for the `.Figure.colorbar` method but optional for the
        `.pyplot.colorbar` function, which sets the default to the current
        image.

        Note that one can create a `.ScalarMappable` "on-the-fly" to
        generate colorbars not attached to a previously drawn artist, e.g.
        ::

            fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)

    cax : `~matplotlib.axes.Axes`, optional
        Axes into which the colorbar will be drawn. If `None`, then a new
        Axes is created and the space for it will be stolen from the Axes(s)
        specified in *ax*.
    ax : `~matplotlib.axes.Axes` or iterable or `numpy.ndarray` of Axes, optional
        The one or more parent Axes from which space for a new colorbar Axes
        will be stolen. This parameter is only used if *cax* is not set.

        Defaults to the Axes that contains the mappable used to create the
        colorbar.
    use_gridspec : bool, optional
        If *cax* is ``None``, a new *cax* is created as an instance of
        Axes. If *ax* is positioned with a subplotspec and *use_gridspec*
        is ``True``, then *cax* is also positioned with a subplotspec.

    Returns
    -------
    colorbar : `~matplotlib.colorbar.Colorbar`

    Other Parameters
    ----------------
    %(_make_axes_kw_doc)s
    %(_colormap_kw_doc)s

    Notes
    -----
    If *mappable* is a `~.contour.ContourSet`, its *extend* kwarg is
    included automatically.

    The *shrink* kwarg provides a simple way to scale the colorbar with
    respect to the Axes. Note that if *cax* is specified, it determines the
    size of the colorbar, and *shrink* and *aspect* are ignored.

    For more precise control, you can manually specify the positions of the
    axes objects in which the mappable and the colorbar are drawn. In this
    case, do not use any of the Axes properties kwargs.

    It is known that some vector graphics viewers (svg and pdf) render
    white gaps between segments of the colorbar. This is due to bugs in
    the viewers, not Matplotlib. As a workaround, the colorbar can be
    rendered with overlapping segments::

        cbar = colorbar()
        cbar.solids.set_edgecolor("face")
        draw()

    However, this has negative consequences in other circumstances, e.g.
    with semi-transparent images (alpha < 1) and colorbar extensions;
    therefore, this workaround is not used by default (see issue #1188).
    """
    if ax is None:
        # Default to the Axes hosting the mappable, if any.
        ax = getattr(mappable, "axes", None)
    if cax is None:
        if ax is None:
            raise ValueError(
                'Unable to determine Axes to steal space for Colorbar. '
                'Either provide the *cax* argument to use as the Axes for '
                'the Colorbar, provide the *ax* argument to steal space '
                'from it, or add *mappable* to an Axes.')
        fig = (  # Figure of first Axes; logic copied from make_axes.
            [*ax.flat] if isinstance(ax, np.ndarray)
            else [*ax] if np.iterable(ax)
            else [ax])[0].get_figure(root=False)
        current_ax = fig.gca()
        if (fig.get_layout_engine() is not None and
                not fig.get_layout_engine().colorbar_gridspec):
            use_gridspec = False
        if (use_gridspec
                and isinstance(ax, mpl.axes._base._AxesBase)
                and ax.get_subplotspec()):
            cax, kwargs = cbar.make_axes_gridspec(ax, **kwargs)
        else:
            cax, kwargs = cbar.make_axes(ax, **kwargs)
        # make_axes calls add_{axes,subplot} which changes gca; undo that.
        fig.sca(current_ax)
        cax.grid(visible=False, which='both', axis='both')
    if (hasattr(mappable, "get_figure") and
            (mappable_host_fig := mappable.get_figure(root=True)) is not None):
        # Warn in case of mismatch
        if mappable_host_fig is not self._root_figure:
            _api.warn_external(
                f'Adding colorbar to a different Figure '
                f'{repr(mappable_host_fig)} than '
                f'{repr(self._root_figure)} which '
                f'fig.colorbar is called on.')

    NON_COLORBAR_KEYS = [  # remove kws that cannot be passed to Colorbar
        'fraction', 'pad', 'shrink', 'aspect', 'anchor', 'panchor']
    cb = cbar.Colorbar(cax, mappable, **{
        k: v for k, v in kwargs.items() if k not in NON_COLORBAR_KEYS})
    cax.get_figure(root=False).stale = True
    return cb
def subplots_adjust(self, left=None, bottom=None, right=None, top=None,
                    wspace=None, hspace=None):
    """
    Adjust the subplot layout parameters.

    Unset parameters are left unmodified; initial values are given by
    :rc:`figure.subplot.[name]`.

    .. plot:: _embedded_plots/figure_subplots_adjust.py

    Parameters
    ----------
    left : float, optional
        The position of the left edge of the subplots,
        as a fraction of the figure width.
    right : float, optional
        The position of the right edge of the subplots,
        as a fraction of the figure width.
    bottom : float, optional
        The position of the bottom edge of the subplots,
        as a fraction of the figure height.
    top : float, optional
        The position of the top edge of the subplots,
        as a fraction of the figure height.
    wspace : float, optional
        The width of the padding between subplots,
        as a fraction of the average Axes width.
    hspace : float, optional
        The height of the padding between subplots,
        as a fraction of the average Axes height.
    """
    # A layout engine that manages positions itself (e.g. constrained
    # layout) would silently override these values — warn and bail out.
    engine = self.get_layout_engine()
    if engine is not None and not engine.adjust_compatible:
        _api.warn_external(
            "This figure was using a layout engine that is "
            "incompatible with subplots_adjust and/or tight_layout; "
            "not calling subplots_adjust.")
        return
    self.subplotpars.update(left, bottom, right, top, wspace, hspace)
    # Re-position every gridspec-managed Axes to its updated slot.
    for ax in self.axes:
        if ax.get_subplotspec() is not None:
            ax._set_position(ax.get_subplotspec().get_position(self))
    self.stale = True
def align_xlabels(self, axs=None):
    """
    Align the xlabels of subplots in the same subplot row if label
    alignment is being done automatically (i.e. the label position is
    not manually set).

    Alignment persists for draw events after this is called.

    If a label is on the bottom, it is aligned with labels on Axes that
    also have their label on the bottom and that have the same
    bottom-most subplot row. If the label is on the top,
    it is aligned with labels on Axes with the same top-most row.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list of (or `~numpy.ndarray`) `~matplotlib.axes.Axes`
        to align the xlabels.
        Default is to align all Axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_ylabels
    matplotlib.figure.Figure.align_titles
    matplotlib.figure.Figure.align_labels

    Notes
    -----
    This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
    so that their `.SubplotSpec` positions correspond to figure positions.

    Examples
    --------
    Example with rotated xtick labels::

        fig, axs = plt.subplots(1, 2)
        for tick in axs[0].get_xticklabels():
            tick.set_rotation(55)
        axs[0].set_xlabel('XLabel 0')
        axs[1].set_xlabel('XLabel 1')
        fig.align_xlabels()
    """
    if axs is None:
        axs = self.axes
    # Only gridspec-managed Axes have a well-defined row to align on.
    axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
    grouper = self._align_label_groups['x']
    for ax in axs:
        _log.debug(' Working on: %s', ax.get_xlabel())
        rowspan = ax.get_subplotspec().rowspan
        side = ax.xaxis.get_label_position()  # top or bottom
        # Join this Axes with every Axes whose xlabel sits on the same
        # side and whose subplot row lines up on that side. The grouper
        # is consulted in `axis._update_label_position` at draw time.
        for other in axs:
            if other.xaxis.get_label_position() != side:
                continue
            other_rowspan = other.get_subplotspec().rowspan
            if ((side == 'top' and rowspan.start == other_rowspan.start)
                    or (side == 'bottom' and rowspan.stop == other_rowspan.stop)):
                grouper.join(ax, other)
def align_ylabels(self, axs=None):
    """
    Align the ylabels of subplots in the same subplot column if label
    alignment is being done automatically (i.e. the label position is
    not manually set).

    Alignment persists for draw events after this is called.

    If a label is on the left, it is aligned with labels on Axes that
    also have their label on the left and that have the same
    left-most subplot column. If the label is on the right,
    it is aligned with labels on Axes with the same right-most column.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list (or `~numpy.ndarray`) of `~matplotlib.axes.Axes`
        to align the ylabels.
        Default is to align all Axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_xlabels
    matplotlib.figure.Figure.align_titles
    matplotlib.figure.Figure.align_labels

    Notes
    -----
    This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
    so that their `.SubplotSpec` positions correspond to figure positions.

    Examples
    --------
    Example with large yticks labels::

        fig, axs = plt.subplots(2, 1)
        axs[0].plot(np.arange(0, 1000, 50))
        axs[0].set_ylabel('YLabel 0')
        axs[1].set_ylabel('YLabel 1')
        fig.align_ylabels()
    """
    if axs is None:
        axs = self.axes
    # Only gridspec-managed Axes have a well-defined column to align on.
    axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
    grouper = self._align_label_groups['y']
    for ax in axs:
        _log.debug(' Working on: %s', ax.get_ylabel())
        colspan = ax.get_subplotspec().colspan
        side = ax.yaxis.get_label_position()  # left or right
        # Join this Axes with every Axes whose ylabel sits on the same
        # side and whose subplot column lines up on that side. The
        # grouper is consulted in `axis._update_label_position` at draw
        # time.
        for other in axs:
            if other.yaxis.get_label_position() != side:
                continue
            other_colspan = other.get_subplotspec().colspan
            if ((side == 'left' and colspan.start == other_colspan.start)
                    or (side == 'right' and colspan.stop == other_colspan.stop)):
                grouper.join(ax, other)
def align_titles(self, axs=None):
    """
    Align the titles of subplots in the same subplot row if title
    alignment is being done automatically (i.e. the title position is
    not manually set).

    Alignment persists for draw events after this is called.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list of (or ndarray) `~matplotlib.axes.Axes`
        to align the titles.
        Default is to align all Axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_xlabels
    matplotlib.figure.Figure.align_ylabels
    matplotlib.figure.Figure.align_labels

    Notes
    -----
    This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
    so that their `.SubplotSpec` positions correspond to figure positions.

    Examples
    --------
    Example with titles::

        fig, axs = plt.subplots(1, 2)
        axs[0].set_aspect('equal')
        axs[0].set_title('Title 0')
        axs[1].set_title('Title 1')
        fig.align_titles()
    """
    if axs is None:
        axs = self.axes
    # Only gridspec-managed Axes have a well-defined row to align on.
    axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
    grouper = self._align_label_groups['title']
    for ax in axs:
        _log.debug(' Working on: %s', ax.get_title())
        rowspan = ax.get_subplotspec().rowspan
        # Titles align across every Axes whose subplot row starts on
        # the same grid row.
        for other in axs:
            if rowspan.start == other.get_subplotspec().rowspan.start:
                grouper.join(ax, other)
def align_labels(self, axs=None):
    """
    Align the xlabels and ylabels of subplots with the same subplots
    row or column (respectively) if label alignment is being
    done automatically (i.e. the label position is not manually set).

    Alignment persists for draw events after this is called.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list (or `~numpy.ndarray`) of `~matplotlib.axes.Axes`
        to align the labels.
        Default is to align all Axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_xlabels
    matplotlib.figure.Figure.align_ylabels
    matplotlib.figure.Figure.align_titles

    Notes
    -----
    This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
    so that their `.SubplotSpec` positions correspond to figure positions.
    """
    # Convenience wrapper: delegate to the per-direction aligners.
    self.align_xlabels(axs=axs)
    self.align_ylabels(axs=axs)
def add_gridspec(self, nrows=1, ncols=1, **kwargs):
    """
    Low-level API for creating a `.GridSpec` that has this figure as a parent.

    This is a low-level API, allowing you to create a gridspec and
    subsequently add subplots based on the gridspec. Most users do
    not need that freedom and should use the higher-level methods
    `~.Figure.subplots` or `~.Figure.subplot_mosaic`.

    Parameters
    ----------
    nrows : int, default: 1
        Number of rows in grid.
    ncols : int, default: 1
        Number of columns in grid.

    Returns
    -------
    `.GridSpec`

    Other Parameters
    ----------------
    **kwargs
        Keyword arguments are passed to `.GridSpec`.

    See Also
    --------
    matplotlib.pyplot.subplots

    Examples
    --------
    Adding a subplot that spans two rows::

        fig = plt.figure()
        gs = fig.add_gridspec(2, 2)

        ax1 = fig.add_subplot(gs[0, 0])
        ax2 = fig.add_subplot(gs[1, 0])
        # spans two rows:
        ax3 = fig.add_subplot(gs[:, 1])
    """
    # Discard any user-supplied 'figure' kwarg: the gridspec must be
    # parented to *self*.
    kwargs.pop('figure', None)
    return GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)
def subfigures(self, nrows=1, ncols=1, squeeze=True,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None,
**kwargs):
"""
Add a set of subfigures to this figure or subfigure.
A subfigure has the same artist methods as a figure, and is logically
the same as a figure, but cannot print itself.
See :doc:`/gallery/subplots_axes_and_figures/subfigures`.
.. versionchanged:: 3.10
subfigures are now added in row-major order.
Parameters
----------
nrows, ncols : int, default: 1
Number of rows/columns of the subfigure grid.
squeeze : bool, default: True
If True, extra dimensions are squeezed out from the returned
array of subfigures.
wspace, hspace : float, default: None
The amount of width/height reserved for space between subfigures,
expressed as a fraction of the average subfigure width/height.
If not given, the values will be inferred from rcParams if using
constrained layout (see `~.ConstrainedLayoutEngine`), or zero if
not using a layout engine.
width_ratios : array-like of length *ncols*, optional
Defines the relative widths of the columns. Each column gets a
relative width of ``width_ratios[i] / sum(width_ratios)``.
If not given, all columns will have the same width.
height_ratios : array-like of length *nrows*, optional
Defines the relative heights of the rows. Each row gets a
relative height of ``height_ratios[i] / sum(height_ratios)``.
If not given, all rows will have the same height.
"""
gs = GridSpec(nrows=nrows, ncols=ncols, figure=self,
wspace=wspace, hspace=hspace,
width_ratios=width_ratios,
height_ratios=height_ratios,
left=0, right=1, bottom=0, top=1)
sfarr = np.empty((nrows, ncols), dtype=object)
for i in range(nrows):
for j in range(ncols):
sfarr[i, j] = self.add_subfigure(gs[i, j], **kwargs)
if self.get_layout_engine() is None and (wspace is not None or
hspace is not None):
# Gridspec wspace and hspace is ignored on subfigure instantiation,
# and no space is left. So need to account for it here if required.
bottoms, tops, lefts, rights = gs.get_grid_positions(self)
for sfrow, bottom, top in zip(sfarr, bottoms, tops):
for sf, left, right in zip(sfrow, lefts, rights):
bbox = Bbox.from_extents(left, bottom, right, top)
sf._redo_transform_rel_fig(bbox=bbox)
if squeeze:
# Discarding unneeded dimensions that equal 1. If we only have one
# subfigure, just return it instead of a 1-element array.
return sfarr.item() if sfarr.size == 1 else sfarr.squeeze()
else:
# Returned axis array will be always 2-d, even if nrows=ncols=1.
return sfarr
def add_subfigure(self, subplotspec, **kwargs):
"""
Add a `.SubFigure` to the figure as part of a subplot arrangement.
Parameters
----------
subplotspec : `.gridspec.SubplotSpec`
Defines the region in a parent gridspec where the subfigure will
be placed.
Returns
-------
`.SubFigure`
Other Parameters
----------------
**kwargs
Are passed to the `.SubFigure` object.
See Also
--------
.Figure.subfigures
"""
sf = SubFigure(self, subplotspec, **kwargs)
self.subfigs += [sf]
sf._remove_method = self.subfigs.remove
sf.stale_callback = _stale_figure_callback
self.stale = True
return sf
    def sca(self, a):
        """Set the current Axes to be *a* and return *a*."""
        # Move *a* to the top of the Axes stack so that gca() returns it.
        self._axstack.bubble(a)
        # Notify observers (e.g. pyplot) that the current Axes changed.
        self._axobservers.process("_axes_change_event", self)
        return a
def gca(self):
"""
Get the current Axes.
If there is currently no Axes on this Figure, a new one is created
using `.Figure.add_subplot`. (To test whether there is currently an
Axes on a Figure, check whether ``figure.axes`` is empty. To test
whether there is currently a Figure on the pyplot figure stack, check
whether `.pyplot.get_fignums()` is empty.)
"""
ax = self._axstack.current()
return ax if ax is not None else self.add_subplot()
def _gci(self):
# Helper for `~matplotlib.pyplot.gci`. Do not use elsewhere.
"""
Get the current colorable artist.
Specifically, returns the current `.ScalarMappable` instance (`.Image`
created by `imshow` or `figimage`, `.Collection` created by `pcolor` or
`scatter`, etc.), or *None* if no such instance has been defined.
The current image is an attribute of the current Axes, or the nearest
earlier Axes in the current figure that contains an image.
Notes
-----
Historically, the only colorable artists were images; hence the name
``gci`` (get current image).
"""
# Look first for an image in the current Axes.
ax = self._axstack.current()
if ax is None:
return None
im = ax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def _process_projection_requirements(self, *, axes_class=None, polar=False,
projection=None, **kwargs):
"""
Handle the args/kwargs to add_axes/add_subplot/gca, returning::
(axes_proj_class, proj_class_kwargs)
which can be used for new Axes initialization/identification.
"""
if axes_class is not None:
if polar or projection is not None:
raise ValueError(
"Cannot combine 'axes_class' and 'projection' or 'polar'")
projection_class = axes_class
else:
if polar:
if projection is not None and projection != 'polar':
raise ValueError(
f"polar={polar}, yet projection={projection!r}. "
"Only one of these arguments should be supplied."
)
projection = 'polar'
if isinstance(projection, str) or projection is None:
projection_class = projections.get_projection_class(projection)
elif hasattr(projection, '_as_mpl_axes'):
projection_class, extra_kwargs = projection._as_mpl_axes()
kwargs.update(**extra_kwargs)
else:
raise TypeError(
f"projection must be a string, None or implement a "
f"_as_mpl_axes method, not {projection!r}")
return projection_class, kwargs
def get_default_bbox_extra_artists(self):
"""
Return a list of Artists typically used in `.Figure.get_tightbbox`.
"""
bbox_artists = [artist for artist in self.get_children()
if (artist.get_visible() and artist.get_in_layout())]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
return bbox_artists
    def get_tightbbox(self, renderer=None, *, bbox_extra_artists=None):
        """
        Return a (tight) bounding box of the figure *in inches*.

        Note that `.FigureBase` differs from all other artists, which return
        their `.Bbox` in pixels.

        Artists that have ``artist.set_in_layout(False)`` are not included
        in the bbox.

        Parameters
        ----------
        renderer : `.RendererBase` subclass
            Renderer that will be used to draw the figures (i.e.
            ``fig.canvas.get_renderer()``)
        bbox_extra_artists : list of `.Artist` or ``None``
            List of artists to include in the tight bounding box. If
            ``None`` (default), then all artist children of each Axes are
            included in the tight bounding box.

        Returns
        -------
        `.BboxBase`
            containing the bounding box (in figure inches).
        """
        if renderer is None:
            # Fall back to the root figure's renderer so subfigures work too.
            renderer = self.get_figure(root=True)._get_renderer()
        bb = []
        if bbox_extra_artists is None:
            # Default: all non-Axes children that are visible and in layout.
            artists = [artist for artist in self.get_children()
                       if (artist not in self.axes and artist.get_visible()
                           and artist.get_in_layout())]
        else:
            artists = bbox_extra_artists
        for a in artists:
            bbox = a.get_tightbbox(renderer)
            if bbox is not None:
                bb.append(bbox)
        for ax in self.axes:
            if ax.get_visible():
                # some Axes don't take the bbox_extra_artists kwarg so we
                # need this conditional....
                try:
                    bbox = ax.get_tightbbox(
                        renderer, bbox_extra_artists=bbox_extra_artists)
                except TypeError:
                    bbox = ax.get_tightbbox(renderer)
                bb.append(bbox)
        # Discard degenerate/non-finite boxes so they don't poison the union.
        bb = [b for b in bb
              if (np.isfinite(b.width) and np.isfinite(b.height)
                  and (b.width != 0 or b.height != 0))]
        # Only full Figures have bbox_inches; subfigures only have bbox.
        isfigure = hasattr(self, 'bbox_inches')
        if len(bb) == 0:
            if isfigure:
                return self.bbox_inches
            else:
                # subfigures do not have bbox_inches, but do have a bbox
                bb = [self.bbox]
        _bbox = Bbox.union(bb)
        if isfigure:
            # transform from pixels to inches...
            _bbox = TransformedBbox(_bbox, self.dpi_scale_trans.inverted())
        return _bbox
@staticmethod
def _norm_per_subplot_kw(per_subplot_kw):
expanded = {}
for k, v in per_subplot_kw.items():
if isinstance(k, tuple):
for sub_key in k:
if sub_key in expanded:
raise ValueError(f'The key {sub_key!r} appears multiple times.')
expanded[sub_key] = v
else:
if k in expanded:
raise ValueError(f'The key {k!r} appears multiple times.')
expanded[k] = v
return expanded
@staticmethod
def _normalize_grid_string(layout):
if '\n' not in layout:
# single-line string
return [list(ln) for ln in layout.split(';')]
else:
# multi-line string
layout = inspect.cleandoc(layout)
return [list(ln) for ln in layout.strip('\n').split('\n')]
    def subplot_mosaic(self, mosaic, *, sharex=False, sharey=False,
                       width_ratios=None, height_ratios=None,
                       empty_sentinel='.',
                       subplot_kw=None, per_subplot_kw=None, gridspec_kw=None):
        """
        Build a layout of Axes based on ASCII art or nested lists.

        This is a helper function to build complex GridSpec layouts visually.

        See :ref:`mosaic` for an example and full API documentation

        Parameters
        ----------
        mosaic : list of list of {hashable or nested} or str
            A visual layout of how you want your Axes to be arranged
            labeled as strings. For example ::

                x = [['A panel', 'A panel', 'edge'],
                     ['C panel', '.', 'edge']]

            produces 4 Axes:

            - 'A panel' which is 1 row high and spans the first two columns
            - 'edge' which is 2 rows high and is on the right edge
            - 'C panel' which in 1 row and 1 column wide in the bottom left
            - a blank space 1 row and 1 column wide in the bottom center

            Any of the entries in the layout can be a list of lists
            of the same form to create nested layouts.

            If input is a str, then it can either be a multi-line string of
            the form ::

                '''
                AAE
                C.E
                '''

            where each character is a column and each line is a row. Or it
            can be a single-line string where rows are separated by ``;``::

                'AB;CC'

            The string notation allows only single character Axes labels and
            does not support nesting but is very terse.

            The Axes identifiers may be `str` or a non-iterable hashable
            object (e.g. `tuple` s may not be used).
        sharex, sharey : bool, default: False
            If True, the x-axis (*sharex*) or y-axis (*sharey*) will be shared
            among all subplots. In that case, tick label visibility and axis
            units behave as for `subplots`. If False, each subplot's x- or
            y-axis will be independent.
        width_ratios : array-like of length *ncols*, optional
            Defines the relative widths of the columns. Each column gets a
            relative width of ``width_ratios[i] / sum(width_ratios)``.
            If not given, all columns will have the same width. Equivalent
            to ``gridspec_kw={'width_ratios': [...]}``. In the case of nested
            layouts, this argument applies only to the outer layout.
        height_ratios : array-like of length *nrows*, optional
            Defines the relative heights of the rows. Each row gets a
            relative height of ``height_ratios[i] / sum(height_ratios)``.
            If not given, all rows will have the same height. Equivalent
            to ``gridspec_kw={'height_ratios': [...]}``. In the case of nested
            layouts, this argument applies only to the outer layout.
        subplot_kw : dict, optional
            Dictionary with keywords passed to the `.Figure.add_subplot` call
            used to create each subplot. These values may be overridden by
            values in *per_subplot_kw*.
        per_subplot_kw : dict, optional
            A dictionary mapping the Axes identifiers or tuples of identifiers
            to a dictionary of keyword arguments to be passed to the
            `.Figure.add_subplot` call used to create each subplot. The values
            in these dictionaries have precedence over the values in
            *subplot_kw*.

            If *mosaic* is a string, and thus all keys are single characters,
            it is possible to use a single string instead of a tuple as keys;
            i.e. ``"AB"`` is equivalent to ``("A", "B")``.

            .. versionadded:: 3.7
        gridspec_kw : dict, optional
            Dictionary with keywords passed to the `.GridSpec` constructor used
            to create the grid the subplots are placed on. In the case of
            nested layouts, this argument applies only to the outer layout.
            For more complex layouts, users should use `.Figure.subfigures`
            to create the nesting.
        empty_sentinel : object, optional
            Entry in the layout to mean "leave this space empty". Defaults
            to ``'.'``. Note, if *layout* is a string, it is processed via
            `inspect.cleandoc` to remove leading white space, which may
            interfere with using white-space as the empty sentinel.

        Returns
        -------
        dict[label, Axes]
            A dictionary mapping the labels to the Axes objects. The order of
            the Axes is left-to-right and top-to-bottom of their position in the
            total layout.
        """
        subplot_kw = subplot_kw or {}
        # Copy so the caller's gridspec_kw dict is never mutated below.
        gridspec_kw = dict(gridspec_kw or {})
        per_subplot_kw = per_subplot_kw or {}
        # The *_ratios may be given either directly or via gridspec_kw --
        # but not both.
        if height_ratios is not None:
            if 'height_ratios' in gridspec_kw:
                raise ValueError("'height_ratios' must not be defined both as "
                                 "parameter and as key in 'gridspec_kw'")
            gridspec_kw['height_ratios'] = height_ratios
        if width_ratios is not None:
            if 'width_ratios' in gridspec_kw:
                raise ValueError("'width_ratios' must not be defined both as "
                                 "parameter and as key in 'gridspec_kw'")
            gridspec_kw['width_ratios'] = width_ratios
        # special-case string input
        if isinstance(mosaic, str):
            mosaic = self._normalize_grid_string(mosaic)
            # For string mosaics all labels are single characters, so a string
            # key in per_subplot_kw may abbreviate a tuple of labels.
            per_subplot_kw = {
                tuple(k): v for k, v in per_subplot_kw.items()
            }
        per_subplot_kw = self._norm_per_subplot_kw(per_subplot_kw)
        # Only accept strict bools to allow a possible future API expansion.
        _api.check_isinstance(bool, sharex=sharex, sharey=sharey)

        def _make_array(inp):
            """
            Convert input into 2D array

            We need to have this internal function rather than
            ``np.asarray(..., dtype=object)`` so that a list of lists
            of lists does not get converted to an array of dimension > 2.

            Returns
            -------
            2D object array
            """
            r0, *rest = inp
            # A bare string row means the caller passed a 1-D spec.
            if isinstance(r0, str):
                raise ValueError('List mosaic specification must be 2D')
            for j, r in enumerate(rest, start=1):
                if isinstance(r, str):
                    raise ValueError('List mosaic specification must be 2D')
                if len(r0) != len(r):
                    raise ValueError(
                        "All of the rows must be the same length, however "
                        f"the first row ({r0!r}) has length {len(r0)} "
                        f"and row {j} ({r!r}) has length {len(r)}."
                    )
            out = np.zeros((len(inp), len(r0)), dtype=object)
            for j, r in enumerate(inp):
                for k, v in enumerate(r):
                    out[j, k] = v
            return out

        def _identify_keys_and_nested(mosaic):
            """
            Given a 2D object array, identify unique IDs and nested mosaics

            Parameters
            ----------
            mosaic : 2D object array

            Returns
            -------
            unique_ids : tuple
                The unique non-sub mosaic entries in this mosaic
            nested : dict[tuple[int, int], 2D object array]
            """
            # make sure we preserve the user supplied order
            unique_ids = cbook._OrderedSet()
            nested = {}
            for j, row in enumerate(mosaic):
                for k, v in enumerate(row):
                    if v == empty_sentinel:
                        continue
                    elif not cbook.is_scalar_or_string(v):
                        # Non-scalar entries are nested (sub-)mosaics.
                        nested[(j, k)] = _make_array(v)
                    else:
                        unique_ids.add(v)
            return tuple(unique_ids), nested

        def _do_layout(gs, mosaic, unique_ids, nested):
            """
            Recursively do the mosaic.

            Parameters
            ----------
            gs : GridSpec
            mosaic : 2D object array
                The input converted to a 2D array for this level.
            unique_ids : tuple
                The identified scalar labels at this level of nesting.
            nested : dict[tuple[int, int]], 2D object array
                The identified nested mosaics, if any.

            Returns
            -------
            dict[label, Axes]
                A flat dict of all of the Axes created.
            """
            output = dict()
            # we need to merge together the Axes at this level and the Axes
            # in the (recursively) nested sub-mosaics so that we can add
            # them to the figure in the "natural" order if you were to
            # ravel in c-order all of the Axes that will be created
            #
            # This will stash the upper left index of each object (axes or
            # nested mosaic) at this level
            this_level = dict()
            # go through the unique keys,
            for name in unique_ids:
                # sort out where each axes starts/ends
                indx = np.argwhere(mosaic == name)
                start_row, start_col = np.min(indx, axis=0)
                end_row, end_col = np.max(indx, axis=0) + 1
                # and construct the slice object
                slc = (slice(start_row, end_row), slice(start_col, end_col))
                # some light error checking: the bounding box of the label's
                # occurrences must be filled entirely by that label.
                if (mosaic[slc] != name).any():
                    raise ValueError(
                        f"While trying to layout\n{mosaic!r}\n"
                        f"we found that the label {name!r} specifies a "
                        "non-rectangular or non-contiguous area.")
                # and stash this slice for later
                this_level[(start_row, start_col)] = (name, slc, 'axes')
            # do the same thing for the nested mosaics (simpler because these
            # cannot be spans yet!)
            for (j, k), nested_mosaic in nested.items():
                this_level[(j, k)] = (None, nested_mosaic, 'nested')
            # now go through the things in this level and add them
            # in order left-to-right top-to-bottom
            for key in sorted(this_level):
                name, arg, method = this_level[key]
                # we are doing some hokey function dispatch here based
                # on the 'method' string stashed above to sort out if this
                # element is an Axes or a nested mosaic.
                if method == 'axes':
                    slc = arg
                    # add a single Axes
                    if name in output:
                        raise ValueError(f"There are duplicate keys {name} "
                                         f"in the layout\n{mosaic!r}")
                    ax = self.add_subplot(
                        gs[slc], **{
                            'label': str(name),
                            **subplot_kw,
                            **per_subplot_kw.get(name, {})
                        }
                    )
                    output[name] = ax
                elif method == 'nested':
                    nested_mosaic = arg
                    j, k = key
                    # recursively add the nested mosaic
                    rows, cols = nested_mosaic.shape
                    nested_output = _do_layout(
                        gs[j, k].subgridspec(rows, cols),
                        nested_mosaic,
                        *_identify_keys_and_nested(nested_mosaic)
                    )
                    overlap = set(output) & set(nested_output)
                    if overlap:
                        raise ValueError(
                            f"There are duplicate keys {overlap} "
                            f"between the outer layout\n{mosaic!r}\n"
                            f"and the nested layout\n{nested_mosaic}"
                        )
                    output.update(nested_output)
                else:
                    raise RuntimeError("This should never happen")
            return output

        mosaic = _make_array(mosaic)
        rows, cols = mosaic.shape
        gs = self.add_gridspec(rows, cols, **gridspec_kw)
        ret = _do_layout(gs, mosaic, *_identify_keys_and_nested(mosaic))
        # Share axes against the first created Axes if requested.
        ax0 = next(iter(ret.values()))
        for ax in ret.values():
            if sharex:
                ax.sharex(ax0)
                ax._label_outer_xaxis(skip_non_rectangular_axes=True)
            if sharey:
                ax.sharey(ax0)
                ax._label_outer_yaxis(skip_non_rectangular_axes=True)
        # Reject per_subplot_kw entries that reference no mosaic label.
        if extra := set(per_subplot_kw) - set(ret):
            raise ValueError(
                f"The keys {extra} are in *per_subplot_kw* "
                "but not in the mosaic."
            )
        return ret
    def _set_artist_props(self, a):
        # Adopt artist *a* into this (sub)figure: parent figure, staleness
        # propagation, and default transform.
        if a != self:
            a.set_figure(self)
            a.stale_callback = _stale_figure_callback
        # Position the artist in (sub)figure-relative coordinates by default.
        a.set_transform(self.transSubfigure)
@_docstring.interpd
class SubFigure(FigureBase):
    """
    Logical figure that can be placed inside a figure.

    See :ref:`figure-api-subfigure` for an index of methods on this class.
    Typically instantiated using `.Figure.add_subfigure` or
    `.SubFigure.add_subfigure`, or `.SubFigure.subfigures`. A subfigure has
    the same methods as a figure except for those particularly tied to the size
    or dpi of the figure, and is confined to a prescribed region of the figure.
    For example the following puts two subfigures side-by-side::

        fig = plt.figure()
        sfigs = fig.subfigures(1, 2)
        axsL = sfigs[0].subplots(1, 2)
        axsR = sfigs[1].subplots(2, 1)

    See :doc:`/gallery/subplots_axes_and_figures/subfigures`
    """

    def __init__(self, parent, subplotspec, *,
                 facecolor=None,
                 edgecolor=None,
                 linewidth=0.0,
                 frameon=None,
                 **kwargs):
        """
        Parameters
        ----------
        parent : `.Figure` or `.SubFigure`
            Figure or subfigure that contains the SubFigure. SubFigures
            can be nested.
        subplotspec : `.gridspec.SubplotSpec`
            Defines the region in a parent gridspec where the subfigure will
            be placed.
        facecolor : default: ``"none"``
            The figure patch face color; transparent by default.
        edgecolor : default: :rc:`figure.edgecolor`
            The figure patch edge color.
        linewidth : float
            The linewidth of the frame (i.e. the edge linewidth of the figure
            patch).
        frameon : bool, default: :rc:`figure.frameon`
            If ``False``, suppress drawing the figure background patch.

        Other Parameters
        ----------------
        **kwargs : `.SubFigure` properties, optional

            %(SubFigure:kwdoc)s
        """
        super().__init__(**kwargs)
        # Resolve defaults from rcParams (facecolor defaults to transparent).
        if facecolor is None:
            facecolor = "none"
        if edgecolor is None:
            edgecolor = mpl.rcParams['figure.edgecolor']
        if frameon is None:
            frameon = mpl.rcParams['figure.frameon']
        self._subplotspec = subplotspec
        self._parent = parent
        self._root_figure = parent._root_figure
        # subfigures use the parent axstack
        self._axstack = parent._axstack
        self.subplotpars = parent.subplotpars
        self.dpi_scale_trans = parent.dpi_scale_trans
        self._axobservers = parent._axobservers
        self.transFigure = parent.transFigure
        # bbox_relative is this subfigure's region in parent coordinates;
        # filled in by _redo_transform_rel_fig below.
        self.bbox_relative = Bbox.null()
        self._redo_transform_rel_fig()
        self.figbbox = self._parent.figbbox
        self.bbox = TransformedBbox(self.bbox_relative,
                                    self._parent.transSubfigure)
        self.transSubfigure = BboxTransformTo(self.bbox)
        self.patch = Rectangle(
            xy=(0, 0), width=1, height=1, visible=frameon,
            facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
            # Don't let the figure patch influence bbox calculation.
            in_layout=False, transform=self.transSubfigure)
        self._set_artist_props(self.patch)
        self.patch.set_antialiased(False)

    @property
    def canvas(self):
        # Subfigures render through the parent figure's canvas.
        return self._parent.canvas

    @property
    def dpi(self):
        # dpi is owned by the parent figure; subfigures only proxy it.
        return self._parent.dpi

    @dpi.setter
    def dpi(self, value):
        self._parent.dpi = value

    def get_dpi(self):
        """
        Return the resolution of the parent figure in dots-per-inch as a float.
        """
        return self._parent.dpi

    def set_dpi(self, val):
        """
        Set the resolution of parent figure in dots-per-inch.

        Parameters
        ----------
        val : float
        """
        self._parent.dpi = val
        self.stale = True

    def _get_renderer(self):
        # Delegate to the parent; only the root figure owns a renderer.
        return self._parent._get_renderer()

    def _redo_transform_rel_fig(self, bbox=None):
        """
        Make the transSubfigure bbox relative to Figure transform.

        Parameters
        ----------
        bbox : bbox or None
            If not None, then the bbox is used for relative bounding box.
            Otherwise, it is calculated from the subplotspec.
        """
        if bbox is not None:
            self.bbox_relative.p0 = bbox.p0
            self.bbox_relative.p1 = bbox.p1
            return
        # need to figure out *where* this subplotspec is.
        gs = self._subplotspec.get_gridspec()
        wr = np.asarray(gs.get_width_ratios())
        hr = np.asarray(gs.get_height_ratios())
        # Fractional width/height of the spanned cells ...
        dx = wr[self._subplotspec.colspan].sum() / wr.sum()
        dy = hr[self._subplotspec.rowspan].sum() / hr.sum()
        # ... and the lower-left corner (rows count from the top, hence 1-).
        x0 = wr[:self._subplotspec.colspan.start].sum() / wr.sum()
        y0 = 1 - hr[:self._subplotspec.rowspan.stop].sum() / hr.sum()
        self.bbox_relative.p0 = (x0, y0)
        self.bbox_relative.p1 = (x0 + dx, y0 + dy)

    def get_constrained_layout(self):
        """
        Return whether constrained layout is being used.

        See :ref:`constrainedlayout_guide`.
        """
        return self._parent.get_constrained_layout()

    def get_constrained_layout_pads(self, relative=False):
        """
        Get padding for ``constrained_layout``.

        Returns a list of ``w_pad, h_pad`` in inches and
        ``wspace`` and ``hspace`` as fractions of the subplot.

        See :ref:`constrainedlayout_guide`.

        Parameters
        ----------
        relative : bool
            If `True`, then convert from inches to figure relative.
        """
        return self._parent.get_constrained_layout_pads(relative=relative)

    def get_layout_engine(self):
        # The layout engine lives on the parent (ultimately the root) figure.
        return self._parent.get_layout_engine()

    @property
    def axes(self):
        """
        List of Axes in the SubFigure. You can access and modify the Axes
        in the SubFigure through this list.

        Modifying this list has no effect. Instead, use `~.SubFigure.add_axes`,
        `~.SubFigure.add_subplot` or `~.SubFigure.delaxes` to add or remove an
        Axes.

        Note: The `.SubFigure.axes` property and `~.SubFigure.get_axes` method
        are equivalent.
        """
        return self._localaxes[:]

    get_axes = axes.fget

    def draw(self, renderer):
        # docstring inherited

        # draw the figure bounding box, perhaps none for white figure
        if not self.get_visible():
            return

        artists = self._get_draw_artists(renderer)
        try:
            renderer.open_group('subfigure', gid=self.get_gid())
            self.patch.draw(renderer)
            mimage._draw_list_compositing_images(
                renderer, self, artists, self.get_figure(root=True).suppressComposite)
            renderer.close_group('subfigure')
        finally:
            # Clear staleness even if drawing raised.
            self.stale = False
@_docstring.interpd
class Figure(FigureBase):
"""
The top level container for all the plot elements.
See `matplotlib.figure` for an index of class methods.
Attributes
----------
patch
The `.Rectangle` instance representing the figure background patch.
suppressComposite
For multiple images, the figure will make composite images
depending on the renderer option_image_nocomposite function. If
*suppressComposite* is a boolean, this will override the renderer.
"""
# we want to cache the fonts and mathtext at a global level so that when
# multiple figures are created we can reuse them. This helps with a bug on
# windows where the creation of too many figures leads to too many open
# file handles and improves the performance of parsing mathtext. However,
# these global caches are not thread safe. The solution here is to let the
# Figure acquire a shared lock at the start of the draw, and release it when it
# is done. This allows multiple renderers to share the cached fonts and
# parsed text, but only one figure can draw at a time and so the font cache
# and mathtext cache are used by only one renderer at a time.
_render_lock = threading.RLock()
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __repr__(self):
return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format(
clsname=self.__class__.__name__,
h=self.bbox.size[0], w=self.bbox.size[1],
naxes=len(self.axes),
)
def __init__(self,
figsize=None,
dpi=None,
*,
facecolor=None,
edgecolor=None,
linewidth=0.0,
frameon=None,
subplotpars=None, # rc figure.subplot.*
tight_layout=None, # rc figure.autolayout
constrained_layout=None, # rc figure.constrained_layout.use
layout=None,
**kwargs
):
"""
Parameters
----------
figsize : 2-tuple of floats, default: :rc:`figure.figsize`
Figure dimension ``(width, height)`` in inches.
dpi : float, default: :rc:`figure.dpi`
Dots per inch.
facecolor : default: :rc:`figure.facecolor`
The figure patch facecolor.
edgecolor : default: :rc:`figure.edgecolor`
The figure patch edge color.
linewidth : float
The linewidth of the frame (i.e. the edge linewidth of the figure
patch).
frameon : bool, default: :rc:`figure.frameon`
If ``False``, suppress drawing the figure background patch.
subplotpars : `~matplotlib.gridspec.SubplotParams`
Subplot parameters. If not given, the default subplot
parameters :rc:`figure.subplot.*` are used.
tight_layout : bool or dict, default: :rc:`figure.autolayout`
Whether to use the tight layout mechanism. See `.set_tight_layout`.
.. admonition:: Discouraged
The use of this parameter is discouraged. Please use
``layout='tight'`` instead for the common case of
``tight_layout=True`` and use `.set_tight_layout` otherwise.
constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
This is equal to ``layout='constrained'``.
.. admonition:: Discouraged
The use of this parameter is discouraged. Please use
``layout='constrained'`` instead.
layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, \
None}, default: None
The layout mechanism for positioning of plot elements to avoid
overlapping Axes decorations (labels, ticks, etc). Note that
layout managers can have significant performance penalties.
- 'constrained': The constrained layout solver adjusts Axes sizes
to avoid overlapping Axes decorations. Can handle complex plot
layouts and colorbars, and is thus recommended.
See :ref:`constrainedlayout_guide` for examples.
- 'compressed': uses the same algorithm as 'constrained', but
removes extra space between fixed-aspect-ratio Axes. Best for
simple grids of Axes.
- 'tight': Use the tight layout mechanism. This is a relatively
simple algorithm that adjusts the subplot parameters so that
decorations do not overlap.
See :ref:`tight_layout_guide` for examples.
- 'none': Do not use a layout engine.
- A `.LayoutEngine` instance. Builtin layout classes are
`.ConstrainedLayoutEngine` and `.TightLayoutEngine`, more easily
accessible by 'constrained' and 'tight'. Passing an instance
allows third parties to provide their own layout engine.
If not given, fall back to using the parameters *tight_layout* and
*constrained_layout*, including their config defaults
:rc:`figure.autolayout` and :rc:`figure.constrained_layout.use`.
Other Parameters
----------------
**kwargs : `.Figure` properties, optional
%(Figure:kwdoc)s
"""
super().__init__(**kwargs)
self._root_figure = self
self._layout_engine = None
if layout is not None:
if (tight_layout is not None):
_api.warn_external(
"The Figure parameters 'layout' and 'tight_layout' cannot "
"be used together. Please use 'layout' only.")
if (constrained_layout is not None):
_api.warn_external(
"The Figure parameters 'layout' and 'constrained_layout' "
"cannot be used together. Please use 'layout' only.")
self.set_layout_engine(layout=layout)
elif tight_layout is not None:
if constrained_layout is not None:
_api.warn_external(
"The Figure parameters 'tight_layout' and "
"'constrained_layout' cannot be used together. Please use "
"'layout' parameter")
self.set_layout_engine(layout='tight')
if isinstance(tight_layout, dict):
self.get_layout_engine().set(**tight_layout)
elif constrained_layout is not None:
if isinstance(constrained_layout, dict):
self.set_layout_engine(layout='constrained')
self.get_layout_engine().set(**constrained_layout)
elif constrained_layout:
self.set_layout_engine(layout='constrained')
else:
# everything is None, so use default:
self.set_layout_engine(layout=layout)
# Callbacks traditionally associated with the canvas (and exposed with
# a proxy property), but that actually need to be on the figure for
# pickling.
self._canvas_callbacks = cbook.CallbackRegistry(
signals=FigureCanvasBase.events)
connect = self._canvas_callbacks._connect_picklable
self._mouse_key_ids = [
connect('key_press_event', backend_bases._key_handler),
connect('key_release_event', backend_bases._key_handler),
connect('key_release_event', backend_bases._key_handler),
connect('button_press_event', backend_bases._mouse_handler),
connect('button_release_event', backend_bases._mouse_handler),
connect('scroll_event', backend_bases._mouse_handler),
connect('motion_notify_event', backend_bases._mouse_handler),
]
self._button_pick_id = connect('button_press_event', self.pick)
self._scroll_pick_id = connect('scroll_event', self.pick)
if figsize is None:
figsize = mpl.rcParams['figure.figsize']
if dpi is None:
dpi = mpl.rcParams['figure.dpi']
if facecolor is None:
facecolor = mpl.rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = mpl.rcParams['figure.edgecolor']
if frameon is None:
frameon = mpl.rcParams['figure.frameon']
if not np.isfinite(figsize).all() or (np.array(figsize) < 0).any():
raise ValueError('figure size must be positive finite not '
f'{figsize}')
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.dpi_scale_trans = Affine2D().scale(dpi)
# do not use property as it will trigger
self._dpi = dpi
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.figbbox = self.bbox
self.transFigure = BboxTransformTo(self.bbox)
self.transSubfigure = self.transFigure
self.patch = Rectangle(
xy=(0, 0), width=1, height=1, visible=frameon,
facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
# Don't let the figure patch influence bbox calculation.
in_layout=False)
self._set_artist_props(self.patch)
self.patch.set_antialiased(False)
FigureCanvasBase(self) # Set self.canvas.
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self._axstack = _AxesStack() # track all figure Axes and current Axes
self.clear()
def pick(self, mouseevent):
if not self.canvas.widgetlock.locked():
super().pick(mouseevent)
def _check_layout_engines_compat(self, old, new):
"""
Helper for set_layout engine
If the figure has used the old engine and added a colorbar then the
value of colorbar_gridspec must be the same on the new engine.
"""
if old is None or new is None:
return True
if old.colorbar_gridspec == new.colorbar_gridspec:
return True
# colorbar layout different, so check if any colorbars are on the
# figure...
for ax in self.axes:
if hasattr(ax, '_colorbar'):
# colorbars list themselves as a colorbar.
return False
return True
def set_layout_engine(self, layout=None, **kwargs):
    """
    Set the layout engine for this figure.

    Parameters
    ----------
    layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, None}

        - 'constrained' will use `~.ConstrainedLayoutEngine`
        - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with
          a correction that attempts to make a good layout for fixed-aspect
          ratio Axes.
        - 'tight' uses `~.TightLayoutEngine`
        - 'none' removes layout engine.

        If a `.LayoutEngine` instance, that instance will be used.

        If `None`, the behavior is controlled by :rc:`figure.autolayout`
        (which if `True` behaves as if 'tight' was passed) and
        :rc:`figure.constrained_layout.use` (which if `True` behaves as if
        'constrained' was passed).  If both are `True`,
        :rc:`figure.autolayout` takes priority.

        Users and libraries can define their own layout engines and pass
        the instance directly as well.

    **kwargs
        The keyword arguments are passed to the layout engine to set things
        like padding and margin sizes.  Only used if *layout* is a string.
    """
    if layout is None:
        # Resolve the default from rcParams; autolayout wins over
        # constrained_layout when both are set.
        if mpl.rcParams['figure.autolayout']:
            layout = 'tight'
        elif mpl.rcParams['figure.constrained_layout.use']:
            layout = 'constrained'
        else:
            # No engine requested anywhere: drop any existing engine.
            self._layout_engine = None
            return
    if layout == 'tight':
        new_layout_engine = TightLayoutEngine(**kwargs)
    elif layout == 'constrained':
        new_layout_engine = ConstrainedLayoutEngine(**kwargs)
    elif layout == 'compressed':
        new_layout_engine = ConstrainedLayoutEngine(compress=True,
                                                    **kwargs)
    elif layout == 'none':
        if self._layout_engine is not None:
            # Keep a placeholder that remembers the old engine's
            # compatibility flags so later compatibility checks (e.g. for
            # colorbars) still have something to compare against.
            new_layout_engine = PlaceHolderLayoutEngine(
                self._layout_engine.adjust_compatible,
                self._layout_engine.colorbar_gridspec
            )
        else:
            new_layout_engine = None
    elif isinstance(layout, LayoutEngine):
        new_layout_engine = layout
    else:
        raise ValueError(f"Invalid value for 'layout': {layout!r}")
    # Refuse the swap if an existing colorbar was laid out with an
    # incompatible gridspec strategy.
    if self._check_layout_engines_compat(self._layout_engine,
                                         new_layout_engine):
        self._layout_engine = new_layout_engine
    else:
        raise RuntimeError('Colorbar layout of new layout engine not '
                           'compatible with old engine, and a colorbar '
                           'has been created.  Engine not changed.')
def get_layout_engine(self):
    """Return the `.LayoutEngine` attached to this figure, or None."""
    return self._layout_engine
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
    """Return inline HTML for WebAgg canvases; None for other backends."""
    # Inspect the class name rather than using isinstance, since an
    # isinstance check would force an unconditional import of webagg.
    if 'WebAgg' not in type(self.canvas).__name__:
        return None
    from matplotlib.backends import backend_webagg
    return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
    """
    If using a GUI backend with pyplot, display the figure window.

    If the figure was not created using `~.pyplot.figure`, it will lack
    a `~.backend_bases.FigureManagerBase`, and this method will raise an
    AttributeError.

    .. warning::

        This does not manage an GUI event loop. Consequently, the figure
        may only be shown briefly or not shown at all if you or your
        environment are not managing an event loop.

        Use cases for `.Figure.show` include running this from a GUI
        application (where there is persistently an event loop running) or
        from a shell, like IPython, that install an input hook to allow the
        interactive shell to accept input while the figure is also being
        shown and interactive.  Some, but not all, GUI toolkits will
        register an input hook on import.  See :ref:`cp_integration` for
        more details.

        If you're in a shell without input hook integration or executing a
        python script, you should use `matplotlib.pyplot.show` with
        ``block=True`` instead, which takes care of starting and running
        the event loop for you.

    Parameters
    ----------
    warn : bool, default: True
        If ``True`` and we are not running headless (i.e. on Linux with an
        unset DISPLAY), issue warning when called on a non-GUI backend.
    """
    manager = self.canvas.manager
    # Only pyplot-managed figures carry a manager that can open a window.
    if manager is None:
        raise AttributeError(
            "Figure.show works only for figures managed by pyplot, "
            "normally created by pyplot.figure()")
    try:
        manager.show()
    except NonGuiException as exc:
        # Non-GUI backends cannot show anything; optionally warn instead.
        if warn:
            _api.warn_external(str(exc))
@property
def axes(self):
    """
    List of Axes in the Figure. You can access and modify the Axes in the
    Figure through this list.

    Do not modify the list itself. Instead, use `~Figure.add_axes`,
    `~.Figure.add_subplot` or `~.Figure.delaxes` to add or remove an Axes.

    Note: The `.Figure.axes` property and `~.Figure.get_axes` method are
    equivalent.
    """
    return self._axstack.as_list()

# Method-style alias of the property getter, kept for API compatibility.
get_axes = axes.fget
@property
def number(self):
    """The figure id, used to identify figures in `.pyplot`."""
    # Historically, pyplot dynamically added a number attribute to figure.
    # However, this number must stay in sync with the figure manager.
    # AFAICS overwriting the number attribute does not have the desired
    # effect for pyplot. But there are some repos in GitHub that do change
    # number. So let's take it slow and properly migrate away from writing.
    #
    # Making the dynamic attribute private and wrapping it in a property
    # allows to maintain current behavior and deprecate write-access.
    #
    # When the deprecation expires, there's no need for duplicate state
    # anymore and the private _number attribute can be replaced by
    # `self.canvas.manager.num` if that exists and None otherwise.
    if hasattr(self, '_number'):
        return self._number
    else:
        # Note the trailing space before the implicit string concatenation:
        # without it the message would read "...this" + "will..." fused
        # together ("thiswill change").
        raise AttributeError(
            "'Figure' object has no attribute 'number'. In the future this "
            "will change to returning 'None' instead.")

@number.setter
def number(self, num):
    # Write access is deprecated and will eventually raise.
    _api.warn_deprecated(
        "3.10",
        message="Changing 'Figure.number' is deprecated since %(since)s and "
                "will raise an error starting %(removal)s")
    self._number = num
def _get_renderer(self):
    """Return a renderer for this figure, preferring the canvas' own."""
    canvas = self.canvas
    if hasattr(canvas, 'get_renderer'):
        # Agg-style backends expose a renderer directly.
        return canvas.get_renderer()
    # Fall back to the module-level helper of the same name, which builds
    # a renderer suitable for layout/size computations.
    return _get_renderer(self)
def _get_dpi(self):
    """Getter backing the ``dpi`` property."""
    return self._dpi

def _set_dpi(self, dpi, forward=True):
    """
    Setter backing the ``dpi`` property.

    Parameters
    ----------
    dpi : float
    forward : bool
        Passed on to `~.Figure.set_size_inches`
    """
    if dpi == self._dpi:
        # Unchanged value: bail out early so backends see no spurious
        # resize events.
        return
    self._dpi = dpi
    self.dpi_scale_trans.clear().scale(dpi)
    width, height = self.get_size_inches()
    self.set_size_inches(width, height, forward=forward)

dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.")
def get_tight_layout(self):
    """Return whether `.Figure.tight_layout` is called when drawing."""
    engine = self.get_layout_engine()
    return isinstance(engine, TightLayoutEngine)
@_api.deprecated("3.6", alternative="set_layout_engine",
                 pending=True)
def set_tight_layout(self, tight):
    """
    Set whether and how `.Figure.tight_layout` is called when drawing.

    Parameters
    ----------
    tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None
        If a bool, sets whether to call `.Figure.tight_layout` upon drawing.
        If ``None``, use :rc:`figure.autolayout` instead.
        If a dict, pass it as kwargs to `.Figure.tight_layout`, overriding the
        default paddings.
    """
    if tight is None:
        tight = mpl.rcParams['figure.autolayout']
    # Any truthy value (including a non-empty dict) enables the engine;
    # a dict additionally supplies padding overrides.
    engine_name = 'tight' if bool(tight) else 'none'
    engine_kwargs = tight if isinstance(tight, dict) else {}
    self.set_layout_engine(engine_name, **engine_kwargs)
    self.stale = True
def get_constrained_layout(self):
    """
    Return whether constrained layout is being used.

    See :ref:`constrainedlayout_guide`.
    """
    engine = self.get_layout_engine()
    return isinstance(engine, ConstrainedLayoutEngine)
@_api.deprecated("3.6", alternative="set_layout_engine('constrained')",
                 pending=True)
def set_constrained_layout(self, constrained):
    """
    Set whether ``constrained_layout`` is used upon drawing.

    If None, :rc:`figure.constrained_layout.use` value will be used.

    When providing a dict containing the keys ``w_pad``, ``h_pad``
    the default ``constrained_layout`` paddings will be
    overridden.  These pads are in inches and default to 3.0/72.0.
    ``w_pad`` is the width padding and ``h_pad`` is the height padding.

    Parameters
    ----------
    constrained : bool or dict or None
    """
    if constrained is None:
        constrained = mpl.rcParams['figure.constrained_layout.use']
    # A truthy value (including a non-empty dict) enables the engine; a
    # dict additionally supplies padding overrides.
    _constrained = 'constrained' if bool(constrained) else 'none'
    _parameters = constrained if isinstance(constrained, dict) else {}
    self.set_layout_engine(_constrained, **_parameters)
    self.stale = True
@_api.deprecated(
    "3.6", alternative="figure.get_layout_engine().set()",
    pending=True)
def set_constrained_layout_pads(self, **kwargs):
    """
    Set padding for ``constrained_layout``.

    Tip: The parameters can be passed from a dictionary by using
    ``fig.set_constrained_layout(**pad_dict)``.

    See :ref:`constrainedlayout_guide`.

    Parameters
    ----------
    w_pad : float, default: :rc:`figure.constrained_layout.w_pad`
        Width padding in inches.  This is the pad around Axes
        and is meant to make sure there is enough room for fonts to
        look good.  Defaults to 3 pts = 0.04167 inches

    h_pad : float, default: :rc:`figure.constrained_layout.h_pad`
        Height padding in inches. Defaults to 3 pts.

    wspace : float, default: :rc:`figure.constrained_layout.wspace`
        Width padding between subplots, expressed as a fraction of the
        subplot width.  The total padding ends up being w_pad + wspace.

    hspace : float, default: :rc:`figure.constrained_layout.hspace`
        Height padding between subplots, expressed as a fraction of the
        subplot width.  The total padding ends up being h_pad + hspace.

    """
    # Silently ignored unless a constrained-layout engine is active.
    if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
        self.get_layout_engine().set(**kwargs)
@_api.deprecated("3.6", alternative="fig.get_layout_engine().get()",
                 pending=True)
def get_constrained_layout_pads(self, relative=False):
    """
    Get padding for ``constrained_layout``.

    Returns a list of ``w_pad, h_pad`` in inches and
    ``wspace`` and ``hspace`` as fractions of the subplot.
    All values are None if ``constrained_layout`` is not used.

    See :ref:`constrainedlayout_guide`.

    Parameters
    ----------
    relative : bool
        If `True`, then convert from inches to figure relative.
    """
    if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
        return None, None, None, None
    # The engine stores its pads in a dict; see ConstrainedLayoutEngine.get.
    info = self.get_layout_engine().get()
    w_pad = info['w_pad']
    h_pad = info['h_pad']
    wspace = info['wspace']
    hspace = info['hspace']

    if relative and (w_pad is not None or h_pad is not None):
        # Convert the inch-based pads to figure-relative fractions using
        # the renderer's pixel dimensions.
        renderer = self._get_renderer()
        dpi = renderer.dpi
        w_pad = w_pad * dpi / renderer.width
        h_pad = h_pad * dpi / renderer.height

    return w_pad, h_pad, wspace, hspace
def set_canvas(self, canvas):
    """
    Set the canvas that contains the figure.

    Parameters
    ----------
    canvas : FigureCanvas
        The new canvas; stored as ``self.canvas``.
    """
    self.canvas = canvas
@_docstring.interpd
def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None,
             vmin=None, vmax=None, origin=None, resize=False, *,
             colorizer=None, **kwargs):
    """
    Add a non-resampled image to the figure.

    The image is attached to the lower or upper left corner depending on
    *origin*.

    Parameters
    ----------
    X
        The image data. This is an array of one of the following shapes:

        - (M, N): an image with scalar data.  Color-mapping is controlled
          by *cmap*, *norm*, *vmin*, and *vmax*.
        - (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
        - (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
          i.e. including transparency.

    xo, yo : int
        The *x*/*y* image offset in pixels.

    alpha : None or float
        The alpha blending value.

    %(cmap_doc)s

        This parameter is ignored if *X* is RGB(A).

    %(norm_doc)s

        This parameter is ignored if *X* is RGB(A).

    %(vmin_vmax_doc)s

        This parameter is ignored if *X* is RGB(A).

    origin : {'upper', 'lower'}, default: :rc:`image.origin`
        Indicates where the [0, 0] index of the array is in the upper left
        or lower left corner of the Axes.

    resize : bool
        If *True*, resize the figure to match the given image size.

    %(colorizer_doc)s

        This parameter is ignored if *X* is RGB(A).

    Returns
    -------
    `matplotlib.image.FigureImage`

    Other Parameters
    ----------------
    **kwargs
        Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`.

    Notes
    -----
    figimage complements the Axes image (`~matplotlib.axes.Axes.imshow`)
    which will be resampled to fit the current Axes.  If you want
    a resampled image to fill the entire figure, you can define an
    `~matplotlib.axes.Axes` with extent [0, 0, 1, 1].

    Examples
    --------
    ::

        f = plt.figure()
        nx = int(f.get_figwidth() * f.dpi)
        ny = int(f.get_figheight() * f.dpi)
        data = np.random.random((ny, nx))
        f.figimage(data)
        plt.show()
    """
    if resize:
        # Resize the figure so one array element maps to one pixel.
        dpi = self.get_dpi()
        figsize = [x / dpi for x in (X.shape[1], X.shape[0])]
        self.set_size_inches(figsize, forward=True)

    im = mimage.FigureImage(self, cmap=cmap, norm=norm,
                            colorizer=colorizer,
                            offsetx=xo, offsety=yo,
                            origin=origin, **kwargs)
    im.stale_callback = _stale_figure_callback
    im.set_array(X)
    im.set_alpha(alpha)
    if norm is None:
        # vmin/vmax are mutually exclusive with an explicit colorizer.
        im._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)
        im.set_clim(vmin, vmax)
    self.images.append(im)
    im._remove_method = self.images.remove
    self.stale = True
    return im
def set_size_inches(self, w, h=None, forward=True):
    """
    Set the figure size in inches.

    Call signatures::

        fig.set_size_inches(w, h)  # OR
        fig.set_size_inches((w, h))

    Parameters
    ----------
    w : (float, float) or float
        Width and height in inches (if height not specified as a separate
        argument) or width.
    h : float
        Height in inches.
    forward : bool, default: True
        If ``True``, the canvas size is automatically updated, e.g.,
        you can resize the figure window from the shell.

    See Also
    --------
    matplotlib.figure.Figure.get_size_inches
    matplotlib.figure.Figure.set_figwidth
    matplotlib.figure.Figure.set_figheight

    Notes
    -----
    To transform from pixels to inches divide by `Figure.dpi`.
    """
    if h is None:
        # Called with a single (width, height) pair as the sole argument.
        w, h = w
    size = np.array([w, h])
    valid = np.isfinite(size).all() and not (size < 0).any()
    if not valid:
        raise ValueError(f'figure size must be positive finite not {size}')
    self.bbox_inches.p1 = size
    if forward:
        manager = self.canvas.manager
        if manager is not None:
            # Propagate the new size (in pixels) to the GUI window.
            manager.resize(*(size * self.dpi).astype(int))
    self.stale = True
def get_size_inches(self):
    """
    Return the current size of the figure in inches.

    Returns
    -------
    ndarray
        The size (width, height) of the figure in inches.

    See Also
    --------
    matplotlib.figure.Figure.set_size_inches
    matplotlib.figure.Figure.get_figwidth
    matplotlib.figure.Figure.get_figheight

    Notes
    -----
    The size in pixels can be obtained by multiplying with `Figure.dpi`.
    """
    # Return a copy so callers cannot mutate the figure's bbox in place.
    upper_right = self.bbox_inches.p1
    return np.array(upper_right)
def get_figwidth(self):
    """Return the figure width in inches."""
    return self.bbox_inches.width
def get_figheight(self):
    """Return the figure height in inches."""
    return self.bbox_inches.height
def get_dpi(self):
    """Return the resolution in dots per inch as a float."""
    return self.dpi
def set_dpi(self, val):
    """
    Set the resolution of the figure in dots-per-inch.

    Parameters
    ----------
    val : float
        The new resolution; assigned through the ``dpi`` property.
    """
    self.dpi = val
    self.stale = True
def set_figwidth(self, val, forward=True):
    """
    Set the width of the figure in inches.

    Parameters
    ----------
    val : float
        New width in inches; the height is left unchanged.
    forward : bool
        See `set_size_inches`.

    See Also
    --------
    matplotlib.figure.Figure.set_figheight
    matplotlib.figure.Figure.set_size_inches
    """
    current_height = self.get_figheight()
    self.set_size_inches(val, current_height, forward=forward)
def set_figheight(self, val, forward=True):
    """
    Set the height of the figure in inches.

    Parameters
    ----------
    val : float
        New height in inches; the width is left unchanged.
    forward : bool
        See `set_size_inches`.

    See Also
    --------
    matplotlib.figure.Figure.set_figwidth
    matplotlib.figure.Figure.set_size_inches
    """
    current_width = self.get_figwidth()
    self.set_size_inches(current_width, val, forward=forward)
def clear(self, keep_observers=False):
    # docstring inherited
    super().clear(keep_observers=keep_observers)
    # FigureBase.clear leaves toolbars alone — only a top-level Figure can
    # own one — so refresh it here after the Axes are gone.
    toolbar = self.canvas.toolbar
    if toolbar is not None:
        toolbar.update()
@_finalize_rasterization
@allow_rasterization
def draw(self, renderer):
    # docstring inherited

    # Invisible figures draw nothing at all.
    if not self.get_visible():
        return

    with self._render_lock:

        artists = self._get_draw_artists(renderer)
        try:
            renderer.open_group('figure', gid=self.get_gid())
            # Run the layout engine (tight/constrained) before drawing so
            # Axes positions are final.
            if self.axes and self.get_layout_engine() is not None:
                try:
                    self.get_layout_engine().execute(self)
                except ValueError:
                    pass
                    # ValueError can occur when resizing a window.

            self.patch.draw(renderer)
            mimage._draw_list_compositing_images(
                renderer, self, artists, self.suppressComposite)

            renderer.close_group('figure')
        finally:
            # Mark clean even if a draw step raised, so we don't redraw
            # in a loop.
            self.stale = False

    # Fire the draw_event only after the render lock is released.
    DrawEvent("draw_event", self.canvas, renderer)._process()
def draw_without_rendering(self):
    """
    Draw the figure with no output.  Useful to get the final size of
    artists that require a draw before their size is known (e.g. text).
    """
    # The helper renderer suppresses all drawing output while still
    # exercising the full draw (and layout) machinery.
    rend = _get_renderer(self)
    with rend._draw_disabled():
        self.draw(rend)
def draw_artist(self, a):
    """
    Draw `.Artist` *a* only.
    """
    renderer = self.canvas.get_renderer()
    a.draw(renderer)
def __getstate__(self):
    """Return the picklable state, stripping the (unpicklable) canvas."""
    state = super().__getstate__()

    # The canvas cannot currently be pickled, but this has the benefit
    # of meaning that a figure can be detached from one canvas, and
    # re-attached to another.
    state.pop("canvas")

    # discard any changes to the dpi due to pixel ratio changes
    state["_dpi"] = state.get('_original_dpi', state['_dpi'])

    # add version information to the state
    state['__mpl_version__'] = mpl.__version__

    # check whether the figure manager (if any) is registered with pyplot
    from matplotlib import _pylab_helpers
    if self.canvas.manager in _pylab_helpers.Gcf.figs.values():
        state['_restore_to_pylab'] = True
    return state
def __setstate__(self, state):
    """Restore pickled state, recreating the canvas and, if the figure
    was pyplot-managed when saved, a new pyplot figure manager."""
    version = state.pop('__mpl_version__')
    restore_to_pylab = state.pop('_restore_to_pylab', False)

    if version != mpl.__version__:
        _api.warn_external(
            f"This figure was saved with matplotlib version {version} and "
            f"loaded with {mpl.__version__} so may not function correctly."
        )
    self.__dict__ = state

    # re-initialise some of the unstored state information
    FigureCanvasBase(self)  # Set self.canvas.

    if restore_to_pylab:
        # lazy import to avoid circularity
        import matplotlib.pyplot as plt
        import matplotlib._pylab_helpers as pylab_helpers
        # Register under a fresh, unused figure number.
        allnums = plt.get_fignums()
        num = max(allnums) + 1 if allnums else 1
        backend = plt._get_backend_mod()
        mgr = backend.new_figure_manager_given_figure(num, self)
        pylab_helpers.Gcf._set_new_active_manager(mgr)
        plt.draw_if_interactive()

    self.stale = True
def add_axobserver(self, func):
    """Whenever the Axes state change, ``func(self)`` will be called."""
    # Connect a wrapper lambda rather than *func* itself so the registry's
    # weakref machinery does not garbage-collect the callback.
    wrapper = lambda arg: func(arg)
    self._axobservers.connect("_axes_change_event", wrapper)
def savefig(self, fname, *, transparent=None, **kwargs):
    """
    Save the current figure as an image or vector graphic to a file.

    Call signature::

      savefig(fname, *, transparent=None, dpi='figure', format=None,
              metadata=None, bbox_inches=None, pad_inches=0.1,
              facecolor='auto', edgecolor='auto', backend=None,
              **kwargs
             )

    The available output formats depend on the backend being used.

    Parameters
    ----------
    fname : str or path-like or binary file-like
        A path, or a Python file-like object, or
        possibly some backend-dependent object such as
        `matplotlib.backends.backend_pdf.PdfPages`.

        If *format* is set, it determines the output format, and the file
        is saved as *fname*.  Note that *fname* is used verbatim, and there
        is no attempt to make the extension, if any, of *fname* match
        *format*, and no extension is appended.

        If *format* is not set, then the format is inferred from the
        extension of *fname*, if there is one.  If *format* is not
        set and *fname* has no extension, then the file is saved with
        :rc:`savefig.format` and the appropriate extension is appended to
        *fname*.

    Other Parameters
    ----------------
    transparent : bool, default: :rc:`savefig.transparent`
        If *True*, the Axes patches will all be transparent; the
        Figure patch will also be transparent unless *facecolor*
        and/or *edgecolor* are specified via kwargs.

        If *False* has no effect and the color of the Axes and
        Figure patches are unchanged (unless the Figure patch
        is specified via the *facecolor* and/or *edgecolor* keyword
        arguments in which case those colors are used).

        The transparency of these patches will be restored to their
        original values upon exit of this function.

        This is useful, for example, for displaying
        a plot on top of a colored background on a web page.

    dpi : float or 'figure', default: :rc:`savefig.dpi`
        The resolution in dots per inch.  If 'figure', use the figure's
        dpi value.

    format : str
        The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when
        this is unset is documented under *fname*.

    metadata : dict, optional
        Key/value pairs to store in the image metadata. The supported keys
        and defaults depend on the image format and backend:

        - 'png' with Agg backend: See the parameter ``metadata`` of
          `~.FigureCanvasAgg.print_png`.
        - 'pdf' with pdf backend: See the parameter ``metadata`` of
          `~.backend_pdf.PdfPages`.
        - 'svg' with svg backend: See the parameter ``metadata`` of
          `~.FigureCanvasSVG.print_svg`.
        - 'eps' and 'ps' with PS backend: Only 'Creator' is supported.

        Not supported for 'pgf', 'raw', and 'rgba' as those formats do not support
        embedding metadata.
        Does not currently support 'jpg', 'tiff', or 'webp', but may include
        embedding EXIF metadata in the future.

    bbox_inches : str or `.Bbox`, default: :rc:`savefig.bbox`
        Bounding box in inches: only the given portion of the figure is
        saved.  If 'tight', try to figure out the tight bbox of the figure.

    pad_inches : float or 'layout', default: :rc:`savefig.pad_inches`
        Amount of padding in inches around the figure when bbox_inches is
        'tight'. If 'layout' use the padding from the constrained or
        compressed layout engine; ignored if one of those engines is not in
        use.

    facecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.facecolor`
        The facecolor of the figure.  If 'auto', use the current figure
        facecolor.

    edgecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.edgecolor`
        The edgecolor of the figure.  If 'auto', use the current figure
        edgecolor.

    backend : str, optional
        Use a non-default backend to render the file, e.g. to render a
        png file with the "cairo" backend rather than the default "agg",
        or a pdf file with the "pgf" backend rather than the default
        "pdf".  Note that the default backend is normally sufficient.  See
        :ref:`the-builtin-backends` for a list of valid backends for each
        file format.  Custom backends can be referenced as "module://...".

    orientation : {'landscape', 'portrait'}
        Currently only supported by the postscript backend.

    papertype : str
        One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
        'a10', 'b0' through 'b10'. Only supported for postscript
        output.

    bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
        A list of extra artists that will be considered when the
        tight bbox is calculated.

    pil_kwargs : dict, optional
        Additional keyword arguments that are passed to
        `PIL.Image.Image.save` when saving the figure.
    """
    kwargs.setdefault('dpi', mpl.rcParams['savefig.dpi'])
    if transparent is None:
        transparent = mpl.rcParams['savefig.transparent']

    # The ExitStack collects the temporary color overrides so every patch
    # is restored to its original color when this function returns.
    with ExitStack() as stack:
        if transparent:

            def _recursively_make_subfig_transparent(exit_stack, subfig):
                # Blank out the subfigure patch, all of its Axes patches,
                # and recurse into nested subfigures.
                exit_stack.enter_context(
                    subfig.patch._cm_set(
                        facecolor="none", edgecolor="none"))
                for ax in subfig.axes:
                    exit_stack.enter_context(
                        ax.patch._cm_set(
                            facecolor="none", edgecolor="none"))
                for sub_subfig in subfig.subfigs:
                    _recursively_make_subfig_transparent(
                        exit_stack, sub_subfig)

            def _recursively_make_axes_transparent(exit_stack, ax):
                # Blank out this Axes' patch and those of its child Axes
                # (e.g. insets), recursing one level per call.
                exit_stack.enter_context(
                    ax.patch._cm_set(facecolor="none", edgecolor="none"))
                for child_ax in ax.child_axes:
                    exit_stack.enter_context(
                        child_ax.patch._cm_set(
                            facecolor="none", edgecolor="none"))
                for child_childax in ax.child_axes:
                    _recursively_make_axes_transparent(
                        exit_stack, child_childax)

            kwargs.setdefault('facecolor', 'none')
            kwargs.setdefault('edgecolor', 'none')
            # set subfigure to appear transparent in printed image
            for subfig in self.subfigs:
                _recursively_make_subfig_transparent(stack, subfig)
            # set Axes to be transparent
            for ax in self.axes:
                _recursively_make_axes_transparent(stack, ax)

        self.canvas.print_figure(fname, **kwargs)
def ginput(self, n=1, timeout=30, show_clicks=True,
           mouse_add=MouseButton.LEFT,
           mouse_pop=MouseButton.RIGHT,
           mouse_stop=MouseButton.MIDDLE):
    """
    Blocking call to interact with a figure.

    Wait until the user clicks *n* times on the figure, and return the
    coordinates of each click in a list.

    There are three possible interactions:

    - Add a point.
    - Remove the most recently added point.
    - Stop the interaction and return the points added so far.

    The actions are assigned to mouse buttons via the arguments
    *mouse_add*, *mouse_pop* and *mouse_stop*.

    Parameters
    ----------
    n : int, default: 1
        Number of mouse clicks to accumulate. If negative, accumulate
        clicks until the input is terminated manually.
    timeout : float, default: 30 seconds
        Number of seconds to wait before timing out. If zero or negative
        will never time out.
    show_clicks : bool, default: True
        If True, show a red cross at the location of each click.
    mouse_add : `.MouseButton` or None, default: `.MouseButton.LEFT`
        Mouse button used to add points.
    mouse_pop : `.MouseButton` or None, default: `.MouseButton.RIGHT`
        Mouse button used to remove the most recently added point.
    mouse_stop : `.MouseButton` or None, default: `.MouseButton.MIDDLE`
        Mouse button used to stop input.

    Returns
    -------
    list of tuples
        A list of the clicked (x, y) coordinates.

    Notes
    -----
    The keyboard can also be used to select points in case your mouse
    does not have one or more of the buttons.  The delete and backspace
    keys act like right-clicking (i.e., remove last point), the enter key
    terminates input and any other key (not already used by the window
    manager) selects a point.
    """
    clicks = []   # accumulated (x, y) data coordinates
    marks = []    # Line2D cross markers, parallel to *clicks*

    def handler(event):
        # Dispatches each mouse/key event to stop / pop / add.
        is_button = event.name == "button_press_event"
        is_key = event.name == "key_press_event"
        # Quit (even if not in infinite mode; this is consistent with
        # MATLAB and sometimes quite useful, but will require the user to
        # test how many points were actually returned before using data).
        if (is_button and event.button == mouse_stop
                or is_key and event.key in ["escape", "enter"]):
            self.canvas.stop_event_loop()
        # Pop last click.
        elif (is_button and event.button == mouse_pop
                or is_key and event.key in ["backspace", "delete"]):
            if clicks:
                clicks.pop()
                if show_clicks:
                    marks.pop().remove()
                    self.canvas.draw()
        # Add new click.
        elif (is_button and event.button == mouse_add
                # On macOS/gtk, some keys return None.
                or is_key and event.key is not None):
            if event.inaxes:
                clicks.append((event.xdata, event.ydata))
                _log.info("input %i: %f, %f",
                          len(clicks), event.xdata, event.ydata)
                if show_clicks:
                    line = mpl.lines.Line2D([event.xdata], [event.ydata],
                                            marker="+", color="r")
                    event.inaxes.add_line(line)
                    marks.append(line)
                    self.canvas.draw()
        if len(clicks) == n and n > 0:
            # Collected the requested number of points; end the loop.
            self.canvas.stop_event_loop()

    _blocking_input.blocking_input_loop(
        self, ["button_press_event", "key_press_event"], timeout, handler)

    # Cleanup: remove the temporary cross markers before returning.
    for mark in marks:
        mark.remove()
    self.canvas.draw()

    return clicks
def waitforbuttonpress(self, timeout=-1):
    """
    Blocking call to interact with the figure.

    Wait for user input and return True if a key was pressed, False if a
    mouse button was pressed and None if no input was given within
    *timeout* seconds.  Negative values deactivate *timeout*.
    """
    event = None

    def handler(ev):
        # Record the first event and end the blocking loop.
        nonlocal event
        event = ev
        self.canvas.stop_event_loop()

    _blocking_input.blocking_input_loop(
        self, ["button_press_event", "key_press_event"], timeout, handler)

    if event is None:
        # Timed out with no input.
        return None
    return event.name == "key_press_event"
def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Adjust the padding between and around subplots.

    To exclude an artist on the Axes from the bounding box calculation
    that determines the subplot parameters (i.e. legend, or annotation),
    set ``a.set_in_layout(False)`` for that artist.

    Parameters
    ----------
    pad : float, default: 1.08
        Padding between the figure edge and the edges of subplots,
        as a fraction of the font size.
    h_pad, w_pad : float, default: *pad*
        Padding (height/width) between edges of adjacent subplots,
        as a fraction of the font size.
    rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)
        A rectangle in normalized figure coordinates into which the whole
        subplots area (including labels) will fit.

    See Also
    --------
    .Figure.set_layout_engine
    .pyplot.tight_layout
    """
    # note that here we do not permanently set the figures engine to
    # tight_layout but rather just perform the layout in place and remove
    # any previous engines.
    engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad,
                               rect=rect)
    try:
        previous_engine = self.get_layout_engine()
        self.set_layout_engine(engine)
        engine.execute(self)
        # Warn when this one-shot layout silently replaces a different
        # kind of engine the user had configured.
        if previous_engine is not None and not isinstance(
            previous_engine, (TightLayoutEngine, PlaceHolderLayoutEngine)
        ):
            _api.warn_external('The figure layout has changed to tight')
    finally:
        # Always detach the one-shot engine again.
        self.set_layout_engine('none')
def figaspect(arg):
    """
    Calculate the width and height for a figure with a specified aspect ratio.

    While the height is taken from :rc:`figure.figsize`, the width is
    adjusted to match the desired aspect ratio. Additionally, it is ensured
    that the width is in the range [4., 16.] and the height is in the range
    [2., 16.]. If necessary, the default height is adjusted to ensure this.

    Parameters
    ----------
    arg : float or 2D array
        If a float, this defines the aspect ratio (i.e. the ratio height /
        width).
        In case of an array the aspect ratio is number of rows / number of
        columns, so that the array could be fitted in the figure undistorted.

    Returns
    -------
    size : (2,) array
        The width and height of the figure in inches.

    Notes
    -----
    If you want to create an Axes within the figure, that still preserves the
    aspect ratio, be sure to create it with equal width and height. See
    examples below.

    Thanks to Fernando Perez for this function.

    Examples
    --------
    Make a figure twice as tall as it is wide::

        w, h = figaspect(2.)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)

    Make a figure with the proper aspect for an array::

        A = rand(5, 3)
        w, h = figaspect(A)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)
    """
    # min/max sizes to respect when autoscaling.  If John likes the idea,
    # they could become rc parameters, for now they're hardwired.
    figsize_min = np.array((4.0, 2.0))    # min length for width/height
    figsize_max = np.array((16.0, 16.0))  # max length for width/height

    # Aspect ratio: rows/columns for an array, the value itself otherwise.
    if hasattr(arg, 'shape') and not np.isscalar(arg):
        nrows, ncols = arg.shape[:2]
        aspect = nrows / ncols
    else:
        aspect = arg

    # Height of user figure defaults
    fig_height = mpl.rcParams['figure.figsize'][1]

    # New size for the figure, keeping the aspect ratio of the caller
    newsize = np.array((fig_height / aspect, fig_height))

    # Scale up if either dimension fell below its minimum...
    newsize /= min(1.0, *(newsize / figsize_min))
    # ...then scale down if either dimension exceeds its maximum.
    newsize /= max(1.0, *(newsize / figsize_max))

    # Finally, if we have a really funky aspect ratio, break it but respect
    # the min/max dimensions (we don't want figures 10 feet tall!)
    return np.clip(newsize, figsize_min, figsize_max)
venv\Lib\site-packages\matplotlib\font_manager.py
"""
A module for finding, managing, and using fonts across platforms.
This module provides a single `FontManager` instance, ``fontManager``, that can
be shared across backends and platforms. The `findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified `FontProperties`
instance. The `FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The `FontManager.addfont` function adds a custom font from a file without
installing it into your operating system.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
"""
# KNOWN ISSUES
#
# - documentation
# - font variant is untested
# - font stretch is incomplete
# - font size is incomplete
# - default font algorithm needs improvement and testing
# - setWeights function needs improvement
# - 'light' is an invalid weight value, remove it.
from __future__ import annotations
from base64 import b64encode
import copy
import dataclasses
from functools import lru_cache
import functools
from io import BytesIO
import json
import logging
from numbers import Number
import os
from pathlib import Path
import plistlib
import re
import subprocess
import sys
import threading
import matplotlib as mpl
from matplotlib import _api, _afm, cbook, ft2font
from matplotlib._fontconfig_pattern import (
parse_fontconfig_pattern, generate_fontconfig_pattern)
from matplotlib.rcsetup import _validators
_log = logging.getLogger(__name__)
# Multipliers applied to the default font size for the relative CSS size
# keywords; the None key (size unspecified) leaves the size unchanged.
font_scalings = {
    'xx-small': 0.579,
    'x-small': 0.694,
    'small': 0.833,
    'medium': 1.0,
    'large': 1.200,
    'x-large': 1.440,
    'xx-large': 1.728,
    'larger': 1.2,
    'smaller': 0.833,
    None: 1.0,
}
# CSS font-stretch keywords mapped to numeric widths (100-900); several
# alternate spellings ("semi-expanded"/"semi-extended", etc.) map to the
# same numeric value.
stretch_dict = {
    'ultra-condensed': 100,
    'extra-condensed': 200,
    'condensed': 300,
    'semi-condensed': 400,
    'normal': 500,
    'semi-expanded': 600,
    'semi-extended': 600,
    'expanded': 700,
    'extended': 700,
    'extra-expanded': 800,
    'extra-extended': 800,
    'ultra-expanded': 900,
    'ultra-extended': 900,
}
# CSS font-weight names mapped to numeric weights (100-900).
weight_dict = {
    'ultralight': 100,
    'light': 200,
    'normal': 400,
    'regular': 400,
    'book': 400,
    'medium': 500,
    'roman': 500,
    'semibold': 600,
    'demibold': 600,
    'demi': 600,
    'bold': 700,
    'heavy': 800,
    'extra bold': 800,
    'black': 900,
}
# (regex, weight) pairs tried in order against style-name strings when
# guessing a font's weight.  Order matters: more specific names must come
# before the generic substring they contain (see inline notes).
_weight_regexes = [
    # From fontconfig's FcFreeTypeQueryFaceInternal; not the same as
    # weight_dict!
    ("thin", 100),
    ("extralight", 200),
    ("ultralight", 200),
    ("demilight", 350),
    ("semilight", 350),
    ("light", 300),  # Needs to come *after* demi/semilight!
    ("book", 380),
    ("regular", 400),
    ("normal", 400),
    ("medium", 500),
    ("demibold", 600),
    ("demi", 600),
    ("semibold", 600),
    ("extrabold", 800),
    ("superbold", 800),
    ("ultrabold", 800),
    ("bold", 700),  # Needs to come *after* extra/super/ultrabold!
    ("ultrablack", 1000),
    ("superblack", 1000),
    ("extrablack", 1000),
    (r"\bultra", 1000),
    ("black", 900),  # Needs to come *after* ultra/super/extrablack!
    ("heavy", 900),
]
# Generic CSS family names (plus the common aliases 'sans' and
# 'sans serif') that are resolved through the font.<family> rcParams
# instead of naming a concrete font.
font_family_aliases = {
    'serif',
    'sans-serif',
    'sans serif',
    'cursive',
    'fantasy',
    'monospace',
    'sans',
}
# OS Font paths
try:
    _HOME = Path.home()
except Exception:  # Exceptions thrown by home() are not specified...
    _HOME = Path(os.devnull)  # Just an arbitrary path with no children.
# Registry key holding the per-user shell folder locations (incl. Fonts).
MSFolders = \
    r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
# Registry keys enumerating installed fonts (NT and legacy locations).
MSFontDirectories = [
    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
    r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
# Per-user font directories on Windows.
MSUserFontDirectories = [
    str(_HOME / 'AppData/Local/Microsoft/Windows/Fonts'),
    str(_HOME / 'AppData/Roaming/Microsoft/Windows/Fonts'),
]
# Directories scanned for fonts on X11/Linux systems.
X11FontDirectories = [
    # an old standard installation point
    "/usr/X11R6/lib/X11/fonts/TTF/",
    "/usr/X11/lib/X11/fonts",
    # here is the new standard location for fonts
    "/usr/share/fonts/",
    # documented as a good place to install new fonts
    "/usr/local/share/fonts/",
    # common application, not really useful
    "/usr/lib/openoffice/share/fonts/truetype/",
    # user fonts
    str((Path(os.environ.get('XDG_DATA_HOME') or _HOME / ".local/share"))
        / "fonts"),
    str(_HOME / ".fonts"),
]
# Directories scanned for fonts on macOS (in addition to fontconfig).
OSXFontDirectories = [
    "/Library/Fonts/",
    "/Network/Library/Fonts/",
    "/System/Library/Fonts/",
    # fonts installed via MacPorts
    "/opt/local/share/fonts",
    # user fonts
    str(_HOME / "Library/Fonts"),
]
def get_fontext_synonyms(fontext):
    """
    Return the list of file extensions treated as synonyms of *fontext*.

    'afm' stands alone; each TrueType-family extension ('otf', 'ttc',
    'ttf') expands to the full TrueType-family set.  Raises `KeyError`
    for any other extension.
    """
    truetype_like = ['otf', 'ttc', 'ttf']
    synonyms = {
        'afm': ['afm'],
        'otf': truetype_like,
        'ttc': truetype_like,
        'ttf': truetype_like,
    }
    # Return a fresh list so callers may mutate it safely.
    return list(synonyms[fontext])
def list_fonts(directory, extensions):
    """
    Recursively collect font files under *directory*.

    Returns the path of every file whose suffix matches (case-
    insensitively) one of *extensions*, given without the leading dot.
    ``os.walk`` is used because it ignores access errors, unlike
    ``Path.glob``.
    """
    wanted = {"." + ext for ext in extensions}
    matches = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        for fname in filenames:
            if Path(fname).suffix.lower() in wanted:
                matches.append(os.path.join(dirpath, fname))
    return matches
def win32FontDirectory():
    r"""
    Return the user-specified font directory for Win32.  This is
    looked up from the registry key ::

      \\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts

    If the key is not found, ``%WINDIR%\Fonts`` will be returned.
    """  # noqa: E501
    import winreg  # Windows-only module; imported lazily on purpose.
    try:
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders) as user:
            return winreg.QueryValueEx(user, 'Fonts')[0]
    except OSError:
        # Key missing or unreadable: fall back to the system font folder.
        return os.path.join(os.environ['WINDIR'], 'Fonts')
def _get_win32_installed_fonts():
    """
    List the font paths known to the Windows registry.

    Returns a set of resolved `pathlib.Path` objects, gathered from both
    the system-wide and the per-user font registry keys.
    """
    import winreg  # Windows-only module; imported lazily on purpose.
    items = set()
    # Search and resolve fonts listed in the registry.
    for domain, base_dirs in [
            (winreg.HKEY_LOCAL_MACHINE, [win32FontDirectory()]),  # System.
            (winreg.HKEY_CURRENT_USER, MSUserFontDirectories),  # User.
    ]:
        for base_dir in base_dirs:
            for reg_path in MSFontDirectories:
                try:
                    with winreg.OpenKey(domain, reg_path) as local:
                        # QueryInfoKey()[1] is the number of values under
                        # the key.
                        for j in range(winreg.QueryInfoKey(local)[1]):
                            # value may contain the filename of the font or
                            # its absolute path.
                            key, value, tp = winreg.EnumValue(local, j)
                            if not isinstance(value, str):
                                continue
                            try:
                                # If value contains already an absolute
                                # path, then it is not changed further.
                                path = Path(base_dir, value).resolve()
                            except RuntimeError:
                                # Don't fail with invalid entries.
                                continue
                            items.add(path)
                except (OSError, MemoryError):
                    # Missing/unreadable key: try the next location.
                    continue
    return items
@lru_cache
def _get_fontconfig_fonts():
    """
    Cache and list the font paths known to ``fc-list``.

    Returns a list of `pathlib.Path`; empty if fontconfig is absent,
    fails, or is older than 2.7 (which lacks ``--format``).
    """
    try:
        if b'--format' not in subprocess.check_output(['fc-list', '--help']):
            _log.warning(  # fontconfig 2.7 implemented --format.
                'Matplotlib needs fontconfig>=2.7 to query system fonts.')
            return []
        out = subprocess.check_output(['fc-list', '--format=%{file}\\n'])
    except (OSError, subprocess.CalledProcessError):
        # fc-list not installed or it errored out: best-effort empty list.
        return []
    return [Path(os.fsdecode(fname)) for fname in out.split(b'\n')]
@lru_cache
def _get_macos_fonts():
    """
    Cache and list the font paths known to ``system_profiler SPFontsDataType``.

    Returns a list of `pathlib.Path`; empty if the tool is missing or its
    plist output cannot be parsed.
    """
    try:
        # system_profiler emits a one-element plist array of font records.
        d, = plistlib.loads(
            subprocess.check_output(["system_profiler", "-xml", "SPFontsDataType"]))
    except (OSError, subprocess.CalledProcessError, plistlib.InvalidFileException):
        return []
    return [Path(entry["path"]) for entry in d["_items"]]
def findSystemFonts(fontpaths=None, fontext='ttf'):
    """
    Search for fonts in the specified font paths.

    If no paths are given, a standard set of system directories is
    searched, together with the fonts tracked by fontconfig (and, on
    macOS, ``system_profiler``) when available.  TrueType fonts are
    returned by default; AFM fonts can be requested via *fontext*.
    """
    valid_exts = get_fontext_synonyms(fontext)
    found = set()
    if fontpaths is None:
        if sys.platform == 'win32':
            registered = _get_win32_installed_fonts()
            search_dirs = []
        else:
            registered = _get_fontconfig_fonts()
            if sys.platform == 'darwin':
                registered += _get_macos_fonts()
                search_dirs = [*X11FontDirectories, *OSXFontDirectories]
            else:
                search_dirs = X11FontDirectories
        found.update(str(path) for path in registered
                     if path.suffix.lower()[1:] in valid_exts)
    elif isinstance(fontpaths, str):
        search_dirs = [fontpaths]
    else:
        search_dirs = fontpaths
    for directory in search_dirs:
        found.update(map(os.path.abspath, list_fonts(directory, valid_exts)))
    # Filter out cache entries whose files have since disappeared.
    return [fname for fname in found if os.path.exists(fname)]
@dataclasses.dataclass(frozen=True)
class FontEntry:
    """
    A class for storing Font properties.

    It is used when populating the font lookup dictionary.
    """
    # Path of the font file; the empty string means "no backing file".
    fname: str = ''
    name: str = ''
    style: str = 'normal'
    variant: str = 'normal'
    weight: str | int = 'normal'
    stretch: str = 'normal'
    size: str = 'medium'

    def _repr_html_(self) -> str:
        """Render the font name, in this font, as an inline-PNG img tag."""
        png_stream = self._repr_png_()
        png_b64 = b64encode(png_stream).decode()
        # Bug fix: the markup was missing ("return f''"), so png_b64 was
        # computed but never used; emit the standard data-URI image tag.
        return f'<img src="data:image/png;base64, {png_b64}" />'

    def _repr_png_(self) -> bytes:
        """Render the font name with this font and return the PNG bytes."""
        from matplotlib.figure import Figure  # Circular import.
        fig = Figure()
        font_path = Path(self.fname) if self.fname != '' else None
        fig.text(0, 0, self.name, font=font_path)
        with BytesIO() as buf:
            fig.savefig(buf, bbox_inches='tight', transparent=True)
            return buf.getvalue()
def ttfFontProperty(font):
    """
    Extract information from a TrueType font file.

    Parameters
    ----------
    font : `.FT2Font`
        The TrueType font file from which information will be extracted.

    Returns
    -------
    `FontEntry`
        The extracted font properties.
    """
    name = font.family_name
    # Styles are: italic, oblique, and normal (default)
    sfnt = font.get_sfnt()
    mac_key = (1,  # platform: macintosh
               0,  # id: roman
               0)  # langid: english
    ms_key = (3,  # platform: microsoft
              1,  # id: unicode_cs
              0x0409)  # langid: english_united_states
    # These tables are actually mac_roman-encoded, but mac_roman support may be
    # missing in some alternative Python implementations and we are only going
    # to look for ASCII substrings, where any ASCII-compatible encoding works
    # - or big-endian UTF-16, since important Microsoft fonts use that.
    # Name id 2 is the subfamily (style) name, id 4 the full font name.
    sfnt2 = (sfnt.get((*mac_key, 2), b'').decode('latin-1').lower() or
             sfnt.get((*ms_key, 2), b'').decode('utf_16_be').lower())
    sfnt4 = (sfnt.get((*mac_key, 4), b'').decode('latin-1').lower() or
             sfnt.get((*ms_key, 4), b'').decode('utf_16_be').lower())
    if sfnt4.find('oblique') >= 0:
        style = 'oblique'
    elif sfnt4.find('italic') >= 0:
        style = 'italic'
    elif sfnt2.find('regular') >= 0:
        style = 'normal'
    elif ft2font.StyleFlags.ITALIC in font.style_flags:
        style = 'italic'
    else:
        style = 'normal'
    # Variants are: small-caps and normal (default)
    # !!!!  Untested
    if name.lower() in ['capitals', 'small-caps']:
        variant = 'small-caps'
    else:
        variant = 'normal'
    # The weight-guessing algorithm is directly translated from fontconfig
    # 2.13.1's FcFreeTypeQueryFaceInternal (fcfreetype.c).
    wws_subfamily = 22
    typographic_subfamily = 16
    font_subfamily = 2
    # Candidate style-name strings, from most to least specific name id.
    styles = [
        sfnt.get((*mac_key, wws_subfamily), b'').decode('latin-1'),
        sfnt.get((*mac_key, typographic_subfamily), b'').decode('latin-1'),
        sfnt.get((*mac_key, font_subfamily), b'').decode('latin-1'),
        sfnt.get((*ms_key, wws_subfamily), b'').decode('utf-16-be'),
        sfnt.get((*ms_key, typographic_subfamily), b'').decode('utf-16-be'),
        sfnt.get((*ms_key, font_subfamily), b'').decode('utf-16-be'),
    ]
    styles = [*filter(None, styles)] or [font.style_name]

    def get_weight():  # From fontconfig's FcFreeTypeQueryFaceInternal.
        # OS/2 table weight.
        os2 = font.get_sfnt_table("OS/2")
        if os2 and os2["version"] != 0xffff:
            return os2["usWeightClass"]
        # PostScript font info weight.
        try:
            ps_font_info_weight = (
                font.get_ps_font_info()["weight"].replace(" ", "") or "")
        except ValueError:
            pass
        else:
            for regex, weight in _weight_regexes:
                if re.fullmatch(regex, ps_font_info_weight, re.I):
                    return weight
        # Style name weight.
        for style in styles:
            style = style.replace(" ", "")
            for regex, weight in _weight_regexes:
                if re.search(regex, style, re.I):
                    return weight
        if ft2font.StyleFlags.BOLD in font.style_flags:
            return 700  # "bold"
        return 500  # "medium", not "regular"!

    weight = int(get_weight())
    # Stretch can be absolute and relative
    # Absolute stretches are: ultra-condensed, extra-condensed, condensed,
    # semi-condensed, normal, semi-expanded, expanded, extra-expanded,
    # and ultra-expanded.
    # Relative stretches are: wider, narrower
    # Child value is: inherit
    if any(word in sfnt4 for word in ['narrow', 'condensed', 'cond']):
        stretch = 'condensed'
    elif 'demi cond' in sfnt4:
        stretch = 'semi-condensed'
    elif any(word in sfnt4 for word in ['wide', 'expanded', 'extended']):
        stretch = 'expanded'
    else:
        stretch = 'normal'
    # Sizes can be absolute and relative.
    # Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
    # and xx-large.
    # Relative sizes are: larger, smaller
    # Length value is an absolute font size, e.g., 12pt
    # Percentage values are in 'em's.  Most robust specification.
    if not font.scalable:
        raise NotImplementedError("Non-scalable fonts are not supported")
    size = 'scalable'
    return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
    """
    Build a `FontEntry` from an AFM font.

    Parameters
    ----------
    fontpath : str
        The filename corresponding to *font*.
    font : AFM
        The AFM font file from which information will be extracted.

    Returns
    -------
    `FontEntry`
        The extracted font properties.
    """
    name = font.get_familyname()
    fontname = font.get_fontname().lower()
    lowered_name = name.lower()

    # Style: italic, oblique, or normal (default).
    if font.get_angle() != 0 or 'italic' in lowered_name:
        style = 'italic'
    elif 'oblique' in lowered_name:
        style = 'oblique'
    else:
        style = 'normal'

    # Variant: small-caps or normal (default).  Untested heuristic.
    variant = ('small-caps' if lowered_name in ('capitals', 'small-caps')
               else 'normal')

    # Weight: trust the AFM header if it names a known weight.
    weight = font.get_weight().lower()
    if weight not in weight_dict:
        weight = 'normal'

    # Stretch, guessed from substrings of the PostScript font name.
    # "demi cond" must be tested before the bare "cond" substring.
    if 'demi cond' in fontname:
        stretch = 'semi-condensed'
    elif 'narrow' in fontname or 'cond' in fontname:
        stretch = 'condensed'
    elif any(word in fontname for word in ('wide', 'expanded', 'extended')):
        stretch = 'expanded'
    else:
        stretch = 'normal'

    # All AFM fonts are apparently scalable.
    size = 'scalable'
    return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def _cleanup_fontproperties_init(init_method):
    """
    A decorator to limit the call signature to single a positional argument
    or alternatively only keyword arguments.

    We still accept but deprecate all other call signatures.
    When the deprecation expires we can switch the signature to::

        __init__(self, pattern=None, /, *, family=None, style=None, ...)

    plus a runtime check that pattern is not used alongside with the
    keyword arguments.  This results eventually in the two possible
    call signatures::

        FontProperties(pattern)
        FontProperties(family=..., size=..., ...)
    """
    @functools.wraps(init_method)
    def wrapper(self, *args, **kwargs):
        # multiple args with at least some positional ones
        if len(args) > 1 or len(args) == 1 and kwargs:
            # Note: Both cases were previously handled as individual properties.
            # Therefore, we do not mention the case of font properties here.
            _api.warn_deprecated(
                "3.10",
                message="Passing individual properties to FontProperties() "
                        "positionally was deprecated in Matplotlib %(since)s and "
                        "will be removed in %(removal)s. Please pass all properties "
                        "via keyword arguments."
            )
        # single non-string arg -> clearly a family not a pattern
        if len(args) == 1 and not kwargs and not cbook.is_scalar_or_string(args[0]):
            # Case font-family list passed as single argument
            _api.warn_deprecated(
                "3.10",
                # Bug fix: the two fragments previously concatenated to
                # "keywordargument."; add the missing space.
                message="Passing family as positional argument to FontProperties() "
                        "was deprecated in Matplotlib %(since)s and will be removed "
                        "in %(removal)s. Please pass family names as keyword "
                        "argument."
            )
        # Note on single string arg:
        # This has been interpreted as pattern so far.  We are already raising
        # if a non-pattern compatible family string was given.  Therefore, we
        # do not need to warn for this case.
        return init_method(self, *args, **kwargs)
    return wrapper
class FontProperties:
    """
    A class for storing and manipulating font properties.

    The font properties are the six properties described in the
    `W3C Cascading Style Sheet, Level 1
    `_ font
    specification and *math_fontfamily* for math fonts:

    - family: A list of font names in decreasing order of priority.
      The items may include a generic font family name, either 'sans-serif',
      'serif', 'cursive', 'fantasy', or 'monospace'.  In that case, the actual
      font to be used will be looked up from the associated rcParam during the
      search process in `.findfont`. Default: :rc:`font.family`
    - style: Either 'normal', 'italic' or 'oblique'.
      Default: :rc:`font.style`
    - variant: Either 'normal' or 'small-caps'.
      Default: :rc:`font.variant`
    - stretch: A numeric value in the range 0-1000 or one of
      'ultra-condensed', 'extra-condensed', 'condensed',
      'semi-condensed', 'normal', 'semi-expanded', 'expanded',
      'extra-expanded' or 'ultra-expanded'. Default: :rc:`font.stretch`
    - weight: A numeric value in the range 0-1000 or one of
      'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
      'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
      'extra bold', 'black'. Default: :rc:`font.weight`
    - size: Either a relative value of 'xx-small', 'x-small',
      'small', 'medium', 'large', 'x-large', 'xx-large' or an
      absolute font size, e.g., 10. Default: :rc:`font.size`
    - math_fontfamily: The family of fonts used to render math text.
      Supported values are: 'dejavusans', 'dejavuserif', 'cm',
      'stix', 'stixsans' and 'custom'. Default: :rc:`mathtext.fontset`

    Alternatively, a font may be specified using the absolute path to a font
    file, by using the *fname* kwarg.  However, in this case, it is typically
    simpler to just pass the path (as a `pathlib.Path`, not a `str`) to the
    *font* kwarg of the `.Text` object.

    The preferred usage of font sizes is to use the relative values,
    e.g., 'large', instead of absolute font sizes, e.g., 12.  This
    approach allows all text sizes to be made larger or smaller based
    on the font manager's default font size.

    This class accepts a single positional string as fontconfig_ pattern_,
    or alternatively individual properties as keyword arguments::

        FontProperties(pattern)
        FontProperties(*, family=None, style=None, variant=None, ...)

    This support does not depend on fontconfig; we are merely borrowing its
    pattern syntax for use here.

    .. _fontconfig: https://www.freedesktop.org/wiki/Software/fontconfig/
    .. _pattern:
       https://www.freedesktop.org/software/fontconfig/fontconfig-user.html

    Note that Matplotlib's internal font manager and fontconfig use a
    different algorithm to lookup fonts, so the results of the same pattern
    may be different in Matplotlib than in other applications that use
    fontconfig.
    """

    @_cleanup_fontproperties_init
    def __init__(self, family=None, style=None, variant=None, weight=None,
                 stretch=None, size=None,
                 fname=None,  # if set, it's a hardcoded filename to use
                 math_fontfamily=None):
        self.set_family(family)
        self.set_style(style)
        self.set_variant(variant)
        self.set_weight(weight)
        self.set_stretch(stretch)
        self.set_file(fname)
        self.set_size(size)
        self.set_math_fontfamily(math_fontfamily)
        # Treat family as a fontconfig pattern if it is the only parameter
        # provided.  Even in that case, call the other setters first to set
        # attributes not specified by the pattern to the rcParams defaults.
        if (isinstance(family, str)
                and style is None and variant is None and weight is None
                and stretch is None and size is None and fname is None):
            self.set_fontconfig_pattern(family)

    @classmethod
    def _from_any(cls, arg):
        """
        Generic constructor which can build a `.FontProperties` from any of
        the following:

        - a `.FontProperties`: it is passed through as is;
        - `None`: a `.FontProperties` using rc values is used;
        - an `os.PathLike`: it is used as path to the font file;
        - a `str`: it is parsed as a fontconfig pattern;
        - a `dict`: it is passed as ``**kwargs`` to `.FontProperties`.
        """
        if arg is None:
            return cls()
        elif isinstance(arg, cls):
            return arg
        elif isinstance(arg, os.PathLike):
            return cls(fname=arg)
        elif isinstance(arg, str):
            return cls(arg)
        else:
            return cls(**arg)

    def __hash__(self):
        # Hash over all settable properties, so equal-valued instances
        # collide in caches keyed on FontProperties.
        l = (tuple(self.get_family()),
             self.get_slant(),
             self.get_variant(),
             self.get_weight(),
             self.get_stretch(),
             self.get_size(),
             self.get_file(),
             self.get_math_fontfamily())
        return hash(l)

    def __eq__(self, other):
        # NOTE(review): equality is defined via hash equality, so a hash
        # collision makes unequal instances compare equal — confirm this
        # is acceptable before relying on __eq__ for correctness.
        return hash(self) == hash(other)

    def __str__(self):
        return self.get_fontconfig_pattern()

    def get_family(self):
        """
        Return a list of individual font family names or generic family
        names.

        The font families or generic font families (which will be resolved
        from their respective rcParams when searching for a matching font)
        in the order of preference.
        """
        return self._family

    def get_name(self):
        """
        Return the name of the font that best matches the font properties.
        """
        return get_font(findfont(self)).family_name

    def get_style(self):
        """
        Return the font style.  Values are: 'normal', 'italic' or 'oblique'.
        """
        return self._slant

    def get_variant(self):
        """
        Return the font variant.  Values are: 'normal' or 'small-caps'.
        """
        return self._variant

    def get_weight(self):
        """
        Return the font weight.  Options are: A numeric value in the
        range 0-1000 or one of 'light', 'normal', 'regular', 'book',
        'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
        'heavy', 'extra bold', 'black'
        """
        return self._weight

    def get_stretch(self):
        """
        Return the font stretch or width.  Options are: 'ultra-condensed',
        'extra-condensed', 'condensed', 'semi-condensed', 'normal',
        'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
        """
        return self._stretch

    def get_size(self):
        """
        Return the font size.
        """
        return self._size

    def get_file(self):
        """
        Return the filename of the associated font.
        """
        return self._file

    def get_fontconfig_pattern(self):
        """
        Get a fontconfig_ pattern_ suitable for looking up the font as
        specified with fontconfig's ``fc-match`` utility.

        This support does not depend on fontconfig; we are merely borrowing
        its pattern syntax for use here.
        """
        return generate_fontconfig_pattern(self)

    def set_family(self, family):
        """
        Change the font family.  Can be either an alias (generic name
        is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
        'fantasy', or 'monospace', a real font name or a list of real
        font names.  Real font names are not supported when
        :rc:`text.usetex` is `True`. Default: :rc:`font.family`
        """
        if family is None:
            family = mpl.rcParams['font.family']
        if isinstance(family, str):
            family = [family]
        self._family = family

    def set_style(self, style):
        """
        Set the font style.

        Parameters
        ----------
        style : {'normal', 'italic', 'oblique'}, default: :rc:`font.style`
        """
        if style is None:
            style = mpl.rcParams['font.style']
        _api.check_in_list(['normal', 'italic', 'oblique'], style=style)
        self._slant = style

    def set_variant(self, variant):
        """
        Set the font variant.

        Parameters
        ----------
        variant : {'normal', 'small-caps'}, default: :rc:`font.variant`
        """
        if variant is None:
            variant = mpl.rcParams['font.variant']
        _api.check_in_list(['normal', 'small-caps'], variant=variant)
        self._variant = variant

    def set_weight(self, weight):
        """
        Set the font weight.

        Parameters
        ----------
        weight : int or {'ultralight', 'light', 'normal', 'regular', 'book', \
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', \
'extra bold', 'black'}, default: :rc:`font.weight`
            If int, must be in the range  0-1000.
        """
        if weight is None:
            weight = mpl.rcParams['font.weight']
        if weight in weight_dict:
            self._weight = weight
            return
        try:
            weight = int(weight)
        except ValueError:
            pass
        else:
            if 0 <= weight <= 1000:
                self._weight = weight
                return
        raise ValueError(f"{weight=} is invalid")

    def set_stretch(self, stretch):
        """
        Set the font stretch or width.

        Parameters
        ----------
        stretch : int or {'ultra-condensed', 'extra-condensed', 'condensed', \
'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', \
'ultra-expanded'}, default: :rc:`font.stretch`
            If int, must be in the range  0-1000.
        """
        if stretch is None:
            stretch = mpl.rcParams['font.stretch']
        if stretch in stretch_dict:
            self._stretch = stretch
            return
        try:
            stretch = int(stretch)
        except ValueError:
            pass
        else:
            if 0 <= stretch <= 1000:
                self._stretch = stretch
                return
        raise ValueError(f"{stretch=} is invalid")

    def set_size(self, size):
        """
        Set the font size.

        Parameters
        ----------
        size : float or {'xx-small', 'x-small', 'small', 'medium', \
'large', 'x-large', 'xx-large'}, default: :rc:`font.size`
            If a float, the font size in points.  The string values denote
            sizes relative to the default font size.
        """
        if size is None:
            size = mpl.rcParams['font.size']
        try:
            size = float(size)
        except ValueError:
            try:
                scale = font_scalings[size]
            except KeyError as err:
                raise ValueError(
                    "Size is invalid. Valid font size are "
                    + ", ".join(map(str, font_scalings))) from err
            else:
                size = scale * FontManager.get_default_size()
        if size < 1.0:
            # FreeType cannot render sub-point sizes; clamp with a log note.
            _log.info('Fontsize %1.2f < 1.0 pt not allowed by FreeType. '
                      'Setting fontsize = 1 pt', size)
            size = 1.0
        self._size = size

    def set_file(self, file):
        """
        Set the filename of the fontfile to use.  In this case, all
        other properties will be ignored.
        """
        self._file = os.fspath(file) if file is not None else None

    def set_fontconfig_pattern(self, pattern):
        """
        Set the properties by parsing a fontconfig_ *pattern*.

        This support does not depend on fontconfig; we are merely borrowing
        its pattern syntax for use here.
        """
        for key, val in parse_fontconfig_pattern(pattern).items():
            # Each parsed entry is dispatched to the matching setter; list
            # values keep only their first element.
            if type(val) is list:
                getattr(self, "set_" + key)(val[0])
            else:
                getattr(self, "set_" + key)(val)

    def get_math_fontfamily(self):
        """
        Return the name of the font family used for math text.

        The default font is :rc:`mathtext.fontset`.
        """
        return self._math_fontfamily

    def set_math_fontfamily(self, fontfamily):
        """
        Set the font family for text in math mode.

        If not set explicitly, :rc:`mathtext.fontset` will be used.

        Parameters
        ----------
        fontfamily : str
            The name of the font family.

        See Also
        --------
        .text.Text.get_math_fontfamily
        """
        if fontfamily is None:
            fontfamily = mpl.rcParams['mathtext.fontset']
        else:
            valid_fonts = _validators['mathtext.fontset'].valid.values()
            # _check_in_list() Validates the parameter math_fontfamily as
            # if it were passed to rcParams['mathtext.fontset']
            _api.check_in_list(valid_fonts, math_fontfamily=fontfamily)
        self._math_fontfamily = fontfamily

    def copy(self):
        """Return a copy of self."""
        return copy.copy(self)

    # Aliases
    set_name = set_family
    get_slant = get_style
    set_slant = set_style
    get_size_in_points = get_size
class _JSONEncoder(json.JSONEncoder):
    # Serializer for the font cache: FontManager and FontEntry instances
    # are flattened to dicts tagged with a '__class__' marker, which
    # _json_decode uses to rebuild them on load.
    def default(self, o):
        if isinstance(o, FontManager):
            return dict(o.__dict__, __class__='FontManager')
        elif isinstance(o, FontEntry):
            d = dict(o.__dict__, __class__='FontEntry')
            try:
                # Cache paths of fonts shipped with Matplotlib relative to the
                # Matplotlib data path, which helps in the presence of venvs.
                d["fname"] = str(Path(d["fname"]).relative_to(mpl.get_data_path()))
            except ValueError:
                # Not under the data path: keep the absolute path as-is.
                pass
            return d
        else:
            return super().default(o)
def _json_decode(o):
    """
    Object hook inverting `_JSONEncoder`: rebuild `FontManager` /
    `FontEntry` instances from dicts tagged with a '__class__' marker;
    untagged dicts pass through unchanged.
    """
    tag = o.pop('__class__', None)
    if tag is None:
        return o
    if tag == 'FontManager':
        manager = FontManager.__new__(FontManager)
        manager.__dict__.update(o)
        return manager
    if tag == 'FontEntry':
        # Relative paths were stored relative to the Matplotlib data path.
        if not os.path.isabs(o['fname']):
            o['fname'] = os.path.join(mpl.get_data_path(), o['fname'])
        return FontEntry(**o)
    raise ValueError("Don't know how to deserialize __class__=%s" % tag)
def json_dump(data, filename):
    """
    Dump `FontManager` *data* as JSON to the file named *filename*.

    See Also
    --------
    json_load

    Notes
    -----
    File paths that are children of the Matplotlib data path (typically,
    fonts shipped with Matplotlib) are stored relative to that data path
    (to remain valid across virtualenvs).

    This function temporarily locks the output file to prevent multiple
    processes from overwriting one another's output.
    """
    try:
        with cbook._lock_path(filename), open(filename, 'w') as fh:
            json.dump(data, fh, cls=_JSONEncoder, indent=2)
    except OSError as e:
        # Best-effort cache write: failure only costs a rebuild next time.
        _log.warning('Could not save font_manager cache %s', e)
def json_load(filename):
    """
    Rebuild a `FontManager` from the JSON cache file named *filename*.

    See Also
    --------
    json_dump
    """
    with open(filename) as cache_file:
        return json.load(cache_file, object_hook=_json_decode)
class FontManager:
    """
    On import, the `FontManager` singleton instance creates a list of ttf
    and afm fonts and caches their `FontProperties`.  The
    `FontManager.findfont` method does a nearest neighbor search to find
    the font that most closely matches the specification.  If no good
    enough match is found, the default font is returned.

    Fonts added with the `FontManager.addfont` method will not persist in
    the cache; therefore, `addfont` will need to be called every time
    Matplotlib is imported.  This method should only be used if and when a
    font cannot be installed on your operating system by other means.

    Notes
    -----
    The `FontManager.addfont` method must be called on the global
    `FontManager` instance.

    Example usage::

        import matplotlib.pyplot as plt
        from matplotlib import font_manager

        font_dirs = ["/resources/fonts"]  # The path to the custom font file.
        font_files = font_manager.findSystemFonts(fontpaths=font_dirs)

        for font_file in font_files:
            font_manager.fontManager.addfont(font_file)
    """
    # Increment this version number whenever the font cache data
    # format or behavior has changed and requires an existing font
    # cache files to be rebuilt.
    __version__ = 390
def __init__(self, size=None, weight='normal'):
    """
    Build the ttf/afm lookup lists by scanning Matplotlib's bundled
    fonts and all system font directories.

    Parameters
    ----------
    size : float or None
        Default font size; None defers to :rc:`font.size` at query time.
    weight : str, default: 'normal'
        Default font weight.
    """
    self._version = self.__version__
    self.__default_weight = weight
    self.default_size = size
    # Create list of font paths.
    paths = [cbook._get_data_path('fonts', subdir)
             for subdir in ['ttf', 'afm', 'pdfcorefonts']]
    _log.debug('font search path %s', paths)
    self.defaultFamily = {
        'ttf': 'DejaVu Sans',
        'afm': 'Helvetica'}
    self.afmlist = []
    self.ttflist = []
    # Delay the warning by 5s so fast cache builds stay silent.
    timer = threading.Timer(5, lambda: _log.warning(
        'Matplotlib is building the font cache; this may take a moment.'))
    timer.start()
    try:
        for fontext in ["afm", "ttf"]:
            for path in [*findSystemFonts(paths, fontext=fontext),
                         *findSystemFonts(fontext=fontext)]:
                try:
                    self.addfont(path)
                except OSError as exc:
                    _log.info("Failed to open font file %s: %s", path, exc)
                except Exception as exc:
                    # A single bad font must not abort the whole scan.
                    _log.info("Failed to extract font properties from %s: "
                              "%s", path, exc)
    finally:
        timer.cancel()
def addfont(self, path):
    """
    Cache the properties of the font at *path* to make it available to
    the `FontManager`.  The type of font is inferred from the path
    suffix.

    Parameters
    ----------
    path : str or path-like

    Notes
    -----
    This method is useful for adding a custom font without installing it
    in your operating system.  See the `FontManager` singleton instance
    for usage and caveats about this function.
    """
    # Convert to string in case of a path as
    # afmFontProperty and FT2Font expect this
    path = os.fsdecode(path)
    if Path(path).suffix.lower() == ".afm":
        with open(path, "rb") as fh:
            font = _afm.AFM(fh)
        prop = afmFontProperty(path, font)
        self.afmlist.append(prop)
    else:
        font = ft2font.FT2Font(path)
        prop = ttfFontProperty(font)
        self.ttflist.append(prop)
    # Invalidate the memoized findfont results, which may change now.
    self._findfont_cached.cache_clear()
@property
def defaultFont(self):
    # Lazily evaluated (findfont then caches the result) to avoid including
    # the venv path in the json serialization.
    # Maps each supported extension ('ttf'/'afm') to the resolved path of
    # its default family.
    return {ext: self.findfont(family, fontext=ext)
            for ext, family in self.defaultFamily.items()}
def get_default_weight(self):
    """
    Return the default font weight.
    """
    return self.__default_weight
@staticmethod
def get_default_size():
    """
    Return the default font size (read live from :rc:`font.size`).
    """
    return mpl.rcParams['font.size']
def set_default_weight(self, weight):
    """
    Set the default font weight.  The initial value is 'normal'.
    """
    self.__default_weight = weight
@staticmethod
def _expand_aliases(family):
    # Resolve a generic family name to the concrete candidate list held
    # in the font.<family> rcParam ('sans'/'sans serif' fold into
    # 'sans-serif').
    if family in ('sans', 'sans serif'):
        family = 'sans-serif'
    return mpl.rcParams['font.' + family]
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)

def score_family(self, families, family2):
    """
    Return a match score between the list of font families in
    *families* and the font family name *family2*.

    An exact match at the head of the list returns 0.0.
    A match further down the list will return between 0 and 1.
    No match will return 1.0.
    """
    if not isinstance(families, (list, tuple)):
        families = [families]
    elif len(families) == 0:
        return 1.0
    family2 = family2.lower()
    # Each list position contributes an equal share of the [0, 1) range.
    step = 1 / len(families)
    for i, family1 in enumerate(families):
        family1 = family1.lower()
        if family1 in font_family_aliases:
            # Generic name: score by position within the alias expansion.
            options = [*map(str.lower, self._expand_aliases(family1))]
            if family2 in options:
                idx = options.index(family2)
                return (i + (idx / len(options))) * step
        elif family1 == family2:
            # The score should be weighted by where in the
            # list the font was found.
            return i * step
    return 1.0
def score_style(self, style1, style2):
    """
    Return a match score between *style1* and *style2*.

    0.0 for an exact match, 0.1 for an italic/oblique cross-match,
    1.0 otherwise.
    """
    if style1 == style2:
        return 0.0
    slanted = ('italic', 'oblique')
    if style1 in slanted and style2 in slanted:
        # Italic and oblique are close enough to be near-matches.
        return 0.1
    return 1.0
def score_variant(self, variant1, variant2):
    """
    Return a match score between *variant1* and *variant2*:
    0.0 for an exact match, 1.0 otherwise.
    """
    return 0.0 if variant1 == variant2 else 1.0
def score_stretch(self, stretch1, stretch2):
    """
    Return a match score between *stretch1* and *stretch2*.

    The result is the absolute difference between the CSS numeric
    values of the two stretches, normalized to the range [0, 1].
    """
    def numeric(stretch):
        # Accept either a numeric value or a named CSS stretch;
        # unknown names default to the midpoint, 500.
        try:
            return int(stretch)
        except ValueError:
            return stretch_dict.get(stretch, 500)

    return abs(numeric(stretch1) - numeric(stretch2)) / 1000.0
def score_weight(self, weight1, weight2):
    """
    Return a match score between *weight1* and *weight2*.

    The result is 0.0 when both weights are given as equal strings
    (e.g. both "regular"); otherwise it is the absolute difference of
    their CSS numeric values, scaled into the range [0.05, 1.0].
    """
    if cbook._str_equal(weight1, weight2):
        # Exact match of the weight names.
        return 0.0

    def numeric(weight):
        return weight if isinstance(weight, Number) else weight_dict[weight]

    return 0.95 * (abs(numeric(weight1) - numeric(weight2)) / 1000) + 0.05
def score_size(self, size1, size2):
    """
    Return a match score between *size1* and *size2*.

    If *size2* (the size specified in the font file) is 'scalable' the
    score is always 0.0, since any size can be generated.  Otherwise the
    score is the absolute distance between the two sizes, normalized so
    that the usual range of font sizes (6pt - 72pt) maps into [0, 1].
    """
    if size2 == 'scalable':
        return 0.0
    try:
        requested = float(size1)
    except ValueError:
        # Named sizes ('small', ...) scale the default size.
        requested = self.default_size * font_scalings[size1]
    try:
        available = float(size2)
    except ValueError:
        # Unparseable size in the font file: worst possible score.
        return 1.0
    return abs(requested - available) / 72
def findfont(self, prop, fontext='ttf', directory=None,
             fallback_to_default=True, rebuild_if_missing=True):
    """
    Find the path to the font file most closely matching the given font
    properties.

    Parameters
    ----------
    prop : str or `~matplotlib.font_manager.FontProperties`
        The font properties to search for.  Either a `.FontProperties`
        object or a string defining a `fontconfig patterns`_.
    fontext : {'ttf', 'afm'}, default: 'ttf'
        The extension of the font file: 'ttf' for TrueType/OpenType
        fonts (.ttf, .ttc, .otf), 'afm' for Adobe Font Metrics (.afm).
    directory : str, optional
        If given, only search this directory and its subdirectories.
    fallback_to_default : bool
        If True, fall back to the default font family (usually
        "DejaVu Sans" or "Helvetica") if the first lookup hard-fails.
    rebuild_if_missing : bool
        Whether to rebuild the font cache and search again if the first
        match points to a nonexisting file (i.e. the cache is stale).

    Returns
    -------
    str
        The filename of the best matching font.

    Notes
    -----
    This performs a nearest-neighbor search: each font gets a similarity
    score against the target properties and the first best-scoring font
    is returned; if no match scores below a threshold, the default font
    is returned instead.  Results are cached so subsequent lookups skip
    the O(n) search.  See the `W3C Cascading Style Sheet, Level 1
    `_ documentation for a description of the font finding algorithm.

    .. _fontconfig patterns:
       https://www.freedesktop.org/software/fontconfig/fontconfig-user.html
    """
    # The relevant rcParams (and self) are folded into the cache key of
    # _findfont_cached so a stale entry is never used after an rcParam
    # change.
    keys = ("font.serif", "font.sans-serif", "font.cursive",
            "font.fantasy", "font.monospace")
    rc_params = tuple(tuple(mpl.rcParams[key]) for key in keys)
    result = self._findfont_cached(
        prop, fontext, directory, fallback_to_default, rebuild_if_missing,
        rc_params)
    # Failures are cached as _ExceptionInfo; convert back to a raise here.
    if isinstance(result, cbook._ExceptionInfo):
        raise result.to_exception()
    return result
def get_font_names(self):
    """Return the list of available font names (deduplicated)."""
    unique_names = {entry.name for entry in self.ttflist}
    return list(unique_names)
def _find_fonts_by_props(self, prop, fontext='ttf', directory=None,
                         fallback_to_default=True, rebuild_if_missing=True):
    """
    Find the paths to the font files most closely matching the given
    properties.

    Parameters
    ----------
    prop : str or `~matplotlib.font_manager.FontProperties`
        The font properties to search for.  Either a `.FontProperties`
        object or a string defining a `fontconfig patterns`_.
    fontext : {'ttf', 'afm'}, default: 'ttf'
        The extension of the font file: 'ttf' for TrueType/OpenType
        fonts, 'afm' for Adobe Font Metrics.
    directory : str, optional
        If given, only search this directory and its subdirectories.
    fallback_to_default : bool
        If True, fall back to the default font family (usually
        "DejaVu Sans" or "Helvetica") if none of the families are found.
    rebuild_if_missing : bool
        Whether to rebuild the font cache and search again if the first
        match points to a nonexisting file.

    Returns
    -------
    list[str]
        The paths of the fonts found.

    Notes
    -----
    This wraps the single-result `findfont` API and returns one path per
    requested family; the nearest-neighbor matching logic itself is
    unchanged.  See `findfont` for details.
    """
    prop = FontProperties._from_any(prop)
    paths = []
    for family in prop.get_family():
        # Search each requested family individually, without falling
        # back to the default family yet.
        family_prop = prop.copy()
        family_prop.set_family(family)
        try:
            paths.append(
                self.findfont(
                    family_prop, fontext, directory,
                    fallback_to_default=False,
                    rebuild_if_missing=rebuild_if_missing,
                )
            )
        except ValueError:
            if family in font_family_aliases:
                _log.warning(
                    "findfont: Generic family %r not found because "
                    "none of the following families were found: %s",
                    family, ", ".join(self._expand_aliases(family))
                )
            else:
                _log.warning("findfont: Font family %r not found.", family)
    if paths:
        return paths
    # Nothing matched: either fall back to the default family or fail.
    if not fallback_to_default:
        raise ValueError("Failed to find any font, and fallback "
                         "to the default font was disabled")
    default_prop = prop.copy()
    default_prop.set_family(self.defaultFamily[fontext])
    paths.append(
        self.findfont(
            default_prop, fontext, directory,
            fallback_to_default=True,
            rebuild_if_missing=rebuild_if_missing,
        )
    )
    return paths
@lru_cache(1024)
def _findfont_cached(self, prop, fontext, directory, fallback_to_default,
                     rebuild_if_missing, rc_params):
    # Cached implementation behind `findfont`.  *rc_params* is not used
    # directly; it is part of the signature only so the lru_cache key
    # changes when the relevant rcParams change (see `findfont`).
    prop = FontProperties._from_any(prop)
    # An explicit file in the properties short-circuits the search.
    fname = prop.get_file()
    if fname is not None:
        return fname
    if fontext == 'afm':
        fontlist = self.afmlist
    else:
        fontlist = self.ttflist
    best_score = 1e64
    best_font = None
    _log.debug('findfont: Matching %s.', prop)
    for font in fontlist:
        # Restrict the search to *directory* (and subdirs) if given.
        if (directory is not None and
                Path(directory) not in Path(font.fname).parents):
            continue
        # Matching family should have top priority, so multiply it by 10.
        score = (self.score_family(prop.get_family(), font.name) * 10
                 + self.score_style(prop.get_style(), font.style)
                 + self.score_variant(prop.get_variant(), font.variant)
                 + self.score_weight(prop.get_weight(), font.weight)
                 + self.score_stretch(prop.get_stretch(), font.stretch)
                 + self.score_size(prop.get_size(), font.size))
        _log.debug('findfont: score(%s) = %s', font, score)
        if score < best_score:
            best_score = score
            best_font = font
        if score == 0:
            # Perfect match; no need to keep searching.
            break
    # A score >= 10 means even the family did not match (family score 1.0
    # carries weight 10); treat that as "not found".
    if best_font is None or best_score >= 10.0:
        if fallback_to_default:
            _log.warning(
                'findfont: Font family %s not found. Falling back to %s.',
                prop.get_family(), self.defaultFamily[fontext])
            for family in map(str.lower, prop.get_family()):
                if family in font_family_aliases:
                    _log.warning(
                        "findfont: Generic family %r not found because "
                        "none of the following families were found: %s",
                        family, ", ".join(self._expand_aliases(family)))
            default_prop = prop.copy()
            default_prop.set_family(self.defaultFamily[fontext])
            # Retry with the default family, without further fallback.
            return self.findfont(default_prop, fontext, directory,
                                 fallback_to_default=False)
        else:
            # This return instead of raise is intentional, as we wish to
            # cache that it was not found, which will not occur if it was
            # actually raised.
            return cbook._ExceptionInfo(
                ValueError,
                f"Failed to find font {prop}, and fallback to the default font was "
                f"disabled"
            )
    else:
        _log.debug('findfont: Matching %s to %s (%r) with score of %f.',
                   prop, best_font.name, best_font.fname, best_score)
        result = best_font.fname
    if not os.path.isfile(result):
        if rebuild_if_missing:
            # The cache pointed at a file that no longer exists: rebuild
            # the font list from disk and retry exactly once.
            _log.info(
                'findfont: Found a missing font file. Rebuilding cache.')
            new_fm = _load_fontmanager(try_read_cache=False)
            # Replace self by the new fontmanager, because users may have
            # a reference to this specific instance.
            # TODO: _load_fontmanager should really be (used by) a method
            # modifying the instance in place.
            vars(self).update(vars(new_fm))
            return self.findfont(
                prop, fontext, directory, rebuild_if_missing=False)
        else:
            # This return instead of raise is intentional, as we wish to
            # cache that it was not found, which will not occur if it was
            # actually raised.
            return cbook._ExceptionInfo(ValueError, "No valid font could be found")
    return _cached_realpath(result)
@lru_cache
def is_opentype_cff_font(filename):
    """
    Return whether *filename* is an OpenType wrapper around a Postscript
    Compact Font Format (CFF) font.  Used by the PostScript and PDF
    backends, which cannot subset these fonts.
    """
    extension = os.path.splitext(filename)[1]
    if extension.lower() != '.otf':
        return False
    # CFF-flavored OpenType files start with the 'OTTO' sfnt tag.
    with open(filename, 'rb') as file:
        return file.read(4) == b"OTTO"
@lru_cache(64)
def _get_font(font_filepaths, hinting_factor, *, _kerning_factor, thread_id):
    """
    Build an `ft2font.FT2Font` for *font_filepaths*.

    The first path becomes the primary font; the remaining paths are
    loaded as its fallback list.  *thread_id* takes part only in the
    cache key (see `get_font`).
    """
    primary, *fallback_paths = font_filepaths
    fallback_fonts = [
        ft2font.FT2Font(path, hinting_factor, _kerning_factor=_kerning_factor)
        for path in fallback_paths
    ]
    return ft2font.FT2Font(
        primary, hinting_factor,
        _fallback_list=fallback_fonts,
        _kerning_factor=_kerning_factor
    )
# FT2Font objects cannot be used across fork()s because they reference the same
# FT_Library object. While invalidating *all* existing FT2Fonts after a fork
# would be too complicated to be worth it, the main way FT2Fonts get reused is
# via the cache of _get_font, which we can empty upon forking (not on Windows,
# which has no fork() or register_at_fork()).
if hasattr(os, "register_at_fork"):
    # Clear the cache in the child process only; the parent keeps its fonts.
    os.register_at_fork(after_in_child=_get_font.cache_clear)
@lru_cache(64)
def _cached_realpath(path):
# Resolving the path avoids embedding the font twice in pdf/ps output if a
# single font is selected using two different relative paths.
return os.path.realpath(path)
def get_font(font_filepaths, hinting_factor=None):
    """
    Get an `.ft2font.FT2Font` object given a list of file paths.

    Parameters
    ----------
    font_filepaths : Iterable[str, Path, bytes], str, Path, bytes
        Relative or absolute paths to the font files to be used.  A
        single str, bytes, or `pathlib.Path` is treated as a one-element
        list.  When several paths are given, the returned FT2Font falls
        back through them, in order, to find a needed glyph.

    Returns
    -------
    `.ft2font.FT2Font`
    """
    if isinstance(font_filepaths, (str, Path, bytes)):
        font_filepaths = [font_filepaths]
    # Must be a tuple so _get_font can hash it for its cache.
    resolved = tuple(_cached_realpath(fname) for fname in font_filepaths)
    if hinting_factor is None:
        hinting_factor = mpl.rcParams['text.hinting_factor']
    return _get_font(
        resolved,
        hinting_factor,
        _kerning_factor=mpl.rcParams['text.kerning_factor'],
        # Also key on the thread ID to prevent segfaults with
        # multi-threading.
        thread_id=threading.get_ident()
    )
def _load_fontmanager(*, try_read_cache=True):
    """
    Return a `FontManager`, preferring the json-cached one when possible.

    A cached manager is only used when its ``_version`` matches
    `FontManager.__version__`; otherwise a fresh manager is built and
    written back to the cache.
    """
    fm_path = Path(
        mpl.get_cachedir(), f"fontlist-v{FontManager.__version__}.json")
    if try_read_cache:
        try:
            cached = json_load(fm_path)
        except Exception:
            cached = None
        if (cached is not None
                and getattr(cached, "_version", object())
                == FontManager.__version__):
            _log.debug("Using fontManager instance from %s", fm_path)
            return cached
    # Cache missing, unreadable, or stale: rebuild from scratch.
    fm = FontManager()
    json_dump(fm, fm_path)
    _log.info("generated new fontManager")
    return fm
# Module-level singleton font manager, plus convenience aliases bound to it
# at import time.
fontManager = _load_fontmanager()
findfont = fontManager.findfont
get_font_names = fontManager.get_font_names
venv\Lib\site-packages\matplotlib\gridspec.py
r"""
:mod:`~matplotlib.gridspec` contains classes that help to layout multiple
`~.axes.Axes` in a grid-like pattern within a figure.
The `GridSpec` specifies the overall grid structure. Individual cells within
the grid are referenced by `SubplotSpec`\s.
Often, users need not access this module directly, and can use higher-level
methods like `~.pyplot.subplots`, `~.pyplot.subplot_mosaic` and
`~.Figure.subfigures`. See the tutorial :ref:`arranging_axes` for a guide.
"""
import copy
import logging
from numbers import Integral
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _pylab_helpers, _tight_layout
from matplotlib.transforms import Bbox
_log = logging.getLogger(__name__)
class GridSpecBase:
    """
    A base class of GridSpec that specifies the geometry of the grid
    that a subplot will be placed.
    """

    def __init__(self, nrows, ncols, height_ratios=None, width_ratios=None):
        """
        Parameters
        ----------
        nrows, ncols : int
            The number of rows and columns of the grid.
        width_ratios : array-like of length *ncols*, optional
            Defines the relative widths of the columns. Each column gets a
            relative width of ``width_ratios[i] / sum(width_ratios)``.
            If not given, all columns will have the same width.
        height_ratios : array-like of length *nrows*, optional
            Defines the relative heights of the rows. Each row gets a
            relative height of ``height_ratios[i] / sum(height_ratios)``.
            If not given, all rows will have the same height.
        """
        if not isinstance(nrows, Integral) or nrows <= 0:
            raise ValueError(
                f"Number of rows must be a positive integer, not {nrows!r}")
        if not isinstance(ncols, Integral) or ncols <= 0:
            raise ValueError(
                f"Number of columns must be a positive integer, not {ncols!r}")
        self._nrows, self._ncols = nrows, ncols
        # The setters normalize None to a list of ones.
        self.set_height_ratios(height_ratios)
        self.set_width_ratios(width_ratios)

    def __repr__(self):
        # Only mention the ratios when they are not all equal.
        height_arg = (f', height_ratios={self._row_height_ratios!r}'
                      if len(set(self._row_height_ratios)) != 1 else '')
        width_arg = (f', width_ratios={self._col_width_ratios!r}'
                     if len(set(self._col_width_ratios)) != 1 else '')
        return '{clsname}({nrows}, {ncols}{optionals})'.format(
            clsname=self.__class__.__name__,
            nrows=self._nrows,
            ncols=self._ncols,
            optionals=height_arg + width_arg,
        )

    nrows = property(lambda self: self._nrows,
                     doc="The number of rows in the grid.")
    ncols = property(lambda self: self._ncols,
                     doc="The number of columns in the grid.")

    def get_geometry(self):
        """
        Return a tuple containing the number of rows and columns in the grid.
        """
        return self._nrows, self._ncols

    def get_subplot_params(self, figure=None):
        """Return the subplot layout parameters for this grid."""
        # Must be implemented in subclasses
        pass

    def new_subplotspec(self, loc, rowspan=1, colspan=1):
        """
        Create and return a `.SubplotSpec` instance.

        Parameters
        ----------
        loc : (int, int)
            The position of the subplot in the grid as
            ``(row_index, column_index)``.
        rowspan, colspan : int, default: 1
            The number of rows and columns the subplot should span in the grid.
        """
        loc1, loc2 = loc
        subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
        return subplotspec

    def set_width_ratios(self, width_ratios):
        """
        Set the relative widths of the columns.

        *width_ratios* must be of length *ncols*. Each column gets a relative
        width of ``width_ratios[i] / sum(width_ratios)``.
        """
        if width_ratios is None:
            width_ratios = [1] * self._ncols
        elif len(width_ratios) != self._ncols:
            raise ValueError('Expected the given number of width ratios to '
                             'match the number of columns of the grid')
        self._col_width_ratios = width_ratios

    def get_width_ratios(self):
        """
        Return the width ratios.

        This is ``[1] * ncols`` if no width ratios have been set
        explicitly (see `set_width_ratios`).
        """
        return self._col_width_ratios

    def set_height_ratios(self, height_ratios):
        """
        Set the relative heights of the rows.

        *height_ratios* must be of length *nrows*. Each row gets a relative
        height of ``height_ratios[i] / sum(height_ratios)``.
        """
        if height_ratios is None:
            height_ratios = [1] * self._nrows
        elif len(height_ratios) != self._nrows:
            raise ValueError('Expected the given number of height ratios to '
                             'match the number of rows of the grid')
        self._row_height_ratios = height_ratios

    def get_height_ratios(self):
        """
        Return the height ratios.

        This is ``[1] * nrows`` if no height ratios have been set
        explicitly (see `set_height_ratios`).
        """
        return self._row_height_ratios

    def get_grid_positions(self, fig):
        """
        Return the positions of the grid cells in figure coordinates.

        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure the grid should be applied to. The subplot parameters
            (margins and spacing between subplots) are taken from *fig*.

        Returns
        -------
        bottoms, tops, lefts, rights : array
            The bottom, top, left, right positions of the grid cells in
            figure coordinates.
        """
        nrows, ncols = self.get_geometry()
        subplot_params = self.get_subplot_params(fig)
        left = subplot_params.left
        right = subplot_params.right
        bottom = subplot_params.bottom
        top = subplot_params.top
        wspace = subplot_params.wspace
        hspace = subplot_params.hspace
        tot_width = right - left
        tot_height = top - bottom
        # calculate accumulated heights of rows
        cell_h = tot_height / (nrows + hspace*(nrows-1))
        sep_h = hspace * cell_h
        norm = cell_h * nrows / sum(self._row_height_ratios)
        cell_heights = [r * norm for r in self._row_height_ratios]
        sep_heights = [0] + ([sep_h] * (nrows-1))
        # Interleave separator and cell extents, then accumulate.
        cell_hs = np.cumsum(np.column_stack([sep_heights, cell_heights]).flat)
        # calculate accumulated widths of columns
        cell_w = tot_width / (ncols + wspace*(ncols-1))
        sep_w = wspace * cell_w
        norm = cell_w * ncols / sum(self._col_width_ratios)
        cell_widths = [r * norm for r in self._col_width_ratios]
        sep_widths = [0] + ([sep_w] * (ncols-1))
        cell_ws = np.cumsum(np.column_stack([sep_widths, cell_widths]).flat)
        fig_tops, fig_bottoms = (top - cell_hs).reshape((-1, 2)).T
        fig_lefts, fig_rights = (left + cell_ws).reshape((-1, 2)).T
        return fig_bottoms, fig_tops, fig_lefts, fig_rights

    @staticmethod
    def _check_gridspec_exists(figure, nrows, ncols):
        """
        Check if the figure already has a gridspec with these dimensions,
        or create a new one
        """
        for ax in figure.get_axes():
            gs = ax.get_gridspec()
            if gs is not None:
                if hasattr(gs, 'get_topmost_subplotspec'):
                    # This is needed for colorbar gridspec layouts.
                    # This is probably OK because this whole logic tree
                    # is for when the user is doing simple things with the
                    # add_subplot command.  For complicated layouts
                    # like subgridspecs the proper gridspec is passed in...
                    gs = gs.get_topmost_subplotspec().get_gridspec()
                if gs.get_geometry() == (nrows, ncols):
                    return gs
        # else gridspec not found:
        return GridSpec(nrows, ncols, figure=figure)

    def __getitem__(self, key):
        """Create and return a `.SubplotSpec` instance."""
        nrows, ncols = self.get_geometry()

        def _normalize(key, size, axis):  # Includes last index.
            orig_key = key
            if isinstance(key, slice):
                start, stop, _ = key.indices(size)
                if stop > start:
                    return start, stop - 1
                raise IndexError("GridSpec slice would result in no space "
                                 "allocated for subplot")
            else:
                if key < 0:
                    # Negative indices count from the end, as usual.
                    key = key + size
                if 0 <= key < size:
                    return key, key
                elif axis is not None:
                    raise IndexError(f"index {orig_key} is out of bounds for "
                                     f"axis {axis} with size {size}")
                else:  # flat index
                    raise IndexError(f"index {orig_key} is out of bounds for "
                                     f"GridSpec with size {size}")

        if isinstance(key, tuple):
            try:
                k1, k2 = key
            except ValueError as err:
                raise ValueError("Unrecognized subplot spec") from err
            num1, num2 = np.ravel_multi_index(
                [_normalize(k1, nrows, 0), _normalize(k2, ncols, 1)],
                (nrows, ncols))
        else:  # Single key
            num1, num2 = _normalize(key, nrows * ncols, None)
        return SubplotSpec(self, num1, num2)

    def subplots(self, *, sharex=False, sharey=False, squeeze=True,
                 subplot_kw=None):
        """
        Add all subplots specified by this `GridSpec` to its parent figure.

        See `.Figure.subplots` for detailed documentation.
        """
        # NOTE(review): `self.figure` is set by the GridSpec subclasses, not
        # by this base class.
        figure = self.figure
        if figure is None:
            raise ValueError("GridSpec.subplots() only works for GridSpecs "
                             "created with a parent figure")
        if not isinstance(sharex, str):
            sharex = "all" if sharex else "none"
        if not isinstance(sharey, str):
            sharey = "all" if sharey else "none"
        _api.check_in_list(["all", "row", "col", "none", False, True],
                           sharex=sharex, sharey=sharey)
        if subplot_kw is None:
            subplot_kw = {}
        # don't mutate kwargs passed by user...
        subplot_kw = subplot_kw.copy()
        # Create array to hold all Axes.
        axarr = np.empty((self._nrows, self._ncols), dtype=object)
        for row in range(self._nrows):
            for col in range(self._ncols):
                shared_with = {"none": None, "all": axarr[0, 0],
                               "row": axarr[row, 0], "col": axarr[0, col]}
                subplot_kw["sharex"] = shared_with[sharex]
                subplot_kw["sharey"] = shared_with[sharey]
                axarr[row, col] = figure.add_subplot(
                    self[row, col], **subplot_kw)
        # turn off redundant tick labeling
        if sharex in ["col", "all"]:
            for ax in axarr.flat:
                ax._label_outer_xaxis(skip_non_rectangular_axes=True)
        if sharey in ["row", "all"]:
            for ax in axarr.flat:
                ax._label_outer_yaxis(skip_non_rectangular_axes=True)
        if squeeze:
            # Discarding unneeded dimensions that equal 1.  If we only have one
            # subplot, just return it instead of a 1-element array.
            return axarr.item() if axarr.size == 1 else axarr.squeeze()
        else:
            # Returned axis array will be always 2-d, even if nrows=ncols=1.
            return axarr
class GridSpec(GridSpecBase):
    """
    A grid layout to place subplots within a figure.

    The location of the grid cells is determined in a similar way to
    `.SubplotParams` using *left*, *right*, *top*, *bottom*, *wspace*
    and *hspace*.

    Indexing a GridSpec instance returns a `.SubplotSpec`.
    """

    def __init__(self, nrows, ncols, figure=None,
                 left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None,
                 width_ratios=None, height_ratios=None):
        """
        Parameters
        ----------
        nrows, ncols : int
            The number of rows and columns of the grid.
        figure : `.Figure`, optional
            Only used for constrained layout to create a proper layoutgrid.
        left, right, top, bottom : float, optional
            Extent of the subplots as a fraction of figure width or height.
            Left cannot be larger than right, and bottom cannot be larger than
            top. If not given, the values will be inferred from a figure or
            rcParams at draw time. See also `GridSpec.get_subplot_params`.
        wspace : float, optional
            The amount of width reserved for space between subplots,
            expressed as a fraction of the average axis width.
            If not given, the values will be inferred from a figure or
            rcParams when necessary. See also `GridSpec.get_subplot_params`.
        hspace : float, optional
            The amount of height reserved for space between subplots,
            expressed as a fraction of the average axis height.
            If not given, the values will be inferred from a figure or
            rcParams when necessary. See also `GridSpec.get_subplot_params`.
        width_ratios : array-like of length *ncols*, optional
            Defines the relative widths of the columns. Each column gets a
            relative width of ``width_ratios[i] / sum(width_ratios)``.
            If not given, all columns will have the same width.
        height_ratios : array-like of length *nrows*, optional
            Defines the relative heights of the rows. Each row gets a
            relative height of ``height_ratios[i] / sum(height_ratios)``.
            If not given, all rows will have the same height.
        """
        # None means "defer to the figure / rcParams at draw time".
        self.left = left
        self.bottom = bottom
        self.right = right
        self.top = top
        self.wspace = wspace
        self.hspace = hspace
        self.figure = figure
        super().__init__(nrows, ncols,
                         width_ratios=width_ratios,
                         height_ratios=height_ratios)

    # The subplot-parameter attributes that update() may set.
    _AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]

    def update(self, **kwargs):
        """
        Update the subplot parameters of the grid.

        Parameters that are not explicitly given are not changed. Setting a
        parameter to *None* resets it to :rc:`figure.subplot.*`.

        Parameters
        ----------
        left, right, top, bottom : float or None, optional
            Extent of the subplots as a fraction of figure width or height.
        wspace, hspace : float, optional
            Spacing between the subplots as a fraction of the average subplot
            width / height.
        """
        for k, v in kwargs.items():
            if k in self._AllowedKeys:
                setattr(self, k, v)
            else:
                raise AttributeError(f"{k} is an unknown keyword")
        # Reposition every existing Axes (in any open figure) that sits on
        # this gridspec so the new parameters take effect immediately.
        for figmanager in _pylab_helpers.Gcf.figs.values():
            for ax in figmanager.canvas.figure.axes:
                if ax.get_subplotspec() is not None:
                    ss = ax.get_subplotspec().get_topmost_subplotspec()
                    if ss.get_gridspec() == self:
                        fig = ax.get_figure(root=False)
                        ax._set_position(ax.get_subplotspec().get_position(fig))

    def get_subplot_params(self, figure=None):
        """
        Return the `.SubplotParams` for the GridSpec.

        In order of precedence the values are taken from

        - non-*None* attributes of the GridSpec
        - the provided *figure*
        - :rc:`figure.subplot.*`

        Note that the ``figure`` attribute of the GridSpec is always ignored.
        """
        if figure is None:
            kw = {k: mpl.rcParams["figure.subplot."+k]
                  for k in self._AllowedKeys}
            subplotpars = SubplotParams(**kw)
        else:
            # Copy so the figure's own subplotpars are not mutated below.
            subplotpars = copy.copy(figure.subplotpars)
        subplotpars.update(**{k: getattr(self, k) for k in self._AllowedKeys})
        return subplotpars

    def locally_modified_subplot_params(self):
        """
        Return a list of the names of the subplot parameters explicitly set
        in the GridSpec.

        This is a subset of the attributes of `.SubplotParams`.
        """
        return [k for k in self._AllowedKeys if getattr(self, k)]

    def tight_layout(self, figure, renderer=None,
                     pad=1.08, h_pad=None, w_pad=None, rect=None):
        """
        Adjust subplot parameters to give specified padding.

        Parameters
        ----------
        figure : `.Figure`
            The figure.
        renderer : `.RendererBase` subclass, optional
            The renderer to be used.
        pad : float
            Padding between the figure edge and the edges of subplots, as a
            fraction of the font-size.
        h_pad, w_pad : float, optional
            Padding (height/width) between edges of adjacent subplots.
            Defaults to *pad*.
        rect : tuple (left, bottom, right, top), default: None
            (left, bottom, right, top) rectangle in normalized figure
            coordinates that the whole subplots area (including labels) will
            fit into. Default (None) is the whole figure.
        """
        if renderer is None:
            renderer = figure._get_renderer()
        kwargs = _tight_layout.get_tight_layout_figure(
            figure, figure.axes,
            _tight_layout.get_subplotspec_list(figure.axes, grid_spec=self),
            renderer, pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
        if kwargs:
            self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
    """
    GridSpec whose subplot layout parameters are inherited from the
    location specified by a given SubplotSpec.
    """

    def __init__(self, nrows, ncols,
                 subplot_spec,
                 wspace=None, hspace=None,
                 height_ratios=None, width_ratios=None):
        """
        Parameters
        ----------
        nrows, ncols : int
            Number of rows and number of columns of the grid.
        subplot_spec : SubplotSpec
            Spec from which the layout parameters are inherited.
        wspace, hspace : float, optional
            See `GridSpec` for more details. If not specified default values
            (from the figure or rcParams) are used.
        height_ratios : array-like of length *nrows*, optional
            See `GridSpecBase` for details.
        width_ratios : array-like of length *ncols*, optional
            See `GridSpecBase` for details.
        """
        self._wspace = wspace
        self._hspace = hspace
        if isinstance(subplot_spec, SubplotSpec):
            self._subplot_spec = subplot_spec
        else:
            raise TypeError(
                "subplot_spec must be type SubplotSpec, "
                "usually from GridSpec, or axes.get_subplotspec.")
        # Inherit the parent gridspec's figure.
        self.figure = self._subplot_spec.get_gridspec().figure
        super().__init__(nrows, ncols,
                         width_ratios=width_ratios,
                         height_ratios=height_ratios)

    def get_subplot_params(self, figure=None):
        """Return a dictionary of subplot layout parameters."""
        # Precedence: explicit value on self, then the figure, then rcParams.
        hspace = (self._hspace if self._hspace is not None
                  else figure.subplotpars.hspace if figure is not None
                  else mpl.rcParams["figure.subplot.hspace"])
        wspace = (self._wspace if self._wspace is not None
                  else figure.subplotpars.wspace if figure is not None
                  else mpl.rcParams["figure.subplot.wspace"])
        # The outer extents come from the parent SubplotSpec's position.
        figbox = self._subplot_spec.get_position(figure)
        left, bottom, right, top = figbox.extents
        return SubplotParams(left=left, right=right,
                             bottom=bottom, top=top,
                             wspace=wspace, hspace=hspace)

    def get_topmost_subplotspec(self):
        """
        Return the topmost `.SubplotSpec` instance associated with the subplot.
        """
        return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec:
"""
The location of a subplot in a `GridSpec`.
.. note::
Likely, you will never instantiate a `SubplotSpec` yourself. Instead,
you will typically obtain one from a `GridSpec` using item-access.
Parameters
----------
gridspec : `~matplotlib.gridspec.GridSpec`
The GridSpec, which the subplot is referencing.
num1, num2 : int
The subplot will occupy the *num1*-th cell of the given
*gridspec*. If *num2* is provided, the subplot will span between
*num1*-th cell and *num2*-th cell **inclusive**.
The index starts from 0.
"""
def __init__(self, gridspec, num1, num2=None):
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def __repr__(self):
return (f"{self.get_gridspec()}["
f"{self.rowspan.start}:{self.rowspan.stop}, "
f"{self.colspan.start}:{self.colspan.stop}]")
@staticmethod
def _from_subplot_args(figure, args):
"""
Construct a `.SubplotSpec` from a parent `.Figure` and either
- a `.SubplotSpec` -- returned as is;
- one or three numbers -- a MATLAB-style subplot specifier.
"""
if len(args) == 1:
arg, = args
if isinstance(arg, SubplotSpec):
return arg
elif not isinstance(arg, Integral):
raise ValueError(
f"Single argument to subplot must be a three-digit "
f"integer, not {arg!r}")
try:
rows, cols, num = map(int, str(arg))
except ValueError:
raise ValueError(
f"Single argument to subplot must be a three-digit "
f"integer, not {arg!r}") from None
elif len(args) == 3:
rows, cols, num = args
else:
raise _api.nargs_error("subplot", takes="1 or 3", given=len(args))
gs = GridSpec._check_gridspec_exists(figure, rows, cols)
if gs is None:
gs = GridSpec(rows, cols, figure=figure)
if isinstance(num, tuple) and len(num) == 2:
if not all(isinstance(n, Integral) for n in num):
raise ValueError(
f"Subplot specifier tuple must contain integers, not {num}"
)
i, j = num
else:
if not isinstance(num, Integral) or num < 1 or num > rows*cols:
raise ValueError(
f"num must be an integer with 1 <= num <= {rows*cols}, "
f"not {num!r}"
)
i = j = num
return gs[i-1:j]
# num2 is a property only to handle the case where it is None and someone
# mutates num1.
@property
def num2(self):
return self.num1 if self._num2 is None else self._num2
@num2.setter
def num2(self, value):
self._num2 = value
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
    """
    Return the subplot geometry as tuple ``(n_rows, n_cols, start, stop)``.
    The indices *start* and *stop* define the range of the subplot within
    the `GridSpec`. *stop* is inclusive (i.e. for a single cell
    ``start == stop``).
    """
    rows, cols = self.get_gridspec().get_geometry()
    return rows, cols, self.num1, self.num2

@property
def rowspan(self):
    """The rows spanned by this subplot, as a `range` object."""
    ncols = self.get_gridspec().ncols
    # num1/num2 are flat (row-major) cell indices; floor division by the
    # column count recovers the row index.
    return range(self.num1 // ncols, self.num2 // ncols + 1)

@property
def colspan(self):
    """The columns spanned by this subplot, as a `range` object."""
    ncols = self.get_gridspec().ncols
    # We explicitly support num2 referring to a column on num1's *left*, so
    # we must sort the column indices here so that the range makes sense.
    c1, c2 = sorted([self.num1 % ncols, self.num2 % ncols])
    return range(c1, c2 + 1)

def is_first_row(self):
    # True if the span touches the top (index-0) row of the grid.
    return self.rowspan.start == 0

def is_last_row(self):
    # True if the span touches the bottom row of the grid.
    return self.rowspan.stop == self.get_gridspec().nrows

def is_first_col(self):
    # True if the span touches the leftmost column of the grid.
    return self.colspan.start == 0

def is_last_col(self):
    # True if the span touches the rightmost column of the grid.
    return self.colspan.stop == self.get_gridspec().ncols
def get_position(self, figure):
    """
    Update the subplot position from ``figure.subplotpars``.

    Returns the bounding box of the (possibly multi-cell) subplot in
    figure coordinates, as a `.Bbox`.
    """
    gridspec = self.get_gridspec()
    nrows, ncols = gridspec.get_geometry()
    # Convert the flat start/stop cell indices into (row, col) pairs.
    rows, cols = np.unravel_index([self.num1, self.num2], (nrows, ncols))
    fig_bottoms, fig_tops, fig_lefts, fig_rights = \
        gridspec.get_grid_positions(figure)
    # Take the extremes over the spanned cells so the box covers the
    # whole span regardless of the order of num1/num2.
    fig_bottom = fig_bottoms[rows].min()
    fig_top = fig_tops[rows].max()
    fig_left = fig_lefts[cols].min()
    fig_right = fig_rights[cols].max()
    return Bbox.from_extents(fig_left, fig_bottom, fig_right, fig_top)
def get_topmost_subplotspec(self):
    """
    Return the topmost `SubplotSpec` instance associated with the subplot.
    """
    # A nested gridspec (GridSpecFromSubplotSpec) can recurse up through
    # its parent spec; a plain GridSpec cannot, in which case this spec is
    # already the topmost one.
    recurse = getattr(
        self.get_gridspec(), "get_topmost_subplotspec", None)
    return self if recurse is None else recurse()
def __eq__(self, other):
    """
    Two SubplotSpecs are considered equal if they refer to the same
    position(s) in the same `GridSpec`.
    """
    # other may not even have the attributes we are checking.
    # getattr with a fresh object() sentinel makes each missing attribute
    # compare unequal, so foreign types safely return False.
    return ((self._gridspec, self.num1, self.num2)
            == (getattr(other, "_gridspec", object()),
                getattr(other, "num1", object()),
                getattr(other, "num2", object())))

def __hash__(self):
    # Must stay consistent with __eq__: hash on the same triple.
    return hash((self._gridspec, self.num1, self.num2))
def subgridspec(self, nrows, ncols, **kwargs):
    """
    Create a GridSpec within this subplot.

    The created `.GridSpecFromSubplotSpec` will have this `SubplotSpec` as
    a parent.

    Parameters
    ----------
    nrows : int
        Number of rows in grid.
    ncols : int
        Number of columns in grid.

    Returns
    -------
    `.GridSpecFromSubplotSpec`

    Other Parameters
    ----------------
    **kwargs
        All other parameters are passed to `.GridSpecFromSubplotSpec`.

    See Also
    --------
    matplotlib.pyplot.subplots

    Examples
    --------
    Adding three subplots in the space occupied by a single subplot::

        fig = plt.figure()
        gs0 = fig.add_gridspec(3, 1)
        ax1 = fig.add_subplot(gs0[0])
        ax2 = fig.add_subplot(gs0[1])
        gssub = gs0[2].subgridspec(1, 3)
        for i in range(3):
            fig.add_subplot(gssub[0, i])
    """
    # Delegate entirely to GridSpecFromSubplotSpec with self as parent.
    return GridSpecFromSubplotSpec(nrows, ncols, self, **kwargs)
class SubplotParams:
    """
    Parameters defining the positioning of a subplots grid in a figure.
    """

    # All configurable dimensions, in the order used throughout this class.
    _keys = ("left", "bottom", "right", "top", "wspace", "hspace")

    def __init__(self, left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None):
        """
        Defaults are given by :rc:`figure.subplot.[name]`.

        Parameters
        ----------
        left : float
            The position of the left edge of the subplots,
            as a fraction of the figure width.
        right : float
            The position of the right edge of the subplots,
            as a fraction of the figure width.
        bottom : float
            The position of the bottom edge of the subplots,
            as a fraction of the figure height.
        top : float
            The position of the top edge of the subplots,
            as a fraction of the figure height.
        wspace : float
            The width of the padding between subplots,
            as a fraction of the average Axes width.
        hspace : float
            The height of the padding between subplots,
            as a fraction of the average Axes height.
        """
        # Seed every attribute from its rcParams default, then overlay any
        # explicitly passed values (update also validates consistency).
        for key in self._keys:
            setattr(self, key, mpl.rcParams[f"figure.subplot.{key}"])
        self.update(left, bottom, right, top, wspace, hspace)

    def update(self, left=None, bottom=None, right=None, top=None,
               wspace=None, hspace=None):
        """
        Update the dimensions of the passed parameters. *None* means unchanged.
        """
        # Validate against the *effective* values (new if given, else
        # current) before mutating anything, so a failed update leaves the
        # object untouched.
        eff_left = self.left if left is None else left
        eff_right = self.right if right is None else right
        if eff_left >= eff_right:
            raise ValueError('left cannot be >= right')
        eff_bottom = self.bottom if bottom is None else bottom
        eff_top = self.top if top is None else top
        if eff_bottom >= eff_top:
            raise ValueError('bottom cannot be >= top')
        for key, value in zip(self._keys,
                              (left, bottom, right, top, wspace, hspace)):
            if value is not None:
                setattr(self, key, value)
venv\Lib\site-packages\matplotlib\hatch.py
"""Contains classes for generating hatch patterns."""
import numpy as np
from matplotlib import _api
from matplotlib.path import Path
class HatchPatternBase:
    """The base class for a hatch pattern."""
    # Concrete patterns expose ``num_vertices`` and
    # ``set_vertices_and_codes(vertices, codes)``, which fill pre-allocated
    # slices of the aggregate vertex/code arrays built by ``get_path``.
    pass
class HorizontalHatch(HatchPatternBase):
    """Horizontal hatch lines, produced by the '-' and '+' specifiers."""

    def __init__(self, hatch, density):
        # Each '-' or '+' occurrence contributes *density* horizontal lines.
        self.num_lines = int((hatch.count('-') + hatch.count('+')) * density)
        # Two vertices (start, end) per line.
        self.num_vertices = 2 * self.num_lines

    def set_vertices_and_codes(self, vertices, codes):
        # Evenly spaced y positions in [0, 1), shifted by half a step so the
        # lines are centered within their bands.
        ys, step = np.linspace(0.0, 1.0, self.num_lines, False, retstep=True)
        ys = ys + step / 2.
        left_ends = vertices[0::2]
        right_ends = vertices[1::2]
        left_ends[:, 0] = 0.0
        left_ends[:, 1] = ys
        right_ends[:, 0] = 1.0
        right_ends[:, 1] = ys
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class VerticalHatch(HatchPatternBase):
    """Vertical hatch lines, produced by the '|' and '+' specifiers."""

    def __init__(self, hatch, density):
        # Each '|' or '+' occurrence contributes *density* vertical lines.
        self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
        self.num_vertices = self.num_lines * 2

    def set_vertices_and_codes(self, vertices, codes):
        # Evenly spaced x positions, centered within their bands.
        steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
                                      retstep=True)
        steps += stepsize / 2.
        # Even entries are line starts (bottom), odd entries ends (top).
        vertices[0::2, 0] = steps
        vertices[0::2, 1] = 0.0
        vertices[1::2, 0] = steps
        vertices[1::2, 1] = 1.0
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class NorthEastHatch(HatchPatternBase):
    """Diagonal hatch lines rising to the right ('/', 'x', 'X')."""

    def __init__(self, hatch, density):
        self.num_lines = int(
            (hatch.count('/') + hatch.count('x') + hatch.count('X')) * density)
        if self.num_lines:
            # One extra line so the diagonals cover both corners.
            self.num_vertices = (self.num_lines + 1) * 2
        else:
            self.num_vertices = 0

    def set_vertices_and_codes(self, vertices, codes):
        # Offsets shift each diagonal across the unit square.
        steps = np.linspace(-0.5, 0.5, self.num_lines + 1)
        vertices[0::2, 0] = 0.0 + steps
        vertices[0::2, 1] = 0.0 - steps
        vertices[1::2, 0] = 1.0 + steps
        vertices[1::2, 1] = 1.0 - steps
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class SouthEastHatch(HatchPatternBase):
    """Diagonal hatch lines falling to the right ('\\\\', 'x', 'X')."""

    def __init__(self, hatch, density):
        self.num_lines = int(
            (hatch.count('\\') + hatch.count('x') + hatch.count('X'))
            * density)
        if self.num_lines:
            # One extra line so the diagonals cover both corners.
            self.num_vertices = (self.num_lines + 1) * 2
        else:
            self.num_vertices = 0

    def set_vertices_and_codes(self, vertices, codes):
        # Offsets shift each diagonal across the unit square.
        steps = np.linspace(-0.5, 0.5, self.num_lines + 1)
        vertices[0::2, 0] = 0.0 + steps
        vertices[0::2, 1] = 1.0 + steps
        vertices[1::2, 0] = 1.0 + steps
        vertices[1::2, 1] = 0.0 + steps
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class Shapes(HatchPatternBase):
    """
    Base class for hatches made of repeated small shapes (circles, stars).

    Subclasses must set ``num_rows``, ``size``, ``shape_vertices`` and
    ``shape_codes`` before calling ``super().__init__``.
    """
    filled = False

    def __init__(self, hatch, density):
        if self.num_rows == 0:
            self.num_shapes = 0
            self.num_vertices = 0
        else:
            # Shapes are laid out on a staggered grid (see
            # set_vertices_and_codes): even rows hold num_rows + 1 shapes,
            # odd rows num_rows, over num_rows + 1 rows in total.
            self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
                               (self.num_rows // 2) * self.num_rows)
            # Unfilled shapes are traced forward and backward, doubling the
            # vertex count.
            self.num_vertices = (self.num_shapes *
                                 len(self.shape_vertices) *
                                 (1 if self.filled else 2))

    def set_vertices_and_codes(self, vertices, codes):
        # Spacing between rows; also the column pitch.
        offset = 1.0 / self.num_rows
        shape_vertices = self.shape_vertices * offset * self.size
        shape_codes = self.shape_codes
        if not self.filled:
            shape_vertices = np.concatenate(  # Forward, then backward.
                [shape_vertices, shape_vertices[::-1] * 0.9])
            shape_codes = np.concatenate([shape_codes, shape_codes])
        vertices_parts = []
        codes_parts = []
        for row in range(self.num_rows + 1):
            if row % 2 == 0:
                cols = np.linspace(0, 1, self.num_rows + 1)
            else:
                # Odd rows are shifted by half a pitch (staggered layout).
                cols = np.linspace(offset / 2, 1 - offset / 2, self.num_rows)
            row_pos = row * offset
            for col_pos in cols:
                vertices_parts.append(shape_vertices + [col_pos, row_pos])
                codes_parts.append(shape_codes)
        # Write directly into the caller-provided output slices.
        np.concatenate(vertices_parts, out=vertices)
        np.concatenate(codes_parts, out=codes)
class Circles(Shapes):
    """Shapes hatch whose repeated shape is the unit circle."""

    def __init__(self, hatch, density):
        path = Path.unit_circle()
        self.shape_vertices = path.vertices
        self.shape_codes = path.codes
        super().__init__(hatch, density)


class SmallCircles(Circles):
    """Small open circles, produced by the 'o' specifier."""
    size = 0.2

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('o')) * density
        super().__init__(hatch, density)


class LargeCircles(Circles):
    """Large open circles, produced by the 'O' specifier."""
    size = 0.35

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('O')) * density
        super().__init__(hatch, density)


class SmallFilledCircles(Circles):
    """Small filled circles (dots), produced by the '.' specifier."""
    size = 0.1
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('.')) * density
        super().__init__(hatch, density)
class Stars(Shapes):
    """Filled five-pointed stars, produced by the '*' specifier."""
    size = 1.0 / 3.0
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('*')) * density
        path = Path.unit_regular_star(5)
        self.shape_vertices = path.vertices
        # The star path carries no codes; build them: move to the first
        # vertex, then draw lines through the rest.
        self.shape_codes = np.full(len(self.shape_vertices), Path.LINETO,
                                   dtype=Path.code_type)
        self.shape_codes[0] = Path.MOVETO
        super().__init__(hatch, density)
# All concrete pattern classes; get_path instantiates each of these against
# the hatch string and concatenates their vertices/codes.
_hatch_types = [
    HorizontalHatch,
    VerticalHatch,
    NorthEastHatch,
    SouthEastHatch,
    SmallCircles,
    LargeCircles,
    SmallFilledCircles,
    Stars
]
def _validate_hatch_pattern(hatch):
valid_hatch_patterns = set(r'-+|/\xXoO.*')
if hatch is not None:
invalids = set(hatch).difference(valid_hatch_patterns)
if invalids:
valid = ''.join(sorted(valid_hatch_patterns))
invalids = ''.join(sorted(invalids))
_api.warn_deprecated(
'3.4',
removal='3.11', # one release after custom hatches (#20690)
message=f'hatch must consist of a string of "{valid}" or '
'None, but found the following invalid values '
f'"{invalids}". Passing invalid values is deprecated '
'since %(since)s and will become an error in %(removal)s.'
)
def get_path(hatchpattern, density=6):
    """
    Given a hatch specifier, *hatchpattern*, generates Path to render
    the hatch in a unit square. *density* is the number of lines per
    unit square.
    """
    density = int(density)
    # Every pattern class inspects the hatch string; patterns that do not
    # apply report num_vertices == 0 below.
    patterns = [cls(hatchpattern, density) for cls in _hatch_types]
    total = sum(pattern.num_vertices for pattern in patterns)
    if total == 0:
        return Path(np.empty((0, 2)))
    vertices = np.empty((total, 2))
    codes = np.empty(total, Path.code_type)
    # Let each active pattern fill its own slice of the shared arrays.
    cursor = 0
    for pattern in patterns:
        count = pattern.num_vertices
        if count != 0:
            pattern.set_vertices_and_codes(vertices[cursor:cursor + count],
                                           codes[cursor:cursor + count])
            cursor += count
    return Path(vertices, codes)
venv\Lib\site-packages\matplotlib\image.py
"""
The image module supports basic image loading, rescaling and display
operations.
"""
import math
import os
import logging
from pathlib import Path
import warnings
import numpy as np
import PIL.Image
import PIL.PngImagePlugin
import matplotlib as mpl
from matplotlib import _api, cbook
# For clarity, names from _image are given explicitly in this module
from matplotlib import _image
# For user convenience, the names from _image are also imported into
# the image namespace
from matplotlib._image import * # noqa: F401, F403
import matplotlib.artist as martist
import matplotlib.colorizer as mcolorizer
from matplotlib.backend_bases import FigureCanvasBase
import matplotlib.colors as mcolors
from matplotlib.transforms import (
Affine2D, BboxBase, Bbox, BboxTransform, BboxTransformTo,
IdentityTransform, TransformedBbox)
_log = logging.getLogger(__name__)
# map interpolation strings to module constants
_interpd_ = {
'auto': _image.NEAREST, # this will use nearest or Hanning...
'none': _image.NEAREST, # fall back to nearest when not supported
'nearest': _image.NEAREST,
'bilinear': _image.BILINEAR,
'bicubic': _image.BICUBIC,
'spline16': _image.SPLINE16,
'spline36': _image.SPLINE36,
'hanning': _image.HANNING,
'hamming': _image.HAMMING,
'hermite': _image.HERMITE,
'kaiser': _image.KAISER,
'quadric': _image.QUADRIC,
'catrom': _image.CATROM,
'gaussian': _image.GAUSSIAN,
'bessel': _image.BESSEL,
'mitchell': _image.MITCHELL,
'sinc': _image.SINC,
'lanczos': _image.LANCZOS,
'blackman': _image.BLACKMAN,
'antialiased': _image.NEAREST, # this will use nearest or Hanning...
}
interpolations_names = set(_interpd_)
def composite_images(images, renderer, magnification=1.0):
    """
    Composite a number of RGBA images into one.  The images are
    composited in the order in which they appear in the *images* list.

    Parameters
    ----------
    images : list of Images
        Each must have a `make_image` method.  For each image,
        `can_composite` should return `True`, though this is not
        enforced by this function.  Each image must have a purely
        affine transformation with no shear.
    renderer : `.RendererBase`
    magnification : float, default: 1
        The additional magnification to apply for the renderer in use.

    Returns
    -------
    image : (M, N, 4) `numpy.uint8` array
        The composited RGBA image.
    offset_x, offset_y : float
        The (left, bottom) offset where the composited image should be placed
        in the output figure.
    """
    if len(images) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
    parts = []
    bboxes = []
    for image in images:
        data, x, y, trans = image.make_image(renderer, magnification)
        if data is not None:
            x *= magnification
            y *= magnification
            parts.append((data, x, y, image._get_scalar_alpha()))
            bboxes.append(
                Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))
    # All images may have produced no data (make_image returned None).
    if len(parts) == 0:
        return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
    bbox = Bbox.union(bboxes)
    output = np.zeros(
        (int(bbox.height), int(bbox.width), 4), dtype=np.uint8)
    for data, x, y, alpha in parts:
        # Place each part relative to the union bbox's origin.
        trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
        _image.resample(data, output, trans, _image.NEAREST,
                        resample=False, alpha=alpha)
    # Offsets are returned in pre-magnification coordinates.
    return output, bbox.x0 / magnification, bbox.y0 / magnification
def _draw_list_compositing_images(
        renderer, parent, artists, suppress_composite=None):
    """
    Draw a sorted list of artists, compositing images into a single
    image where possible.

    For internal Matplotlib use only: It is here to reduce duplication
    between `Figure.draw` and `Axes.draw`, but otherwise should not be
    generally useful.
    """
    has_images = any(isinstance(x, _ImageBase) for x in artists)
    # override the renderer default if suppressComposite is not None
    not_composite = (suppress_composite if suppress_composite is not None
                     else renderer.option_image_nocomposite())
    if not_composite or not has_images:
        for a in artists:
            a.draw(renderer)
    else:
        # Composite any adjacent images together
        image_group = []
        mag = renderer.get_image_magnification()

        def flush_images():
            # Draw the accumulated run of adjacent images: a single image
            # draws itself; multiple images are composited into one buffer.
            if len(image_group) == 1:
                image_group[0].draw(renderer)
            elif len(image_group) > 1:
                data, l, b = composite_images(image_group, renderer, mag)
                if data.size != 0:
                    gc = renderer.new_gc()
                    gc.set_clip_rectangle(parent.bbox)
                    gc.set_clip_path(parent.get_clip_path())
                    renderer.draw_image(gc, round(l), round(b), data)
                    gc.restore()
            del image_group[:]

        for a in artists:
            # Only clip-free, compositable images join a run; any other
            # artist flushes the pending run first to preserve draw order.
            if (isinstance(a, _ImageBase) and a.can_composite() and
                    a.get_clip_on() and not a.get_clip_path()):
                image_group.append(a)
            else:
                flush_images()
                a.draw(renderer)
        flush_images()
def _resample(
        image_obj, data, out_shape, transform, *, resample=None, alpha=1):
    """
    Convenience wrapper around `._image.resample` to resample *data* to
    *out_shape* (with a third dimension if *data* is RGBA) that takes care of
    allocating the output array and fetching the relevant properties from the
    Image object *image_obj*.
    """
    # AGG can only handle coordinates smaller than 24-bit signed integers,
    # so raise errors if the input data is larger than _image.resample can
    # handle.
    msg = ('Data with more than {n} cannot be accurately displayed. '
           'Downsampling to less than {n} before displaying. '
           'To remove this warning, manually downsample your data.')
    if data.shape[1] > 2**23:
        warnings.warn(msg.format(n='2**23 columns'))
        step = int(np.ceil(data.shape[1] / 2**23))
        data = data[:, ::step]
        # Compensate for the decimation so the image still lands in the
        # same place on screen.
        transform = Affine2D().scale(step, 1) + transform
    if data.shape[0] > 2**24:
        warnings.warn(msg.format(n='2**24 rows'))
        step = int(np.ceil(data.shape[0] / 2**24))
        data = data[::step, :]
        transform = Affine2D().scale(1, step) + transform
    # decide if we need to apply anti-aliasing if the data is upsampled:
    # compare the number of displayed pixels to the number of
    # the data pixels.
    interpolation = image_obj.get_interpolation()
    if interpolation in ['antialiased', 'auto']:
        # don't antialias if upsampling by an integer number or
        # if zooming in more than a factor of 3
        pos = np.array([[0, 0], [data.shape[1], data.shape[0]]])
        disp = transform.transform(pos)
        dispx = np.abs(np.diff(disp[:, 0]))
        dispy = np.abs(np.diff(disp[:, 1]))
        if ((dispx > 3 * data.shape[1] or
                dispx == data.shape[1] or
                dispx == 2 * data.shape[1]) and
                (dispy > 3 * data.shape[0] or
                 dispy == data.shape[0] or
                 dispy == 2 * data.shape[0])):
            interpolation = 'nearest'
        else:
            interpolation = 'hanning'
    # 2D->2D, 3D->3D: preserve a trailing channel axis if present.
    out = np.zeros(out_shape + data.shape[2:], data.dtype)
    if resample is None:
        resample = image_obj.get_resample()
    _image.resample(data, out, transform,
                    _interpd_[interpolation],
                    resample,
                    alpha,
                    image_obj.get_filternorm(),
                    image_obj.get_filterrad())
    return out
def _rgb_to_rgba(A):
"""
Convert an RGB image to RGBA, as required by the image resample C++
extension.
"""
rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
rgba[:, :, :3] = A
if rgba.dtype == np.uint8:
rgba[:, :, 3] = 255
else:
rgba[:, :, 3] = 1.0
return rgba
class _ImageBase(mcolorizer.ColorizingArtist):
"""
Base class for images.
interpolation and cmap default to their rc settings
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
extent is data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
Additional kwargs are matplotlib.artist properties
"""
zorder = 0
def __init__(self, ax,
             cmap=None,
             norm=None,
             colorizer=None,
             interpolation=None,
             origin=None,
             filternorm=True,
             filterrad=4.0,
             resample=False,
             *,
             interpolation_stage=None,
             **kwargs
             ):
    """
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The Axes the image will belong to.
    cmap, norm, colorizer
        Forwarded to ``_get_colorizer`` to build the colorizing machinery.
    interpolation, interpolation_stage, origin, filternorm, filterrad, \
resample
        Stored via the corresponding setters; *None* values fall back to
        the matching rcParams defaults inside those setters.
    **kwargs
        Remaining `.Artist` properties, applied via ``_internal_update``.
    """
    super().__init__(self._get_colorizer(cmap, norm, colorizer))
    if origin is None:
        origin = mpl.rcParams['image.origin']
    _api.check_in_list(["upper", "lower"], origin=origin)
    self.origin = origin
    self.set_filternorm(filternorm)
    self.set_filterrad(filterrad)
    self.set_interpolation(interpolation)
    self.set_interpolation_stage(interpolation_stage)
    self.set_resample(resample)
    self.axes = ax
    # _imcache caches the rendered RGBA buffer; invalidated whenever the
    # data or alpha changes.
    self._imcache = None
    self._internal_update(kwargs)
def __str__(self):
    # get_shape raises RuntimeError while no array has been set yet; fall
    # back to the bare class name in that case.
    try:
        shape = self.get_shape()
        return f"{type(self).__name__}(shape={shape!r})"
    except RuntimeError:
        return type(self).__name__

def __getstate__(self):
    # Save some space on the pickle by not saving the cache.
    return {**super().__getstate__(), "_imcache": None}

def get_size(self):
    """Return the size of the image as tuple (numrows, numcols)."""
    return self.get_shape()[:2]

def get_shape(self):
    """
    Return the shape of the image as tuple (numrows, numcols, channels).

    Raises
    ------
    RuntimeError
        If no image array has been set yet.
    """
    if self._A is None:
        raise RuntimeError('You must first set the image array')
    return self._A.shape
def set_alpha(self, alpha):
    """
    Set the alpha value used for blending - not supported on all backends.

    Parameters
    ----------
    alpha : float or 2D array-like or None
    """
    martist.Artist._set_alpha_for_array(self, alpha)
    if np.ndim(alpha) not in (0, 2):
        raise TypeError('alpha must be a float, two-dimensional '
                        'array, or None')
    # Alpha affects the rendered pixels, so drop the cached RGBA buffer.
    self._imcache = None

def _get_scalar_alpha(self):
    """
    Get a scalar alpha value to be applied to the artist as a whole.

    If the alpha value is a matrix, the method returns 1.0 because pixels
    have individual alpha values (see `~._ImageBase._make_image` for
    details). If the alpha value is a scalar, the method returns said value
    to be applied to the artist as a whole because pixels do not have
    individual alpha values.
    """
    return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 \
        else self._alpha

def changed(self):
    """
    Call this whenever the mappable is changed so observers can update.
    """
    # Invalidate the cached render before notifying observers.
    self._imcache = None
    super().changed()
def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
                unsampled=False, round_to_pixel_border=True):
    """
    Normalize, rescale, and colormap the image *A* from the given *in_bbox*
    (in data space), to the given *out_bbox* (in pixel space) clipped to
    the given *clip_bbox* (also in pixel space), and magnified by the
    *magnification* factor.

    Parameters
    ----------
    A : ndarray
        - a (M, N) array interpreted as scalar (greyscale) image,
          with one of the dtypes `~numpy.float32`, `~numpy.float64`,
          `~numpy.float128`, `~numpy.uint16` or `~numpy.uint8`.
        - (M, N, 4) RGBA image with a dtype of `~numpy.float32`,
          `~numpy.float64`, `~numpy.float128`, or `~numpy.uint8`.
    in_bbox : `~matplotlib.transforms.Bbox`
    out_bbox : `~matplotlib.transforms.Bbox`
    clip_bbox : `~matplotlib.transforms.Bbox`
    magnification : float, default: 1
    unsampled : bool, default: False
        If True, the image will not be scaled, but an appropriate
        affine transformation will be returned instead.
    round_to_pixel_border : bool, default: True
        If True, the output image size will be rounded to the nearest pixel
        boundary.  This makes the images align correctly with the Axes.
        It should not be used if exact scaling is needed, such as for
        `.FigureImage`.

    Returns
    -------
    image : (M, N, 4) `numpy.uint8` array
        The RGBA image, resampled unless *unsampled* is True.
    x, y : float
        The upper left corner where the image should be drawn, in pixel
        space.
    trans : `~matplotlib.transforms.Affine2D`
        The affine transformation from image to pixel space.
    """
    if A is None:
        raise RuntimeError('You must first set the image '
                           'array or the image attribute')
    if A.size == 0:
        raise RuntimeError("_make_image must get a non-empty image. "
                           "Your Artist's draw method must filter before "
                           "this method is called.")
    clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)
    if clipped_bbox is None:
        return None, 0, 0, None
    out_width_base = clipped_bbox.width * magnification
    out_height_base = clipped_bbox.height * magnification
    if out_width_base == 0 or out_height_base == 0:
        return None, 0, 0, None
    if self.origin == 'upper':
        # Flip the input image using a transform.  This avoids the
        # problem with flipping the array, which results in a copy
        # when it is converted to contiguous in the C wrapper
        t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
    else:
        t0 = IdentityTransform()
    # t0: image pixel coords -> data space -> display space.
    t0 += (
        Affine2D()
        .scale(
            in_bbox.width / A.shape[1],
            in_bbox.height / A.shape[0])
        .translate(in_bbox.x0, in_bbox.y0)
        + self.get_transform())
    # t: additionally shift to the clipped origin and apply magnification.
    t = (t0
         + (Affine2D()
            .translate(-clipped_bbox.x0, -clipped_bbox.y0)
            .scale(magnification)))
    # So that the image is aligned with the edge of the Axes, we want to
    # round up the output width to the next integer.  This also means
    # scaling the transform slightly to account for the extra subpixel.
    if ((not unsampled) and t.is_affine and round_to_pixel_border and
            (out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
        out_width = math.ceil(out_width_base)
        out_height = math.ceil(out_height_base)
        extra_width = (out_width - out_width_base) / out_width_base
        extra_height = (out_height - out_height_base) / out_height_base
        t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
    else:
        out_width = int(out_width_base)
        out_height = int(out_height_base)
    out_shape = (out_height, out_width)
    if not unsampled:
        if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in (3, 4)):
            raise ValueError(f"Invalid shape {A.shape} for image data")
        # if antialiased, this needs to change as window sizes
        # change:
        interpolation_stage = self._interpolation_stage
        if interpolation_stage in ['antialiased', 'auto']:
            # Resolve 'auto' by the upsampling rate: below 3x, interpolate
            # in RGBA space; otherwise in data space.
            pos = np.array([[0, 0], [A.shape[1], A.shape[0]]])
            disp = t.transform(pos)
            dispx = np.abs(np.diff(disp[:, 0])) / A.shape[1]
            dispy = np.abs(np.diff(disp[:, 1])) / A.shape[0]
            if (dispx < 3) or (dispy < 3):
                interpolation_stage = 'rgba'
            else:
                interpolation_stage = 'data'
        if A.ndim == 2 and interpolation_stage == 'data':
            # if we are a 2D array, then we are running through the
            # norm + colormap transformation.  However, in general the
            # input data is not going to match the size on the screen so we
            # have to resample to the correct number of pixels
            if A.dtype.kind == 'f':  # Float dtype: scale to same dtype.
                scaled_dtype = np.dtype("f8" if A.dtype.itemsize > 4 else "f4")
                if scaled_dtype.itemsize < A.dtype.itemsize:
                    _api.warn_external(f"Casting input data from {A.dtype}"
                                       f" to {scaled_dtype} for imshow.")
            else:  # Int dtype, likely.
                # TODO slice input array first
                # Scale to appropriately sized float: use float32 if the
                # dynamic range is small, to limit the memory footprint.
                da = A.max().astype("f8") - A.min().astype("f8")
                scaled_dtype = "f8" if da > 1e8 else "f4"
            # resample the input data to the correct resolution and shape
            A_resampled = _resample(self, A.astype(scaled_dtype), out_shape, t)
            # if using NoNorm, cast back to the original datatype
            if isinstance(self.norm, mcolors.NoNorm):
                A_resampled = A_resampled.astype(A.dtype)
            # Compute out_mask (what screen pixels include "bad" data
            # pixels) and out_alpha (to what extent screen pixels are
            # covered by data pixels: 0 outside the data extent, 1 inside
            # (even for bad data), and intermediate values at the edges).
            mask = (np.where(A.mask, np.float32(np.nan), np.float32(1))
                    if A.mask.shape == A.shape  # nontrivial mask
                    else np.ones_like(A, np.float32))
            # we always have to interpolate the mask to account for
            # non-affine transformations
            out_alpha = _resample(self, mask, out_shape, t, resample=True)
            del mask  # Make sure we don't use mask anymore!
            out_mask = np.isnan(out_alpha)
            out_alpha[out_mask] = 1
            # Apply the pixel-by-pixel alpha values if present
            alpha = self.get_alpha()
            if alpha is not None and np.ndim(alpha) > 0:
                out_alpha *= _resample(self, alpha, out_shape, t, resample=True)
            # mask and run through the norm
            resampled_masked = np.ma.masked_array(A_resampled, out_mask)
            output = self.norm(resampled_masked)
        else:
            if A.ndim == 2:  # interpolation_stage = 'rgba'
                self.norm.autoscale_None(A)
                A = self.to_rgba(A)
            alpha = self.get_alpha()
            if alpha is None:  # alpha parameter not specified
                if A.shape[2] == 3:  # image has no alpha channel
                    output_alpha = 255 if A.dtype == np.uint8 else 1.0
                else:
                    output_alpha = _resample(  # resample alpha channel
                        self, A[..., 3], out_shape, t)
                output = _resample(  # resample rgb channels
                    self, _rgb_to_rgba(A[..., :3]), out_shape, t)
            elif np.ndim(alpha) > 0:  # Array alpha
                # user-specified array alpha overrides the existing alpha channel
                output_alpha = _resample(self, alpha, out_shape, t)
                output = _resample(
                    self, _rgb_to_rgba(A[..., :3]), out_shape, t)
            else:  # Scalar alpha
                if A.shape[2] == 3:  # broadcast scalar alpha
                    output_alpha = (255 * alpha) if A.dtype == np.uint8 else alpha
                else:  # or apply scalar alpha to existing alpha channel
                    output_alpha = _resample(self, A[..., 3], out_shape, t) * alpha
                output = _resample(
                    self, _rgb_to_rgba(A[..., :3]), out_shape, t)
            output[..., 3] = output_alpha  # recombine rgb and alpha
        # output is now either a 2D array of normed (int or float) data
        # or an RGBA array of re-sampled input
        output = self.to_rgba(output, bytes=True, norm=False)
        # output is now a correctly sized RGBA array of uint8
        # Apply alpha *after* if the input was greyscale without a mask
        if A.ndim == 2:
            alpha = self._get_scalar_alpha()
            alpha_channel = output[:, :, 3]
            alpha_channel[:] = (  # Assignment will cast to uint8.
                alpha_channel.astype(np.float32) * out_alpha * alpha)
    else:
        if self._imcache is None:
            self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
        output = self._imcache
        # Subset the input image to only the part that will be displayed.
        subset = TransformedBbox(clip_bbox, t0.inverted()).frozen()
        output = output[
            int(max(subset.ymin, 0)):
            int(min(subset.ymax + 1, output.shape[0])),
            int(max(subset.xmin, 0)):
            int(min(subset.xmax + 1, output.shape[1]))]
        # Compensate the transform for the subsetting offset.
        t = Affine2D().translate(
            int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t
    return output, clipped_bbox.x0, clipped_bbox.y0, t
def make_image(self, renderer, magnification=1.0, unsampled=False):
    """
    Normalize, rescale, and colormap this image's data for rendering using
    *renderer*, with the given *magnification*.

    If *unsampled* is True, the image will not be scaled, but an
    appropriate affine transformation will be returned instead.

    Returns
    -------
    image : (M, N, 4) `numpy.uint8` array
        The RGBA image, resampled unless *unsampled* is True.
    x, y : float
        The upper left corner where the image should be drawn, in pixel
        space.
    trans : `~matplotlib.transforms.Affine2D`
        The affine transformation from image to pixel space.
    """
    # Abstract: concrete subclasses supply the bboxes and call _make_image.
    raise NotImplementedError('The make_image method must be overridden')

def _check_unsampled_image(self):
    """
    Return whether the image is better to be drawn unsampled.

    The derived class needs to override it.
    """
    return False
@martist.allow_rasterization
def draw(self, renderer):
    """Render the image with *renderer*; no-op if invisible or empty."""
    # if not visible, declare victory and return
    if not self.get_visible():
        self.stale = False
        return
    # for empty images, there is nothing to draw!
    if self.get_array().size == 0:
        self.stale = False
        return
    # actually render the image.
    gc = renderer.new_gc()
    self._set_gc_clip(gc)
    gc.set_alpha(self._get_scalar_alpha())
    gc.set_url(self.get_url())
    gc.set_gid(self.get_gid())
    if (renderer.option_scale_image()  # Renderer supports transform kwarg.
            and self._check_unsampled_image()
            and self.get_transform().is_affine):
        # Unsampled path: hand the raw image plus a scaling transform to
        # the renderer instead of resampling ourselves.
        im, l, b, trans = self.make_image(renderer, unsampled=True)
        if im is not None:
            trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
            renderer.draw_image(gc, l, b, im, trans)
    else:
        im, l, b, trans = self.make_image(
            renderer, renderer.get_image_magnification())
        if im is not None:
            renderer.draw_image(gc, l, b, im)
    gc.restore()
    self.stale = False
def contains(self, mouseevent):
    """Test whether the mouse event occurred within the image."""
    if (self._different_canvas(mouseevent)
            # This doesn't work for figimage.
            or not self.axes.contains(mouseevent)[0]):
        return False, {}
    # TODO: make sure this is consistent with patch and patch
    # collection on nonlinear transformed coordinates.
    # TODO: consider returning image coordinates (shouldn't
    # be too difficult given that the image is rectilinear
    trans = self.get_transform().inverted()
    x, y = trans.transform([mouseevent.x, mouseevent.y])
    xmin, xmax, ymin, ymax = self.get_extent()
    # This checks xmin <= x <= xmax *or* xmax <= x <= xmin.
    inside = (x is not None and (x - xmin) * (x - xmax) <= 0
              and y is not None and (y - ymin) * (y - ymax) <= 0)
    return inside, {}

def write_png(self, fname):
    """Write the image to png file *fname*."""
    # origin == 'lower' stores rows bottom-up; flip so the PNG is upright.
    im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A,
                      bytes=True, norm=True)
    PIL.Image.fromarray(im).save(fname, format="png")
@staticmethod
def _normalize_image_array(A):
    """
    Check validity of image-like input *A* and normalize it to a format suitable for
    Image subclasses.

    Raises
    ------
    TypeError
        If the dtype cannot be handled or the shape is not (M, N),
        (M, N, 3) or (M, N, 4) (a trailing singleton channel is squeezed).
    """
    A = cbook.safe_masked_invalid(A, copy=True)
    if A.dtype != np.uint8 and not np.can_cast(A.dtype, float, "same_kind"):
        raise TypeError(f"Image data of dtype {A.dtype} cannot be "
                        f"converted to float")
    if A.ndim == 3 and A.shape[-1] == 1:
        A = A.squeeze(-1)  # If just (M, N, 1), assume scalar and apply colormap.
    if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in [3, 4]):
        raise TypeError(f"Invalid shape {A.shape} for image data")
    if A.ndim == 3:
        # If the input data has values outside the valid range (after
        # normalisation), we issue a warning and then clip X to the bounds
        # - otherwise casting wraps extreme values, hiding outliers and
        # making reliable interpretation impossible.
        high = 255 if np.issubdtype(A.dtype, np.integer) else 1
        if A.min() < 0 or high < A.max():
            _log.warning(
                'Clipping input data to the valid range for imshow with '
                'RGB data ([0..1] for floats or [0..255] for integers). '
                'Got range [%s..%s].',
                A.min(), A.max()
            )
            A = np.clip(A, 0, high)
        # Cast unsupported integer types to uint8
        if A.dtype != np.uint8 and np.issubdtype(A.dtype, np.integer):
            A = A.astype(np.uint8)
    return A
def set_data(self, A):
    """
    Set the image array.

    Note that this function does *not* update the normalization used.

    Parameters
    ----------
    A : array-like or `PIL.Image.Image`
    """
    if isinstance(A, PIL.Image.Image):
        A = pil_to_array(A)  # Needed e.g. to apply png palette.
    self._A = self._normalize_image_array(A)
    # New data invalidates the cached render.
    self._imcache = None
    self.stale = True

def set_array(self, A):
    """
    Retained for backwards compatibility - use set_data instead.

    Parameters
    ----------
    A : array-like
    """
    # This also needs to be here to override the inherited
    # cm.ScalarMappable.set_array method so it is not invoked by mistake.
    self.set_data(A)
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'auto', 'antialiased', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos',
or 'none'.
"""
return self._interpolation
def set_interpolation(self, s):
    """
    Set the interpolation method used when resizing the image.

    If None, use :rc:`image.interpolation`.  The value 'none' (draw the
    image without interpolating) is honoured only by the agg, ps and pdf
    backends; other backends fall back to 'nearest'.

    Parameters
    ----------
    s : {'auto', 'nearest', 'bilinear', 'bicubic', 'spline16', \
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', \
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None
    """
    name = mpl._val_or_rc(s, 'image.interpolation').lower()
    _api.check_in_list(interpolations_names, interpolation=name)
    self._interpolation = name
    self.stale = True
def get_interpolation_stage(self):
    """
    Return the stage at which interpolation happens during the transform
    to RGBA: one of 'data', 'rgba', 'auto'.
    """
    return self._interpolation_stage
def set_interpolation_stage(self, s):
    """
    Set whether interpolation happens in data space or RGBA space.

    Parameters
    ----------
    s : {'data', 'rgba', 'auto'} or None
        Stage at which up/downsampling interpolation is applied.  None
        means :rc:`image.interpolation_stage`.  With 'auto', 'rgba' is
        used when the upsampling rate is below 3, otherwise 'data'.
    """
    stage = mpl._val_or_rc(s, 'image.interpolation_stage')
    _api.check_in_list(['data', 'rgba', 'auto'], s=stage)
    self._interpolation_stage = stage
    self.stale = True
def can_composite(self):
    """Return whether the image can be composited with its neighbors."""
    # Unsampled ('none') images cannot be composited; otherwise require a
    # separable affine transform.
    if self._interpolation == 'none':
        return False
    transform = self.get_transform()
    return transform.is_affine and transform.is_separable
def set_resample(self, v):
    """
    Set whether full image resampling is used.

    Parameters
    ----------
    v : bool or None
        If None, fall back to :rc:`image.resample`.
    """
    self._resample = mpl._val_or_rc(v, 'image.resample')
    self.stale = True
def get_resample(self):
    """Return whether full image resampling is in use."""
    return self._resample
def set_filternorm(self, filternorm):
    """
    Set whether the resize filter normalizes its weights.

    See help for `~.Axes.imshow`.

    Parameters
    ----------
    filternorm : bool
        Coerced with `bool` before being stored.
    """
    self._filternorm = bool(filternorm)
    self.stale = True
def get_filternorm(self):
    """Return whether the resize filter normalizes its weights."""
    return self._filternorm
def set_filterrad(self, filterrad):
    """
    Set the filter radius used by radius-based interpolation schemes
    (see help for imshow).

    Parameters
    ----------
    filterrad : positive float

    Raises
    ------
    ValueError
        If *filterrad* is not strictly positive.
    """
    radius = float(filterrad)
    if radius <= 0:
        raise ValueError("The filter radius must be a positive number")
    self._filterrad = radius
    self.stale = True
def get_filterrad(self):
    """Return the current filter radius setting."""
    return self._filterrad
class AxesImage(_ImageBase):
    """
    An image with pixels on a regular grid, attached to an Axes.

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The Axes the image will belong to.
    cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        The Colormap instance or registered colormap name used to map scalar
        data to colors.
    norm : str or `~matplotlib.colors.Normalize`
        Maps luminance to 0-1.
    interpolation : str, default: :rc:`image.interpolation`
        Supported values are 'none', 'auto', 'nearest', 'bilinear',
        'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
        'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
        'sinc', 'lanczos', 'blackman'.
    interpolation_stage : {'data', 'rgba'}, default: 'data'
        If 'data', interpolation is carried out on the data provided by the
        user.  If 'rgba', the interpolation is carried out after the
        colormapping has been applied (visual interpolation).
    origin : {'upper', 'lower'}, default: :rc:`image.origin`
        Place the [0, 0] index of the array in the upper left or lower left
        corner of the Axes. The convention 'upper' is typically used for
        matrices and images.
    extent : tuple, optional
        The data axes (left, right, bottom, top) for making image plots
        registered with data plots.  Default is to label the pixel
        centers with the zero-based row and column indices.
    filternorm : bool, default: True
        A parameter for the antigrain image resize filter
        (see the antigrain documentation).
        If filternorm is set, the filter normalizes integer values and corrects
        the rounding errors. It doesn't do anything with the source floating
        point values, it corrects only integers according to the rule of 1.0
        which means that any sum of pixel weights must be equal to 1.0. So,
        the filter function must produce a graph of the proper shape.
    filterrad : float > 0, default: 4
        The filter radius for filters that have a radius parameter, i.e. when
        interpolation is one of: 'sinc', 'lanczos' or 'blackman'.
    resample : bool, default: False
        When True, use a full resampling method. When False, only resample when
        the output image is larger than the input image.
    **kwargs : `~matplotlib.artist.Artist` properties
    """

    def __init__(self, ax,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 interpolation=None,
                 origin=None,
                 extent=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 interpolation_stage=None,
                 **kwargs
                 ):
        # Stash the user-supplied extent; the base class __init__ does not
        # take an *extent* argument.
        self._extent = extent

        super().__init__(
            ax,
            cmap=cmap,
            norm=norm,
            colorizer=colorizer,
            interpolation=interpolation,
            origin=origin,
            filternorm=filternorm,
            filterrad=filterrad,
            resample=resample,
            interpolation_stage=interpolation_stage,
            **kwargs
        )

    def get_window_extent(self, renderer=None):
        # Bounding box of the image in display coordinates.
        # NOTE(review): reads self._extent directly; when no explicit extent
        # was set this is None and the unpacking fails -- confirm whether
        # get_extent() (which falls back to pixel-index bounds) was intended.
        x0, x1, y0, y1 = self._extent
        bbox = Bbox.from_extents([x0, y0, x1, y1])
        return bbox.transformed(self.get_transform())

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        trans = self.get_transform()
        # image is created in the canvas coordinate.
        x1, x2, y1, y2 = self.get_extent()
        bbox = Bbox(np.array([[x1, y1], [x2, y2]]))
        transformed_bbox = TransformedBbox(bbox, trans)
        # Clip to the Axes (or an explicit clip box) when clipping is on,
        # otherwise to the whole figure canvas.
        clip = ((self.get_clip_box() or self.axes.bbox) if self.get_clip_on()
                else self.get_figure(root=True).bbox)
        return self._make_image(self._A, bbox, transformed_bbox, clip,
                                magnification, unsampled=unsampled)

    def _check_unsampled_image(self):
        """Return whether the image would be better drawn unsampled."""
        return self.get_interpolation() == "none"

    def set_extent(self, extent, **kwargs):
        """
        Set the image extent.

        Parameters
        ----------
        extent : 4-tuple of float
            The position and size of the image as tuple
            ``(left, right, bottom, top)`` in data coordinates.
        **kwargs
            Other parameters from which unit info (i.e., the *xunits*,
            *yunits*, *zunits* (for 3D Axes), *runits* and *thetaunits* (for
            polar Axes) entries are applied, if present.

        Notes
        -----
        This updates `.Axes.dataLim`, and, if autoscaling, sets `.Axes.viewLim`
        to tightly fit the image, regardless of `~.Axes.dataLim`.  Autoscaling
        state is not changed, so a subsequent call to `.Axes.autoscale_view`
        will redo the autoscaling in accord with `~.Axes.dataLim`.
        """
        (xmin, xmax), (ymin, ymax) = self.axes._process_unit_info(
            [("x", [extent[0], extent[1]]),
             ("y", [extent[2], extent[3]])],
            kwargs)
        # Any keyword not consumed by _process_unit_info is an error.
        if kwargs:
            raise _api.kwarg_error("set_extent", kwargs)
        xmin = self.axes._validate_converted_limits(
            xmin, self.convert_xunits)
        xmax = self.axes._validate_converted_limits(
            xmax, self.convert_xunits)
        ymin = self.axes._validate_converted_limits(
            ymin, self.convert_yunits)
        ymax = self.axes._validate_converted_limits(
            ymax, self.convert_yunits)
        extent = [xmin, xmax, ymin, ymax]

        self._extent = extent
        corners = (xmin, ymin), (xmax, ymax)
        self.axes.update_datalim(corners)
        # Keep autoscaling from adding margins around the image edges.
        self.sticky_edges.x[:] = [xmin, xmax]
        self.sticky_edges.y[:] = [ymin, ymax]
        if self.axes.get_autoscalex_on():
            self.axes.set_xlim((xmin, xmax), auto=None)
        if self.axes.get_autoscaley_on():
            self.axes.set_ylim((ymin, ymax), auto=None)
        self.stale = True

    def get_extent(self):
        """Return the image extent as tuple (left, right, bottom, top)."""
        if self._extent is not None:
            return self._extent
        else:
            # No explicit extent: label pixel centers with the zero-based
            # row/column indices, hence the +/-0.5 pixel edges.
            sz = self.get_size()
            numrows, numcols = sz
            if self.origin == 'upper':
                return (-0.5, numcols-0.5, numrows-0.5, -0.5)
            else:
                return (-0.5, numcols-0.5, -0.5, numrows-0.5)

    def get_cursor_data(self, event):
        """
        Return the image value at the event position or *None* if the event is
        outside the image.

        See Also
        --------
        matplotlib.artist.Artist.get_cursor_data
        """
        xmin, xmax, ymin, ymax = self.get_extent()
        if self.origin == 'upper':
            # Row 0 is at the top; swap so the Bbox below is increasing in y.
            ymin, ymax = ymax, ymin
        arr = self.get_array()
        data_extent = Bbox([[xmin, ymin], [xmax, ymax]])
        array_extent = Bbox([[0, 0], [arr.shape[1], arr.shape[0]]])
        # Display coords -> data coords -> fractional array indices.
        trans = self.get_transform().inverted()
        trans += BboxTransform(boxin=data_extent, boxout=array_extent)
        point = trans.transform([event.x, event.y])
        if any(np.isnan(point)):
            return None
        j, i = point.astype(int)
        # Clip the coordinates at array bounds
        if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):
            return None
        else:
            return arr[i, j]
class NonUniformImage(AxesImage):
    """
    An image with pixels on a rectilinear grid.

    In contrast to `.AxesImage`, where pixels are on a regular grid,
    NonUniformImage allows rows and columns with individual heights / widths.

    See also :doc:`/gallery/images_contours_and_fields/image_nonuniform`.
    """

    def __init__(self, ax, *, interpolation='nearest', **kwargs):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The Axes the image will belong to.
        interpolation : {'nearest', 'bilinear'}, default: 'nearest'
            The interpolation scheme used in the resampling.
        **kwargs
            All other keyword arguments are identical to those of `.AxesImage`.
        """
        super().__init__(ax, **kwargs)
        self.set_interpolation(interpolation)

    def _check_unsampled_image(self):
        """Return False. Do not use unsampled image."""
        return False

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        if unsampled:
            raise ValueError('unsampled not supported on NonUniformImage')
        # First convert the data to an (M, N, 4) uint8 RGBA array.
        A = self._A
        if A.ndim == 2:
            if A.dtype != np.uint8:
                A = self.to_rgba(A, bytes=True)
            else:
                # Grayscale uint8: replicate into four channels, then force
                # the alpha channel fully opaque.
                A = np.repeat(A[:, :, np.newaxis], 4, 2)
                A[:, :, 3] = 255
        else:
            if A.dtype != np.uint8:
                # Float RGB(A) was clipped to [0, 1] by
                # _normalize_image_array, so scaling by 255 is safe.
                A = (255*A).astype(np.uint8)
            if A.shape[2] == 3:
                # RGB: append a fully opaque alpha channel.
                B = np.zeros(tuple([*A.shape[0:2], 4]), np.uint8)
                B[:, :, 0:3] = A
                B[:, :, 3] = 255
                A = B
        l, b, r, t = self.axes.bbox.extents
        width = int(((round(r) + 0.5) - (round(l) - 0.5)) * magnification)
        height = int(((round(t) + 0.5) - (round(b) - 0.5)) * magnification)
        # Data coordinates of every output pixel column / row.
        invertedTransform = self.axes.transData.inverted()
        x_pix = invertedTransform.transform(
            [(x, b) for x in np.linspace(l, r, width)])[:, 0]
        y_pix = invertedTransform.transform(
            [(l, y) for y in np.linspace(b, t, height)])[:, 1]
        if self._interpolation == "nearest":
            # For each output pixel pick the source cell whose center is
            # nearest, via searchsorted on the midpoints between centers.
            x_mid = (self._Ax[:-1] + self._Ax[1:]) / 2
            y_mid = (self._Ay[:-1] + self._Ay[1:]) / 2
            x_int = x_mid.searchsorted(x_pix)
            y_int = y_mid.searchsorted(y_pix)
            # The following is equal to `A[y_int[:, None], x_int[None, :]]`,
            # but many times faster. Both casting to uint32 (to have an
            # effectively 1D array) and manual index flattening matter.
            im = (
                np.ascontiguousarray(A).view(np.uint32).ravel()[
                    np.add.outer(y_int * A.shape[1], x_int)]
                .view(np.uint8).reshape((height, width, 4)))
        else:  # self._interpolation == "bilinear"
            # Use np.interp to compute x_int/x_float has similar speed.
            x_int = np.clip(
                self._Ax.searchsorted(x_pix) - 1, 0, len(self._Ax) - 2)
            y_int = np.clip(
                self._Ay.searchsorted(y_pix) - 1, 0, len(self._Ay) - 2)
            idx_int = np.add.outer(y_int * A.shape[1], x_int)
            # Fractional position of each output pixel inside its grid cell.
            x_frac = np.clip(
                np.divide(x_pix - self._Ax[x_int], np.diff(self._Ax)[x_int],
                          dtype=np.float32),  # Downcasting helps with speed.
                0, 1)
            y_frac = np.clip(
                np.divide(y_pix - self._Ay[y_int], np.diff(self._Ay)[y_int],
                          dtype=np.float32),
                0, 1)
            # Bilinear weights of the four surrounding grid points.
            f00 = np.outer(1 - y_frac, 1 - x_frac)
            f10 = np.outer(y_frac, 1 - x_frac)
            f01 = np.outer(1 - y_frac, x_frac)
            f11 = np.outer(y_frac, x_frac)
            im = np.empty((height, width, 4), np.uint8)
            for chan in range(4):
                ac = A[:, :, chan].reshape(-1)  # reshape(-1) avoids a copy.
                # Shifting the buffer start (`ac[offset:]`) avoids an array
                # addition (`ac[idx_int + offset]`).
                buf = f00 * ac[idx_int]
                buf += f10 * ac[A.shape[1]:][idx_int]
                buf += f01 * ac[1:][idx_int]
                buf += f11 * ac[A.shape[1] + 1:][idx_int]
                im[:, :, chan] = buf  # Implicitly casts to uint8.
        return im, l, b, IdentityTransform()

    def set_data(self, x, y, A):
        """
        Set the grid for the pixel centers, and the pixel values.

        Parameters
        ----------
        x, y : 1D array-like
            Monotonic arrays of shapes (N,) and (M,), respectively, specifying
            pixel centers.
        A : array-like
            (M, N) `~numpy.ndarray` or masked array of values to be
            colormapped, or (M, N, 3) RGB array, or (M, N, 4) RGBA array.
        """
        A = self._normalize_image_array(A)
        x = np.array(x, np.float32)
        y = np.array(y, np.float32)
        if not (x.ndim == y.ndim == 1 and A.shape[:2] == y.shape + x.shape):
            raise TypeError("Axes don't match array shape")
        self._A = A
        self._Ax = x
        self._Ay = y
        # Invalidate the cached rendered image.
        self._imcache = None
        self.stale = True

    def set_array(self, *args):
        # Data must be set through set_data, which also takes the grid.
        raise NotImplementedError('Method not supported')

    def set_interpolation(self, s):
        """
        Parameters
        ----------
        s : {'nearest', 'bilinear'} or None
            If None, use :rc:`image.interpolation`.
        """
        if s is not None and s not in ('nearest', 'bilinear'):
            raise NotImplementedError('Only nearest neighbor and '
                                      'bilinear interpolations are supported')
        super().set_interpolation(s)

    def get_extent(self):
        # The extent spans the outermost pixel centers.
        if self._A is None:
            raise RuntimeError('Must set data first')
        return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]

    def set_filternorm(self, filternorm):
        # Filter normalization does not apply to this image type.
        pass

    def set_filterrad(self, filterrad):
        # Filter radius does not apply to this image type.
        pass

    def set_norm(self, norm):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        super().set_norm(norm)

    def set_cmap(self, cmap):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        super().set_cmap(cmap)

    def get_cursor_data(self, event):
        # docstring inherited
        x, y = event.xdata, event.ydata
        if (x < self._Ax[0] or x > self._Ax[-1] or
                y < self._Ay[0] or y > self._Ay[-1]):
            return None
        # searchsorted - 1 maps the cursor position to its cell index.
        j = np.searchsorted(self._Ax, x) - 1
        i = np.searchsorted(self._Ay, y) - 1
        return self._A[i, j]
class PcolorImage(AxesImage):
    """
    Make a pcolor-style plot with an irregular rectangular grid.

    This uses a variation of the original irregular image code,
    and it is used by pcolorfast for the corresponding grid type.
    """

    def __init__(self, ax,
                 x=None,
                 y=None,
                 A=None,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The Axes the image will belong to.
        x, y : 1D array-like, optional
            Monotonic arrays of length N+1 and M+1, respectively, specifying
            rectangle boundaries.  If not given, will default to
            ``range(N + 1)`` and ``range(M + 1)``, respectively.
        A : array-like
            The data to be color-coded. The interpretation depends on the
            shape:

            - (M, N) `~numpy.ndarray` or masked array: values to be colormapped
            - (M, N, 3): RGB array
            - (M, N, 4): RGBA array
        cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
            The Colormap instance or registered colormap name used to map
            scalar data to colors.
        norm : str or `~matplotlib.colors.Normalize`
            Maps luminance to 0-1.
        **kwargs : `~matplotlib.artist.Artist` properties
        """
        super().__init__(ax, norm=norm, cmap=cmap, colorizer=colorizer)
        self._internal_update(kwargs)
        if A is not None:
            self.set_data(x, y, A)

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        if unsampled:
            raise ValueError('unsampled not supported on PColorImage')

        if self._imcache is None:
            # Cache the RGBA conversion, surrounded by a 1-pixel border that
            # is indexed for out-of-range coordinates (see searchsorted below).
            A = self.to_rgba(self._A, bytes=True)
            self._imcache = np.pad(A, [(1, 1), (1, 1), (0, 0)], "constant")
        padded_A = self._imcache
        bg = mcolors.to_rgba(self.axes.patch.get_facecolor(), 0)
        bg = (np.array(bg) * 255).astype(np.uint8)
        # NOTE(review): `.all()` refreshes the border only when *every*
        # channel of the corner pixel differs from the background; `.any()`
        # may be the intended test -- confirm against upstream behavior.
        if (padded_A[0, 0] != bg).all():
            padded_A[[0, -1], :] = padded_A[:, [0, -1]] = bg

        l, b, r, t = self.axes.bbox.extents
        width = (round(r) + 0.5) - (round(l) - 0.5)
        height = (round(t) + 0.5) - (round(b) - 0.5)
        width = round(width * magnification)
        height = round(height * magnification)
        vl = self.axes.viewLim

        # Map each output pixel to a source rectangle; coordinates outside
        # the grid land on the padded border (index 0 or last).
        x_pix = np.linspace(vl.x0, vl.x1, width)
        y_pix = np.linspace(vl.y0, vl.y1, height)
        x_int = self._Ax.searchsorted(x_pix)
        y_int = self._Ay.searchsorted(y_pix)
        im = (  # See comment in NonUniformImage.make_image re: performance.
            padded_A.view(np.uint32).ravel()[
                np.add.outer(y_int * padded_A.shape[1], x_int)]
            .view(np.uint8).reshape((height, width, 4)))
        return im, l, b, IdentityTransform()

    def _check_unsampled_image(self):
        """Return False: PcolorImage is never drawn unsampled."""
        return False

    def set_data(self, x, y, A):
        """
        Set the grid for the rectangle boundaries, and the data values.

        Parameters
        ----------
        x, y : 1D array-like, optional
            Monotonic arrays of length N+1 and M+1, respectively, specifying
            rectangle boundaries.  If not given, will default to
            ``range(N + 1)`` and ``range(M + 1)``, respectively.
        A : array-like
            The data to be color-coded. The interpretation depends on the
            shape:

            - (M, N) `~numpy.ndarray` or masked array: values to be colormapped
            - (M, N, 3): RGB array
            - (M, N, 4): RGBA array
        """
        A = self._normalize_image_array(A)
        x = np.arange(0., A.shape[1] + 1) if x is None else np.array(x, float).ravel()
        y = np.arange(0., A.shape[0] + 1) if y is None else np.array(y, float).ravel()
        if A.shape[:2] != (y.size - 1, x.size - 1):
            raise ValueError(
                "Axes don't match array shape. Got %s, expected %s." %
                (A.shape[:2], (y.size - 1, x.size - 1)))
        # For efficient cursor readout, ensure x and y are increasing.
        if x[-1] < x[0]:
            x = x[::-1]
            A = A[:, ::-1]
        if y[-1] < y[0]:
            y = y[::-1]
            A = A[::-1]
        self._A = A
        self._Ax = x
        self._Ay = y
        # Invalidate the cached padded RGBA image.
        self._imcache = None
        self.stale = True

    def set_array(self, *args):
        # Data must be set through set_data, which also takes the grid.
        raise NotImplementedError('Method not supported')

    def get_cursor_data(self, event):
        # docstring inherited
        x, y = event.xdata, event.ydata
        if (x < self._Ax[0] or x > self._Ax[-1] or
                y < self._Ay[0] or y > self._Ay[-1]):
            return None
        # searchsorted - 1 maps the cursor position to its rectangle index.
        j = np.searchsorted(self._Ax, x) - 1
        i = np.searchsorted(self._Ay, y) - 1
        return self._A[i, j]
class FigureImage(_ImageBase):
    """An image attached to a figure."""

    # Draw below artists with higher zorder (e.g. Axes content) by default.
    zorder = 0

    # Class-level default: figure images always use 'nearest' interpolation.
    _interpolation = 'nearest'

    def __init__(self, fig,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 offsetx=0,
                 offsety=0,
                 origin=None,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
            The figure the image is attached to.
        cmap : `~matplotlib.colors.Colormap`, optional
            Colormap instance mapping scalar data to colors.
        norm : `~matplotlib.colors.Normalize`, optional
            Maps luminance to 0-1.
        offsetx, offsety : int, default: 0
            Pixel offsets added to the image extent (see `get_extent`).
        origin : {'upper', 'lower'}, optional
            Index origin convention, as for `.AxesImage`.
        **kwargs
            Optional `.Artist` keyword arguments.
        """
        super().__init__(
            None,
            norm=norm,
            cmap=cmap,
            colorizer=colorizer,
            origin=origin
        )
        self.set_figure(fig)
        # Pixel offsets of the image's lower-left corner within the figure.
        self.ox = offsetx
        self.oy = offsety
        self._internal_update(kwargs)
        self.magnification = 1.0

    def get_extent(self):
        """Return the image extent as tuple (left, right, bottom, top)."""
        numrows, numcols = self.get_size()
        # Pixel centers at integer positions, shifted by the (ox, oy) offset.
        return (-0.5 + self.ox, numcols-0.5 + self.ox,
                -0.5 + self.oy, numrows-0.5 + self.oy)

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        fig = self.get_figure(root=True)
        fac = renderer.dpi/fig.dpi
        # fac here is to account for pdf, eps, svg backends where
        # figure.dpi is set to 72. This means we need to scale the
        # image (using magnification) and offset it appropriately.
        bbox = Bbox([[self.ox/fac, self.oy/fac],
                     [(self.ox/fac + self._A.shape[1]),
                      (self.oy/fac + self._A.shape[0])]])
        width, height = fig.get_size_inches()
        width *= renderer.dpi
        height *= renderer.dpi
        # Clip to the full canvas.
        clip = Bbox([[0, 0], [width, height]])
        return self._make_image(
            self._A, bbox, bbox, clip, magnification=magnification / fac,
            unsampled=unsampled, round_to_pixel_border=False)

    def set_data(self, A):
        """Set the image array."""
        super().set_data(A)
        # The base set_data already marks the artist stale; re-setting here
        # is harmless.
        self.stale = True
class BboxImage(_ImageBase):
    """The Image class whose size is determined by the given bbox."""

    def __init__(self, bbox,
                 *,
                 cmap=None,
                 norm=None,
                 colorizer=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 **kwargs
                 ):
        """
        Parameters
        ----------
        bbox : `.BboxBase` or callable
            The bounding box the image fills, or a callable that maps a
            renderer to such a bbox (see `get_window_extent`).
        cmap : `~matplotlib.colors.Colormap`, optional
            Colormap instance mapping scalar data to colors.
        norm : `~matplotlib.colors.Normalize`, optional
            Maps luminance to 0-1.
        **kwargs
            Optional `.Artist` keyword arguments.
        """
        super().__init__(
            None,
            cmap=cmap,
            norm=norm,
            colorizer=colorizer,
            interpolation=interpolation,
            origin=origin,
            filternorm=filternorm,
            filterrad=filterrad,
            resample=resample,
            **kwargs
        )
        self.bbox = bbox

    def get_window_extent(self, renderer=None):
        """
        Return the display-space bounding box, resolving a callable *bbox*
        against *renderer* when necessary.

        Raises
        ------
        ValueError
            If ``self.bbox`` is neither a `.BboxBase` nor callable.
        """
        if renderer is None:
            renderer = self.get_figure()._get_renderer()

        if isinstance(self.bbox, BboxBase):
            return self.bbox
        elif callable(self.bbox):
            return self.bbox(renderer)
        else:
            raise ValueError("Unknown type of bbox")

    def contains(self, mouseevent):
        """Test whether the mouse event occurred within the image."""
        if self._different_canvas(mouseevent) or not self.get_visible():
            return False, {}
        x, y = mouseevent.x, mouseevent.y
        inside = self.get_window_extent().contains(x, y)
        return inside, {}

    def make_image(self, renderer, magnification=1.0, unsampled=False):
        # docstring inherited
        width, height = renderer.get_canvas_width_height()
        # Express the target bbox as fractions of the canvas; the
        # BboxTransformTo(clip) set below maps those fractions back to pixels.
        bbox_in = self.get_window_extent(renderer).frozen()
        bbox_in._points /= [width, height]
        bbox_out = self.get_window_extent(renderer)
        clip = Bbox([[0, 0], [width, height]])
        self._transform = BboxTransformTo(clip)
        return self._make_image(
            self._A,
            bbox_in, bbox_out, clip, magnification, unsampled=unsampled)
def imread(fname, format=None):
    """
    Read an image from a file into an array.

    .. note::

        This function exists for historical reasons. It is recommended to
        use `PIL.Image.open` instead for loading images.

    Parameters
    ----------
    fname : str or file-like
        The image file to read: a filename, a URL or a file-like object opened
        in read-binary mode.

        Passing a URL is deprecated. Please open the URL
        for reading and pass the result to Pillow, e.g. with
        ``np.array(PIL.Image.open(urllib.request.urlopen(url)))``.
    format : str, optional
        The image file format assumed for reading the data. The image is
        loaded as a PNG file if *format* is set to "png", if *fname* is a path
        or opened file with a ".png" extension, or if it is a URL. In all
        other cases, *format* is ignored and the format is auto-detected by
        `PIL.Image.open`.

    Returns
    -------
    `numpy.array`
        The image data. The returned array has shape

        - (M, N) for grayscale images.
        - (M, N, 3) for RGB images.
        - (M, N, 4) for RGBA images.

        PNG images are returned as float arrays (0-1). All other formats are
        returned as int arrays, with a bit depth determined by the file's
        contents.
    """
    # hide imports to speed initial import on systems with slow linkers
    from urllib import parse

    if format is None:
        if isinstance(fname, str):
            parsed = parse.urlparse(fname)
            # If the string is a URL (Windows paths appear as if they have a
            # length-1 scheme), assume png.
            if len(parsed.scheme) > 1:
                ext = 'png'
            else:
                ext = Path(fname).suffix.lower()[1:]
        elif hasattr(fname, 'geturl'):  # Returned by urlopen().
            # We could try to parse the url's path and use the extension, but
            # returning png is consistent with the block above. Note that this
            # if clause has to come before checking for fname.name as
            # urlopen("file:///...") also has a name attribute (with the fixed
            # value "").
            ext = 'png'
        elif hasattr(fname, 'name'):
            ext = Path(fname.name).suffix.lower()[1:]
        else:
            ext = 'png'
    else:
        ext = format
    # Open PNGs through the PNG plugin class directly so that the isinstance
    # check below selects the float-array conversion path.
    img_open = (
        PIL.PngImagePlugin.PngImageFile if ext == 'png' else PIL.Image.open)
    if isinstance(fname, str) and len(parse.urlparse(fname).scheme) > 1:
        # Pillow doesn't handle URLs directly.
        raise ValueError(
            "Please open the URL for reading and pass the "
            "result to Pillow, e.g. with "
            "``np.array(PIL.Image.open(urllib.request.urlopen(url)))``."
        )
    with img_open(fname) as image:
        return (_pil_png_to_float_array(image)
                if isinstance(image, PIL.PngImagePlugin.PngImageFile) else
                pil_to_array(image))
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
           origin=None, dpi=100, *, metadata=None, pil_kwargs=None):
    """
    Colormap and save an array as an image file.

    RGB(A) images are passed through.  Single channel images will be
    colormapped according to *cmap* and *norm*.

    .. note::

       If you want to save a single channel image as gray scale please use an
       image I/O library (such as pillow, tifffile, or imageio) directly.

    Parameters
    ----------
    fname : str or path-like or file-like
        A path or a file-like object to store the image in.
        If *format* is not set, then the output format is inferred from the
        extension of *fname*, if any, and from :rc:`savefig.format` otherwise.
        If *format* is set, it determines the output format.
    arr : array-like
        The image data. Accepts NumPy arrays or sequences
        (e.g., lists or tuples). The shape can be one of
        MxN (luminance), MxNx3 (RGB) or MxNx4 (RGBA).
    vmin, vmax : float, optional
        *vmin* and *vmax* set the color scaling for the image by fixing the
        values that map to the colormap color limits. If either *vmin*
        or *vmax* is None, that limit is determined from the *arr*
        min/max value.
    cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        A Colormap instance or registered colormap name. The colormap
        maps scalar data to colors. It is ignored for RGB(A) data.
    format : str, optional
        The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when this
        is unset is documented under *fname*.
    origin : {'upper', 'lower'}, default: :rc:`image.origin`
        Indicates whether the ``(0, 0)`` index of the array is in the upper
        left or lower left corner of the Axes.
    dpi : float
        The DPI to store in the metadata of the file. This does not affect the
        resolution of the output image. Depending on file format, this may be
        rounded to the nearest integer.
    metadata : dict, optional
        Metadata in the image file. The supported keys depend on the output
        format, see the documentation of the respective backends for more
        information.
        Currently only supported for "png", "pdf", "ps", "eps", and "svg".
    pil_kwargs : dict, optional
        Keyword arguments passed to `PIL.Image.Image.save`. If the 'pnginfo'
        key is present, it completely overrides *metadata*, including the
        default 'Software' key.
    """
    from matplotlib.figure import Figure

    # Normalizing input (e.g., list or tuples) to NumPy array if needed
    arr = np.asanyarray(arr)
    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if format is None:
        format = (Path(fname).suffix[1:] if isinstance(fname, str)
                  else mpl.rcParams["savefig.format"]).lower()
    if format in ["pdf", "ps", "eps", "svg"]:
        # Vector formats that are not handled by PIL.
        if pil_kwargs is not None:
            raise ValueError(
                f"Cannot use 'pil_kwargs' when saving to {format}")
        fig = Figure(dpi=dpi, frameon=False)
        fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin,
                     resize=True)
        fig.savefig(fname, dpi=dpi, format=format, transparent=True,
                    metadata=metadata)
    else:
        # Don't bother creating an image; this avoids rounding errors on the
        # size when dividing and then multiplying by dpi.
        if origin is None:
            origin = mpl.rcParams["image.origin"]
        else:
            _api.check_in_list(('upper', 'lower'), origin=origin)
        if origin == "lower":
            # Flip the rows so that index 0 ends up at the bottom.
            arr = arr[::-1]
        if (isinstance(arr, memoryview) and arr.format == "B"
                and arr.ndim == 3 and arr.shape[-1] == 4):
            # Such an ``arr`` would also be handled fine by sm.to_rgba below
            # (after casting with asarray), but it is useful to special-case it
            # because that's what backend_agg passes, and can be in fact used
            # as is, saving a few operations.
            rgba = arr
        else:
            # Colormap scalar data; RGB(A) data is passed through by to_rgba.
            sm = mcolorizer.Colorizer(cmap=cmap)
            sm.set_clim(vmin, vmax)
            rgba = sm.to_rgba(arr, bytes=True)
        if pil_kwargs is None:
            pil_kwargs = {}
        else:
            # we modify this below, so make a copy (don't modify caller's dict)
            pil_kwargs = pil_kwargs.copy()
        pil_shape = (rgba.shape[1], rgba.shape[0])
        # PIL.Image.frombuffer requires a C-contiguous buffer.
        rgba = np.require(rgba, requirements='C')
        image = PIL.Image.frombuffer(
            "RGBA", pil_shape, rgba, "raw", "RGBA", 0, 1)
        if format == "png":
            # Only use the metadata kwarg if pnginfo is not set, because the
            # semantics of duplicate keys in pnginfo is unclear.
            if "pnginfo" in pil_kwargs:
                if metadata:
                    _api.warn_external("'metadata' is overridden by the "
                                       "'pnginfo' entry in 'pil_kwargs'.")
            else:
                # NOTE(review): renders as "version<X.Y.Z>" with no space
                # (e.g. "Matplotlib version3.10") -- confirm whether a space
                # after "version" is intended before changing the metadata.
                metadata = {
                    "Software": (f"Matplotlib version{mpl.__version__}, "
                                 f"https://matplotlib.org/"),
                    **(metadata if metadata is not None else {}),
                }
                pil_kwargs["pnginfo"] = pnginfo = PIL.PngImagePlugin.PngInfo()
                for k, v in metadata.items():
                    if v is not None:
                        pnginfo.add_text(k, v)
        elif metadata is not None:
            raise ValueError(f"metadata not supported for format {format!r}")
        if format in ["jpg", "jpeg"]:
            format = "jpeg"  # Pillow doesn't recognize "jpg".
            # The output mode is RGB, so composite the RGBA image onto a
            # background filled with the configured facecolor.
            facecolor = mpl.rcParams["savefig.facecolor"]
            if cbook._str_equal(facecolor, "auto"):
                facecolor = mpl.rcParams["figure.facecolor"]
            color = tuple(int(x * 255) for x in mcolors.to_rgb(facecolor))
            background = PIL.Image.new("RGB", pil_shape, color)
            background.paste(image, image)
            image = background
        pil_kwargs.setdefault("format", format)
        pil_kwargs.setdefault("dpi", (dpi, dpi))
        image.save(fname, **pil_kwargs)
def pil_to_array(pilImage):
"""
Load a `PIL image`_ and return it as a numpy int array.
.. _PIL image: https://pillow.readthedocs.io/en/latest/reference/Image.html
Returns
-------
numpy.array
The array shape depends on the image type:
- (M, N) for grayscale images.
- (M, N, 3) for RGB images.
- (M, N, 4) for RGBA images.
"""
if pilImage.mode in ['RGBA', 'RGBX', 'RGB', 'L']:
# return MxNx4 RGBA, MxNx3 RBA, or MxN luminance array
return np.asarray(pilImage)
elif pilImage.mode.startswith('I;16'):
# return MxN luminance array of uint16
raw = pilImage.tobytes('raw', pilImage.mode)
if pilImage.mode.endswith('B'):
x = np.frombuffer(raw, '>u2')
else:
x = np.frombuffer(raw, '
venv\Lib\site-packages\matplotlib\inset.py
"""
The inset module defines the InsetIndicator class, which draws the rectangle and
connectors required for `.Axes.indicate_inset` and `.Axes.indicate_inset_zoom`.
"""
from . import _api, artist, transforms
from matplotlib.patches import ConnectionPatch, PathPatch, Rectangle
from matplotlib.path import Path
# Style properties that InsetIndicator keeps in sync between itself, its
# rectangle, and its connector patches (see _shared_setter and __init__).
_shared_properties = ('alpha', 'edgecolor', 'linestyle', 'linewidth')
class InsetIndicator(artist.Artist):
"""
An artist to highlight an area of interest.
An inset indicator is a rectangle on the plot at the position indicated by
*bounds* that optionally has lines that connect the rectangle to an inset
Axes (`.Axes.inset_axes`).
.. versionadded:: 3.10
"""
zorder = 4.99
def __init__(self, bounds=None, inset_ax=None, zorder=None, **kwargs):
    """
    Parameters
    ----------
    bounds : [x0, y0, width, height], optional
        Lower-left corner of rectangle to be marked, and its width
        and height.  If not set, the bounds will be calculated from the
        data limits of inset_ax, which must be supplied.
    inset_ax : `~.axes.Axes`, optional
        An optional inset Axes to draw connecting lines to.  Two lines are
        drawn connecting the indicator box to the inset Axes on corners
        chosen so as to not overlap with the indicator box.
    zorder : float, default: 4.99
        Drawing order of the rectangle and connector lines.  The default,
        4.99, is just below the default level of inset Axes.
    **kwargs
        Other keyword arguments are passed on to the `.Rectangle` patch.

    Raises
    ------
    ValueError
        If neither *bounds* nor *inset_ax* is supplied.
    """
    if bounds is None and inset_ax is None:
        raise ValueError("At least one of bounds or inset_ax must be supplied")

    self._inset_ax = inset_ax

    if bounds is None:
        # Work out bounds from inset_ax
        self._auto_update_bounds = True
        bounds = self._bounds_from_inset_ax()
    else:
        self._auto_update_bounds = False

    x, y, width, height = bounds
    self._rectangle = Rectangle((x, y), width, height, clip_on=False, **kwargs)

    # Connector positions cannot be calculated till the artist has been added
    # to an axes, so just make an empty list for now.
    self._connectors = []

    super().__init__()
    self.set_zorder(zorder)

    # Initial style properties for the artist should match the rectangle.
    for prop in _shared_properties:
        setattr(self, f'_{prop}', artist.getp(self._rectangle, prop))
def _shared_setter(self, prop, val):
    """
    Set one style property on this artist and propagate the same value to
    the rectangle and every connector patch.
    """
    setattr(self, f'_{prop}', val)
    children = [self._rectangle, *self._connectors]
    artist.setp(children, prop, val)
def set_alpha(self, alpha):
    # docstring inherited
    # Mirror the alpha onto the rectangle and connectors as well.
    self._shared_setter('alpha', alpha)
def set_edgecolor(self, color):
    """
    Set the edge color of the rectangle and the connectors.

    Parameters
    ----------
    color : :mpltype:`color` or None
    """
    self._shared_setter('edgecolor', color)
def set_color(self, c):
    """
    Set the edgecolor of the rectangle and the connectors, and the
    facecolor for the rectangle.

    Parameters
    ----------
    c : :mpltype:`color`
    """
    # Note: connectors only receive the edgecolor; facecolor applies to the
    # rectangle (the only child that has a face).
    self._shared_setter('edgecolor', c)
    self._shared_setter('facecolor', c)
def set_linewidth(self, w):
    """
    Set the linewidth in points of the rectangle and the connectors.

    Parameters
    ----------
    w : float or None
    """
    self._shared_setter('linewidth', w)
def set_linestyle(self, ls):
    """
    Set the linestyle of the rectangle and the connectors.

    ========================================== =================
    linestyle description
    ========================================== =================
    ``'-'`` or ``'solid'`` solid line
    ``'--'`` or ``'dashed'`` dashed line
    ``'-.'`` or ``'dashdot'`` dash-dotted line
    ``':'`` or ``'dotted'`` dotted line
    ``'none'``, ``'None'``, ``' '``, or ``''`` draw nothing
    ========================================== =================

    Alternatively a dash tuple of the following form can be provided::

        (offset, onoffseq)

    where ``onoffseq`` is an even length tuple of on and off ink in points.

    Parameters
    ----------
    ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
        The line style.
    """
    self._shared_setter('linestyle', ls)
def _bounds_from_inset_ax(self):
xlim = self._inset_ax.get_xlim()
ylim = self._inset_ax.get_ylim()
return (xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0])
def _update_connectors(self):
    """
    Create or reposition the four connector lines between the indicator
    rectangle (in data coordinates) and the corners of *inset_ax* (in that
    Axes' coordinates).
    """
    (x, y) = self._rectangle.get_xy()
    width = self._rectangle.get_width()
    height = self._rectangle.get_height()
    # On the first call there are no connectors yet; use None placeholders
    # so the zip below creates them.
    existing_connectors = self._connectors or [None] * 4
    # connect the inset_axes to the rectangle
    for xy_inset_ax, existing in zip([(0, 0), (0, 1), (1, 0), (1, 1)],
                                     existing_connectors):
        # inset_ax positions are in axes coordinates
        # The 0, 1 values define the four edges if the inset_ax
        # lower_left, upper_left, lower_right upper_right.
        ex, ey = xy_inset_ax
        # Mirror the corner selection when the parent axes are inverted so
        # connectors still attach to the visually matching corners.
        if self.axes.xaxis.get_inverted():
            ex = 1 - ex
        if self.axes.yaxis.get_inverted():
            ey = 1 - ey
        xy_data = x + ex * width, y + ey * height
        if existing is None:
            # Create new connection patch with styles inherited from the
            # parent artist.
            p = ConnectionPatch(
                xyA=xy_inset_ax, coordsA=self._inset_ax.transAxes,
                xyB=xy_data, coordsB=self.axes.transData,
                arrowstyle="-",
                edgecolor=self._edgecolor, alpha=self.get_alpha(),
                linestyle=self._linestyle, linewidth=self._linewidth)
            self._connectors.append(p)
        else:
            # Only update positioning of existing connection patch.  We
            # do not want to override any style settings made by the user.
            existing.xy1 = xy_inset_ax
            existing.xy2 = xy_data
            existing.coords1 = self._inset_ax.transAxes
            existing.coords2 = self.axes.transData
    # *existing* still holds the value from the last loop iteration, so this
    # branch runs only when the connectors were just created above: pick the
    # default visibility once and never override later user changes.
    if existing is None:
        # decide which two of the lines to keep visible....
        pos = self._inset_ax.get_position()
        bboxins = pos.transformed(self.get_figure(root=False).transSubfigure)
        rectbbox = transforms.Bbox.from_bounds(x, y, width, height).transformed(
            self._rectangle.get_transform())
        x0 = rectbbox.x0 < bboxins.x0
        x1 = rectbbox.x1 < bboxins.x1
        y0 = rectbbox.y0 < bboxins.y0
        y1 = rectbbox.y1 < bboxins.y1
        self._connectors[0].set_visible(x0 ^ y0)
        self._connectors[1].set_visible(x0 == y1)
        self._connectors[2].set_visible(x1 == y0)
        self._connectors[3].set_visible(x1 ^ y1)
@property
def rectangle(self):
    """`.Rectangle`: the indicator frame."""
    # Created once in __init__; shared with the connector/draw logic.
    return self._rectangle
@property
def connectors(self):
    """
    4-tuple of `.patches.ConnectionPatch` or None

    The four connector lines connecting to (lower_left, upper_left,
    lower_right upper_right) corners of *inset_ax*. Two lines are
    set with visibility to *False*, but the user can set the
    visibility to True if the automatic choice is not deemed correct.
    """
    # Without an inset Axes there is nothing to connect to.
    if self._inset_ax is None:
        return
    # When bounds were not supplied explicitly, re-derive the rectangle
    # from the inset Axes' current limits before positioning connectors.
    if self._auto_update_bounds:
        self._rectangle.set_bounds(self._bounds_from_inset_ax())
    self._update_connectors()
    return tuple(self._connectors)
def draw(self, renderer):
    # docstring inherited
    conn_same_style = []
    # Figure out which connectors have the same style as the box, so should
    # be drawn as a single path.
    for conn in self.connectors or []:
        if conn.get_visible():
            drawn = False
            for s in _shared_properties:
                if artist.getp(self._rectangle, s) != artist.getp(conn, s):
                    # Draw this connector by itself
                    conn.draw(renderer)
                    drawn = True
                    break
            if not drawn:
                # Connector has same style as box.
                conn_same_style.append(conn)
    if conn_same_style:
        # Since at least one connector has the same style as the rectangle, draw
        # them as a compound path.
        artists = [self._rectangle] + conn_same_style
        # Pre-transform each path so the compound path can be drawn with an
        # identity transform below.
        paths = [a.get_transform().transform_path(a.get_path()) for a in artists]
        path = Path.make_compound_path(*paths)
        # Create a temporary patch to draw the path.
        p = PathPatch(path)
        p.update_from(self._rectangle)
        p.set_transform(transforms.IdentityTransform())
        p.draw(renderer)
        return
    # Just draw the rectangle
    self._rectangle.draw(renderer)
@_api.deprecated(
    '3.10',
    message=('Since Matplotlib 3.10 indicate_inset_[zoom] returns a single '
             'InsetIndicator artist with a rectangle property and a connectors '
             'property. From 3.12 it will no longer be possible to unpack the '
             'return value into two elements.'))
def __getitem__(self, key):
    # Backwards-compatible tuple-style access: index 0 is the rectangle,
    # index 1 is the connectors tuple (or None).
    return [self._rectangle, self.connectors][key]
"""
Classes to layout elements in a `.Figure`.
Figures have a ``layout_engine`` property that holds a subclass of
`~.LayoutEngine` defined here (or *None* for no layout). At draw time
``figure.get_layout_engine().execute()`` is called, the goal of which is
usually to rearrange Axes on the figure to produce a pleasing layout. This is
like a ``draw`` callback but with two differences. First, when printing we
disable the layout engine for the final draw. Second, it is useful to know the
layout engine while the figure is being created. In particular, colorbars are
made differently with different layout engines (for historical reasons).
Matplotlib has two built-in layout engines:
- `.TightLayoutEngine` was the first layout engine added to Matplotlib.
See also :ref:`tight_layout_guide`.
- `.ConstrainedLayoutEngine` is more modern and generally gives better results.
See also :ref:`constrainedlayout_guide`.
Third parties can create their own layout engine by subclassing `.LayoutEngine`.
"""
from contextlib import nullcontext
import matplotlib as mpl
from matplotlib._constrained_layout import do_constrained_layout
from matplotlib._tight_layout import (get_subplotspec_list,
get_tight_layout_figure)
class LayoutEngine:
    """
    Base class for Matplotlib layout engines.

    A layout engine can be passed to a figure at instantiation or at any time
    with `~.figure.Figure.set_layout_engine`.  Once attached to a figure, the
    layout engine ``execute`` function is called at draw time by
    `~.figure.Figure.draw`, providing a special draw-time hook.

    .. note::
        Layout engines affect the creation of colorbars, so
        `~.figure.Figure.set_layout_engine` should be called before any
        colorbars are created.

    Two properties of `LayoutEngine` classes are consulted while manipulating
    the figure:

    - ``engine.colorbar_gridspec`` tells `.Figure.colorbar` whether to make
      the axes using the gridspec method (see `.colorbar.make_axes_gridspec`)
      or not (see `.colorbar.make_axes`);
    - ``engine.adjust_compatible`` stops `.Figure.subplots_adjust` from being
      run if it is not compatible with the layout engine.

    To implement a custom `LayoutEngine`:

    1. override ``_adjust_compatible`` and ``_colorbar_gridspec``
    2. override `LayoutEngine.set` to update *self._params*
    3. override `LayoutEngine.execute` with your implementation
    """
    # Subclasses must override these two flags; None means "not implemented".
    _adjust_compatible = None
    _colorbar_gridspec = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Engine parameters, populated by subclass set() implementations.
        self._params = {}

    def set(self, **kwargs):
        """
        Set the parameters for the layout engine.
        """
        raise NotImplementedError

    @property
    def colorbar_gridspec(self):
        """
        Return a boolean if the layout engine creates colorbars using a
        gridspec.
        """
        flag = self._colorbar_gridspec
        if flag is None:
            raise NotImplementedError
        return flag

    @property
    def adjust_compatible(self):
        """
        Return a boolean if the layout engine is compatible with
        `~.Figure.subplots_adjust`.
        """
        flag = self._adjust_compatible
        if flag is None:
            raise NotImplementedError
        return flag

    def get(self):
        """
        Return copy of the parameters for the layout engine.
        """
        return {**self._params}

    def execute(self, fig):
        """
        Execute the layout on the figure given by *fig*.
        """
        # Subclasses must implement this.
        raise NotImplementedError
class PlaceHolderLayoutEngine(LayoutEngine):
    """
    A layout engine that leaves the figure layout completely untouched.

    This `.LayoutEngine` acts as a placeholder when the user removes a layout
    engine, ensuring an incompatible `.LayoutEngine` cannot be set later.

    Parameters
    ----------
    adjust_compatible, colorbar_gridspec : bool
        Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever
        layout engine it is replacing.
    """
    def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs):
        # Copy the compatibility flags of the engine being replaced.
        self._adjust_compatible = adjust_compatible
        self._colorbar_gridspec = colorbar_gridspec
        super().__init__(**kwargs)

    def execute(self, fig):
        """
        Do nothing.
        """
        return
class TightLayoutEngine(LayoutEngine):
    """
    Implements the ``tight_layout`` geometry management.  See
    :ref:`tight_layout_guide` for details.
    """
    _adjust_compatible = True
    _colorbar_gridspec = True

    def __init__(self, *, pad=1.08, h_pad=None, w_pad=None,
                 rect=(0, 0, 1, 1), **kwargs):
        """
        Initialize tight_layout engine.

        Parameters
        ----------
        pad : float, default: 1.08
            Padding between the figure edge and the edges of subplots, as a
            fraction of the font size.
        h_pad, w_pad : float
            Padding (height/width) between edges of adjacent subplots.
            Defaults to *pad*.
        rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1).
            rectangle in normalized figure coordinates that the subplots
            (including labels) will fit into.
        """
        super().__init__(**kwargs)
        # Seed every key with None so the keys exist even when set() below
        # skips values that are None.
        self._params.update(dict.fromkeys(('pad', 'h_pad', 'w_pad', 'rect')))
        self.set(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)

    def execute(self, fig):
        """
        Execute tight_layout.

        This decides the subplot parameters given the padding that
        will allow the Axes labels to not be covered by other labels
        and Axes.

        Parameters
        ----------
        fig : `.Figure` to perform layout on.

        See Also
        --------
        .figure.Figure.tight_layout
        .pyplot.tight_layout
        """
        params = self._params
        renderer = fig._get_renderer()
        # Some renderers provide a context manager that suppresses actual
        # drawing while extents are measured; fall back to a no-op context.
        draw_disabled = getattr(renderer, "_draw_disabled", nullcontext)
        with draw_disabled():
            adjust_kwargs = get_tight_layout_figure(
                fig, fig.axes, get_subplotspec_list(fig.axes), renderer,
                pad=params['pad'], h_pad=params['h_pad'],
                w_pad=params['w_pad'], rect=params['rect'])
        if adjust_kwargs:
            fig.subplots_adjust(**adjust_kwargs)

    def set(self, *, pad=None, w_pad=None, h_pad=None, rect=None):
        """
        Set the pads for tight_layout.

        Parameters
        ----------
        pad : float
            Padding between the figure edge and the edges of subplots, as a
            fraction of the font size.
        w_pad, h_pad : float
            Padding (width/height) between edges of adjacent subplots.
            Defaults to *pad*.
        rect : tuple (left, bottom, right, top)
            rectangle in normalized figure coordinates that the subplots
            (including labels) will fit into.
        """
        # Only overwrite parameters that were explicitly provided.
        for name, value in dict(pad=pad, w_pad=w_pad,
                                h_pad=h_pad, rect=rect).items():
            if value is not None:
                self._params[name] = value
class ConstrainedLayoutEngine(LayoutEngine):
    """
    Implements the ``constrained_layout`` geometry management.  See
    :ref:`constrainedlayout_guide` for details.
    """
    _adjust_compatible = False
    _colorbar_gridspec = False

    def __init__(self, *, h_pad=None, w_pad=None,
                 hspace=None, wspace=None, rect=(0, 0, 1, 1),
                 compress=False, **kwargs):
        """
        Initialize ``constrained_layout`` settings.

        Parameters
        ----------
        h_pad, w_pad : float
            Padding around the Axes elements in inches.
            Default to :rc:`figure.constrained_layout.h_pad` and
            :rc:`figure.constrained_layout.w_pad`.
        hspace, wspace : float
            Fraction of the figure to dedicate to space between the
            axes.  These are evenly spread between the gaps between the Axes.
            A value of 0.2 for a three-column layout would have a space
            of 0.1 of the figure width between each column.
            If h/wspace < h/w_pad, then the pads are used instead.
            Default to :rc:`figure.constrained_layout.hspace` and
            :rc:`figure.constrained_layout.wspace`.
        rect : tuple of 4 floats
            Rectangle in figure coordinates to perform constrained layout in
            (left, bottom, width, height), each from 0-1.
        compress : bool
            Whether to shift Axes so that white space in between them is
            removed.  This is useful for simple grids of fixed-aspect Axes
            (e.g. a grid of images).  See :ref:`compressed_layout`.
        """
        super().__init__(**kwargs)
        # Start from the rcParams defaults...
        self.set(w_pad=mpl.rcParams['figure.constrained_layout.w_pad'],
                 h_pad=mpl.rcParams['figure.constrained_layout.h_pad'],
                 wspace=mpl.rcParams['figure.constrained_layout.wspace'],
                 hspace=mpl.rcParams['figure.constrained_layout.hspace'],
                 rect=(0, 0, 1, 1))
        # ...then overlay whatever the caller supplied (None values are
        # ignored by set()).
        self.set(w_pad=w_pad, h_pad=h_pad, wspace=wspace, hspace=hspace,
                 rect=rect)
        self._compress = compress

    def execute(self, fig):
        """
        Perform constrained_layout and move and resize Axes accordingly.

        Parameters
        ----------
        fig : `.Figure` to perform layout on.
        """
        params = self._params
        # Pads are stored in inches; convert to fractions of the current
        # figure size.
        width, height = fig.get_size_inches()
        return do_constrained_layout(fig,
                                     w_pad=params['w_pad'] / width,
                                     h_pad=params['h_pad'] / height,
                                     wspace=params['wspace'],
                                     hspace=params['hspace'],
                                     rect=params['rect'],
                                     compress=self._compress)

    def set(self, *, h_pad=None, w_pad=None,
            hspace=None, wspace=None, rect=None):
        """
        Set the pads for constrained_layout.

        Parameters
        ----------
        h_pad, w_pad : float
            Padding around the Axes elements in inches.
            Default to :rc:`figure.constrained_layout.h_pad` and
            :rc:`figure.constrained_layout.w_pad`.
        hspace, wspace : float
            Fraction of the figure to dedicate to space between the
            axes.  These are evenly spread between the gaps between the Axes.
            A value of 0.2 for a three-column layout would have a space
            of 0.1 of the figure width between each column.
            If h/wspace < h/w_pad, then the pads are used instead.
            Default to :rc:`figure.constrained_layout.hspace` and
            :rc:`figure.constrained_layout.wspace`.
        rect : tuple of 4 floats
            Rectangle in figure coordinates to perform constrained layout in
            (left, bottom, width, height), each from 0-1.
        """
        # Only overwrite parameters that were explicitly provided.
        for name, value in dict(h_pad=h_pad, w_pad=w_pad, hspace=hspace,
                                wspace=wspace, rect=rect).items():
            if value is not None:
                self._params[name] = value
# --- File: venv\Lib\site-packages\matplotlib\legend.py ---
"""
The legend module defines the Legend class, which is responsible for
drawing legends associated with Axes and/or figures.
.. important::
It is unlikely that you would ever create a Legend instance manually.
Most users would normally create a legend via the `~.Axes.legend`
function. For more details on legends there is also a :ref:`legend_guide`.
The `Legend` class is a container of legend handles and legend texts.
The legend handler map specifies how to create legend handles from artists
(lines, patches, etc.) in the Axes or figures. Default legend handlers are
defined in the :mod:`~matplotlib.legend_handler` module. While not all artist
types are covered by the default legend handlers, custom legend handlers can be
defined to support arbitrary objects.
See the :ref:`legend_guide` for more information.
"""
import itertools
import logging
import numbers
import time
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring, cbook, colors, offsetbox
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.cbook import silent_list
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import (Patch, Rectangle, Shadow, FancyBboxPatch,
StepPatch)
from matplotlib.collections import (
Collection, CircleCollection, LineCollection, PathCollection,
PolyCollection, RegularPolyCollection)
from matplotlib.text import Text
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.transforms import BboxTransformTo, BboxTransformFrom
from matplotlib.offsetbox import (
AnchoredOffsetbox, DraggableOffsetBox,
HPacker, VPacker,
DrawingArea, TextArea,
)
from matplotlib.container import ErrorbarContainer, BarContainer, StemContainer
from . import legend_handler
class DraggableLegend(DraggableOffsetBox):
    def __init__(self, legend, use_blit=False, update="loc"):
        """
        Wrapper around a `.Legend` to support mouse dragging.

        Parameters
        ----------
        legend : `.Legend`
            The `.Legend` instance to wrap.
        use_blit : bool, optional
            Use blitting for faster image composition. For details see
            :ref:`func-animation`.
        update : {'loc', 'bbox'}, optional
            If "loc", update the *loc* parameter of the legend upon finalizing.
            If "bbox", update the *bbox_to_anchor* parameter.
        """
        self.legend = legend
        # Reject anything other than the two supported update modes early.
        _api.check_in_list(["loc", "bbox"], update=update)
        self._update = update
        super().__init__(legend, legend._legend_box, use_blit=use_blit)

    def finalize_offset(self):
        # Persist the final drag position into the legend, in the form
        # selected at construction time.
        loc_in_canvas = self.get_loc_in_canvas()
        if self._update == "loc":
            self._update_loc(loc_in_canvas)
        else:  # "bbox" -- __init__ guarantees no other value is possible.
            self._update_bbox_to_anchor(loc_in_canvas)

    def _update_loc(self, loc_in_canvas):
        anchor_bbox = self.legend.get_bbox_to_anchor()
        # A zero-width or zero-height anchor bbox makes the inverse
        # transform ill-defined; fall back to the default bbox_to_anchor.
        if anchor_bbox.width == 0 or anchor_bbox.height == 0:
            self.legend.set_bbox_to_anchor(None)
            anchor_bbox = self.legend.get_bbox_to_anchor()
        to_anchor_coords = BboxTransformFrom(anchor_bbox)
        self.legend._loc = tuple(to_anchor_coords.transform(loc_in_canvas))

    def _update_bbox_to_anchor(self, loc_in_canvas):
        self.legend.set_bbox_to_anchor(
            self.legend.axes.transAxes.transform(loc_in_canvas))
# Shared chunk of the legend() docstring: every keyword argument except *loc*.
# It is registered below under several names (via _docstring.interpd) so that
# Axes.legend, Figure.legend and Legend.__init__ can all interpolate it.
_legend_kw_doc_base = """
bbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats
Box that is used to position the legend in conjunction with *loc*.
Defaults to ``axes.bbox`` (if called as a method to `.Axes.legend`) or
``figure.bbox`` (if ``figure.legend``). This argument allows arbitrary
placement of the legend.
Bbox coordinates are interpreted in the coordinate system given by
*bbox_transform*, with the default transform
Axes or Figure coordinates, depending on which ``legend`` is called.
If a 4-tuple or `.BboxBase` is given, then it specifies the bbox
``(x, y, width, height)`` that the legend is placed in.
To put the legend in the best location in the bottom right
quadrant of the Axes (or figure)::
loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5)
A 2-tuple ``(x, y)`` places the corner of the legend specified by *loc* at
x, y. For example, to put the legend's upper right-hand corner in the
center of the Axes (or figure) the following keywords can be used::
loc='upper right', bbox_to_anchor=(0.5, 0.5)
ncols : int, default: 1
The number of columns that the legend has.
For backward compatibility, the spelling *ncol* is also supported
but it is discouraged. If both are given, *ncols* takes precedence.
prop : None or `~matplotlib.font_manager.FontProperties` or dict
The font properties of the legend. If None (default), the current
:data:`matplotlib.rcParams` will be used.
fontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \
'x-large', 'xx-large'}
The font size of the legend. If the value is numeric the size will be the
absolute font size in points. String values are relative to the current
default font size. This argument is only used if *prop* is not specified.
labelcolor : str or list, default: :rc:`legend.labelcolor`
The color of the text in the legend. Either a valid color string
(for example, 'red'), or a list of color strings. The labelcolor can
also be made to match the color of the line or marker using 'linecolor',
'markerfacecolor' (or 'mfc'), or 'markeredgecolor' (or 'mec').
Labelcolor can be set globally using :rc:`legend.labelcolor`. If None,
use :rc:`text.color`.
numpoints : int, default: :rc:`legend.numpoints`
The number of marker points in the legend when creating a legend
entry for a `.Line2D` (line).
scatterpoints : int, default: :rc:`legend.scatterpoints`
The number of marker points in the legend when creating
a legend entry for a `.PathCollection` (scatter plot).
scatteryoffsets : iterable of floats, default: ``[0.375, 0.5, 0.3125]``
The vertical offset (relative to the font size) for the markers
created for a scatter plot legend entry. 0.0 is at the base the
legend text, and 1.0 is at the top. To draw all markers at the
same height, set to ``[0.5]``.
markerscale : float, default: :rc:`legend.markerscale`
The relative size of legend markers compared to the originally drawn ones.
markerfirst : bool, default: True
If *True*, legend marker is placed to the left of the legend label.
If *False*, legend marker is placed to the right of the legend label.
reverse : bool, default: False
If *True*, the legend labels are displayed in reverse order from the input.
If *False*, the legend labels are displayed in the same order as the input.
.. versionadded:: 3.7
frameon : bool, default: :rc:`legend.frameon`
Whether the legend should be drawn on a patch (frame).
fancybox : bool, default: :rc:`legend.fancybox`
Whether round edges should be enabled around the `.FancyBboxPatch` which
makes up the legend's background.
shadow : None, bool or dict, default: :rc:`legend.shadow`
Whether to draw a shadow behind the legend.
The shadow can be configured using `.Patch` keywords.
Customization via :rc:`legend.shadow` is currently not supported.
framealpha : float, default: :rc:`legend.framealpha`
The alpha transparency of the legend's background.
If *shadow* is activated and *framealpha* is ``None``, the default value is
ignored.
facecolor : "inherit" or color, default: :rc:`legend.facecolor`
The legend's background color.
If ``"inherit"``, use :rc:`axes.facecolor`.
edgecolor : "inherit" or color, default: :rc:`legend.edgecolor`
The legend's background patch edge color.
If ``"inherit"``, use :rc:`axes.edgecolor`.
mode : {"expand", None}
If *mode* is set to ``"expand"`` the legend will be horizontally
expanded to fill the Axes area (or *bbox_to_anchor* if defines
the legend's size).
bbox_transform : None or `~matplotlib.transforms.Transform`
The transform for the bounding box (*bbox_to_anchor*). For a value
of ``None`` (default) the Axes'
:data:`~matplotlib.axes.Axes.transAxes` transform will be used.
title : str or None
The legend's title. Default is no title (``None``).
title_fontproperties : None or `~matplotlib.font_manager.FontProperties` or dict
The font properties of the legend's title. If None (default), the
*title_fontsize* argument will be used if present; if *title_fontsize* is
also None, the current :rc:`legend.title_fontsize` will be used.
title_fontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \
'x-large', 'xx-large'}, default: :rc:`legend.title_fontsize`
The font size of the legend's title.
Note: This cannot be combined with *title_fontproperties*. If you want
to set the fontsize alongside other font properties, use the *size*
parameter in *title_fontproperties*.
alignment : {'center', 'left', 'right'}, default: 'center'
The alignment of the legend title and the box of entries. The entries
are aligned as a single block, so that markers always lined up.
borderpad : float, default: :rc:`legend.borderpad`
The fractional whitespace inside the legend border, in font-size units.
labelspacing : float, default: :rc:`legend.labelspacing`
The vertical space between the legend entries, in font-size units.
handlelength : float, default: :rc:`legend.handlelength`
The length of the legend handles, in font-size units.
handleheight : float, default: :rc:`legend.handleheight`
The height of the legend handles, in font-size units.
handletextpad : float, default: :rc:`legend.handletextpad`
The pad between the legend handle and text, in font-size units.
borderaxespad : float, default: :rc:`legend.borderaxespad`
The pad between the Axes and legend border, in font-size units.
columnspacing : float, default: :rc:`legend.columnspacing`
The spacing between columns, in font-size units.
handler_map : dict or None
The custom dictionary mapping instances or types to a legend
handler. This *handler_map* updates the default handler map
found at `matplotlib.legend.Legend.get_legend_handler_map`.
draggable : bool, default: False
Whether the legend can be dragged with the mouse.
"""
# Template for the *loc* parameter documentation.  The {default}, {parent},
# {best} and {outside} placeholders are filled in differently for Axes and
# Figure legends by the .format() calls below.
_loc_doc_base = """
loc : str or pair of floats, default: {default}
The location of the legend.
The strings ``'upper left'``, ``'upper right'``, ``'lower left'``,
``'lower right'`` place the legend at the corresponding corner of the
{parent}.
The strings ``'upper center'``, ``'lower center'``, ``'center left'``,
``'center right'`` place the legend at the center of the corresponding edge
of the {parent}.
The string ``'center'`` places the legend at the center of the {parent}.
{best}
The location can also be a 2-tuple giving the coordinates of the lower-left
corner of the legend in {parent} coordinates (in which case *bbox_to_anchor*
will be ignored).
For back-compatibility, ``'center right'`` (but no other location) can also
be spelled ``'right'``, and each "string" location can also be given as a
numeric value:
================== =============
Location String Location Code
================== =============
'best' (Axes only) 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
================== =============
{outside}"""
# Description of loc='best'; only interpolated for Axes legends, since 'best'
# is not supported for Figure legends.
_loc_doc_best = """
The string ``'best'`` places the legend at the location, among the nine
locations defined so far, with the minimum overlap with other drawn
artists. This option can be quite slow for plots with large amounts of
data; your plotting speed may benefit from providing a specific location.
"""
# Axes-legend variant: 'best' is documented, 'outside' is not available.
_legend_kw_axes_st = (
_loc_doc_base.format(parent='axes', default=':rc:`legend.loc`',
best=_loc_doc_best, outside='') +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_axes=_legend_kw_axes_st)
# 'outside ...' loc codes; only meaningful with constrained layout and only
# interpolated into Figure-legend docstrings.
_outside_doc = """
If a figure is using the constrained layout manager, the string codes
of the *loc* keyword argument can get better layout behaviour using the
prefix 'outside'. There is ambiguity at the corners, so 'outside
upper right' will make space for the legend above the rest of the
axes in the layout, and 'outside right upper' will make space on the
right side of the layout. In addition to the values of *loc*
listed above, we have 'outside right upper', 'outside right lower',
'outside left upper', and 'outside left lower'. See
:ref:`legend_guide` for more details.
"""
# Figure-legend variant: no 'best', but 'outside' locations are documented.
_legend_kw_figure_st = (
_loc_doc_base.format(parent='figure', default="'upper right'",
best='', outside=_outside_doc) +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_figure=_legend_kw_figure_st)
# Combined variant used where a docstring covers both Axes and Figure legends
# (e.g. Legend.__init__).
_legend_kw_both_st = (
_loc_doc_base.format(parent='axes/figure',
default=":rc:`legend.loc` for Axes, 'upper right' for Figure",
best=_loc_doc_best, outside=_outside_doc) +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_doc=_legend_kw_both_st)
# Variant with only the *loc* documentation (no other keywords), used by
# Legend.set_loc.
_legend_kw_set_loc_st = (
_loc_doc_base.format(parent='axes/figure',
default=":rc:`legend.loc` for Axes, 'upper right' for Figure",
best=_loc_doc_best, outside=_outside_doc))
_docstring.interpd.register(_legend_kw_set_loc_doc=_legend_kw_set_loc_st)
class Legend(Artist):
"""
Place a legend on the figure/axes.
"""
# 'best' is only implemented for Axes legends
codes = {'best': 0, **AnchoredOffsetbox.codes}
zorder = 5
def __str__(self):
return "Legend"
@_docstring.interpd
def __init__(
self, parent, handles, labels,
*,
loc=None,
numpoints=None, # number of points in the legend line
markerscale=None, # relative size of legend markers vs. original
markerfirst=True, # left/right ordering of legend marker and label
reverse=False, # reverse ordering of legend marker and label
scatterpoints=None, # number of scatter points
scatteryoffsets=None,
prop=None, # properties for the legend texts
fontsize=None, # keyword to set font size directly
labelcolor=None, # keyword to set the text color
# spacing & pad defined as a fraction of the font-size
borderpad=None, # whitespace inside the legend border
labelspacing=None, # vertical space between the legend entries
handlelength=None, # length of the legend handles
handleheight=None, # height of the legend handles
handletextpad=None, # pad between the legend handle and text
borderaxespad=None, # pad between the Axes and legend border
columnspacing=None, # spacing between columns
ncols=1, # number of columns
mode=None, # horizontal distribution of columns: None or "expand"
fancybox=None, # True: fancy box, False: rounded box, None: rcParam
shadow=None,
title=None, # legend title
title_fontsize=None, # legend title font size
framealpha=None, # set frame alpha
edgecolor=None, # frame patch edgecolor
facecolor=None, # frame patch facecolor
bbox_to_anchor=None, # bbox to which the legend will be anchored
bbox_transform=None, # transform for the bbox
frameon=None, # draw frame
handler_map=None,
title_fontproperties=None, # properties for the legend title
alignment="center", # control the alignment within the legend box
ncol=1, # synonym for ncols (backward compatibility)
draggable=False # whether the legend can be dragged with the mouse
):
"""
Parameters
----------
parent : `~matplotlib.axes.Axes` or `.Figure`
The artist that contains the legend.
handles : list of (`.Artist` or tuple of `.Artist`)
A list of Artists (lines, patches) to be added to the legend.
labels : list of str
A list of labels to show next to the artists. The length of handles
and labels should be the same. If they are not, they are truncated
to the length of the shorter list.
Other Parameters
----------------
%(_legend_kw_doc)s
Attributes
----------
legend_handles
List of `.Artist` objects added as legend entries.
.. versionadded:: 3.7
"""
# local import only to avoid circularity
from matplotlib.axes import Axes
from matplotlib.figure import FigureBase
super().__init__()
if prop is None:
self.prop = FontProperties(size=mpl._val_or_rc(fontsize, "legend.fontsize"))
else:
self.prop = FontProperties._from_any(prop)
if isinstance(prop, dict) and "size" not in prop:
self.prop.set_size(mpl.rcParams["legend.fontsize"])
self._fontsize = self.prop.get_size_in_points()
self.texts = []
self.legend_handles = []
self._legend_title_box = None
#: A dictionary with the extra handler mappings for this Legend
#: instance.
self._custom_handler_map = handler_map
self.numpoints = mpl._val_or_rc(numpoints, 'legend.numpoints')
self.markerscale = mpl._val_or_rc(markerscale, 'legend.markerscale')
self.scatterpoints = mpl._val_or_rc(scatterpoints, 'legend.scatterpoints')
self.borderpad = mpl._val_or_rc(borderpad, 'legend.borderpad')
self.labelspacing = mpl._val_or_rc(labelspacing, 'legend.labelspacing')
self.handlelength = mpl._val_or_rc(handlelength, 'legend.handlelength')
self.handleheight = mpl._val_or_rc(handleheight, 'legend.handleheight')
self.handletextpad = mpl._val_or_rc(handletextpad, 'legend.handletextpad')
self.borderaxespad = mpl._val_or_rc(borderaxespad, 'legend.borderaxespad')
self.columnspacing = mpl._val_or_rc(columnspacing, 'legend.columnspacing')
self.shadow = mpl._val_or_rc(shadow, 'legend.shadow')
if reverse:
labels = [*reversed(labels)]
handles = [*reversed(handles)]
handles = list(handles)
if len(handles) < 2:
ncols = 1
self._ncols = ncols if ncols != 1 else ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = self.scatterpoints // len(self._scatteryoffsets) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets,
reps)[:self.scatterpoints]
# _legend_box is a VPacker instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent, Axes):
self.isaxes = True
self.axes = parent
self.set_figure(parent.get_figure(root=False))
elif isinstance(parent, FigureBase):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError(
"Legend needs either Axes or FigureBase as parent"
)
self.parent = parent
self._mode = mode
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
# Figure out if self.shadow is valid
# If shadow was None, rcParams loads False
# So it shouldn't be None here
self._shadow_props = {'ox': 2, 'oy': -2} # default location offsets
if isinstance(self.shadow, dict):
self._shadow_props.update(self.shadow)
self.shadow = True
elif self.shadow in (0, 1, True, False):
self.shadow = bool(self.shadow)
else:
raise ValueError(
'Legend shadow must be a dict or bool, not '
f'{self.shadow!r} of type {type(self.shadow)}.'
)
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
facecolor = mpl._val_or_rc(facecolor, "legend.facecolor")
if facecolor == 'inherit':
facecolor = mpl.rcParams["axes.facecolor"]
edgecolor = mpl._val_or_rc(edgecolor, "legend.edgecolor")
if edgecolor == 'inherit':
edgecolor = mpl.rcParams["axes.edgecolor"]
fancybox = mpl._val_or_rc(fancybox, "legend.fancybox")
self.legendPatch = FancyBboxPatch(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
# If shadow is used, default to alpha=1 (#8943).
alpha=(framealpha if framealpha is not None
else 1 if shadow
else mpl.rcParams["legend.framealpha"]),
# The width and height of the legendPatch will be set (in draw())
# to the length that includes the padding. Thus we set pad=0 here.
boxstyle=("round,pad=0,rounding_size=0.2" if fancybox
else "square,pad=0"),
mutation_scale=self._fontsize,
snap=True,
visible=mpl._val_or_rc(frameon, "legend.frameon")
)
self._set_artist_props(self.legendPatch)
_api.check_in_list(["center", "left", "right"], alignment=alignment)
self._alignment = alignment
# init with null renderer
self._init_legend_box(handles, labels, markerfirst)
# Set legend location
self.set_loc(loc)
# figure out title font properties:
if title_fontsize is not None and title_fontproperties is not None:
raise ValueError(
"title_fontsize and title_fontproperties can't be specified "
"at the same time. Only use one of them. ")
title_prop_fp = FontProperties._from_any(title_fontproperties)
if isinstance(title_fontproperties, dict):
if "size" not in title_fontproperties:
title_fontsize = mpl.rcParams["legend.title_fontsize"]
title_prop_fp.set_size(title_fontsize)
elif title_fontsize is not None:
title_prop_fp.set_size(title_fontsize)
elif not isinstance(title_fontproperties, FontProperties):
title_fontsize = mpl.rcParams["legend.title_fontsize"]
title_prop_fp.set_size(title_fontsize)
self.set_title(title, prop=title_prop_fp)
self._draggable = None
self.set_draggable(state=draggable)
# set the text color
color_getters = { # getter function depends on line or patch
'linecolor': ['get_color', 'get_facecolor'],
'markerfacecolor': ['get_markerfacecolor', 'get_facecolor'],
'mfc': ['get_markerfacecolor', 'get_facecolor'],
'markeredgecolor': ['get_markeredgecolor', 'get_edgecolor'],
'mec': ['get_markeredgecolor', 'get_edgecolor'],
}
labelcolor = mpl._val_or_rc(labelcolor, 'legend.labelcolor')
if labelcolor is None:
labelcolor = mpl.rcParams['text.color']
if isinstance(labelcolor, str) and labelcolor in color_getters:
getter_names = color_getters[labelcolor]
for handle, text in zip(self.legend_handles, self.texts):
try:
if handle.get_array() is not None:
continue
except AttributeError:
pass
for getter_name in getter_names:
try:
color = getattr(handle, getter_name)()
if isinstance(color, np.ndarray):
if (
color.shape[0] == 1
or np.isclose(color, color[0]).all()
):
text.set_color(color[0])
else:
pass
else:
text.set_color(color)
break
except AttributeError:
pass
elif cbook._str_equal(labelcolor, 'none'):
for text in self.texts:
text.set_color(labelcolor)
elif np.iterable(labelcolor):
for text, color in zip(self.texts,
itertools.cycle(
colors.to_rgba_array(labelcolor))):
text.set_color(color)
else:
raise ValueError(f"Invalid labelcolor: {labelcolor!r}")
def _set_artist_props(self, a):
    """
    Set the boilerplate props for artists added to Axes.

    Attaches the legend's figure, and — for Axes-parented legends — the
    Axes and the legend's transform, to artist *a*.
    """
    a.set_figure(self.get_figure(root=False))
    if self.isaxes:
        # Only Axes legends have an Axes to attach to.
        a.axes = self.axes

    a.set_transform(self.get_transform())
@_docstring.interpd
def set_loc(self, loc=None):
    """
    Set the location of the legend.

    .. versionadded:: 3.8

    Parameters
    ----------
    %(_legend_kw_set_loc_doc)s
    """
    loc0 = loc  # keep the caller's original value for error messages
    self._loc_used_default = loc is None
    if loc is None:
        loc = mpl.rcParams["legend.loc"]
        # "best" is not implemented for figure legends; fall back.
        if not self.isaxes and loc in [0, 'best']:
            loc = 'upper right'

    type_err_message = ("loc must be string, coordinate tuple, or"
                        f" an integer 0-10, not {loc!r}")

    # handle outside legends:
    self._outside_loc = None
    if isinstance(loc, str):
        if loc.split()[0] == 'outside':
            # strip outside:
            loc = loc.split('outside ')[1]
            # strip "center" at the beginning
            self._outside_loc = loc.replace('center ', '')
            # strip first
            self._outside_loc = self._outside_loc.split()[0]
            locs = loc.split()
            if len(locs) > 1 and locs[0] in ('right', 'left'):
                # locs doesn't accept "left upper", etc, so swap
                if locs[0] != 'center':
                    locs = locs[::-1]
                loc = locs[0] + ' ' + locs[1]
        # check that loc is in acceptable strings
        loc = _api.check_getitem(self.codes, loc=loc)
    elif np.iterable(loc):
        # coerce iterable into tuple
        loc = tuple(loc)
        # validate the tuple represents Real coordinates
        if len(loc) != 2 or not all(isinstance(e, numbers.Real) for e in loc):
            raise ValueError(type_err_message)
    elif isinstance(loc, int):
        # validate the integer represents a string numeric value
        if loc < 0 or loc > 10:
            raise ValueError(type_err_message)
    else:
        # all other cases are invalid values of loc
        raise ValueError(type_err_message)

    if self.isaxes and self._outside_loc:
        raise ValueError(
            f"'outside' option for loc='{loc0}' keyword argument only "
            "works for figure legends")

    if not self.isaxes and loc == 0:
        raise ValueError(
            "Automatic legend placement (loc='best') not implemented for "
            "figure legend")

    # _set_loc unconditionally clears the "used default" flag; restore it
    # so that loc=None is still remembered as a default placement.
    tmp = self._loc_used_default
    self._set_loc(loc)
    self._loc_used_default = tmp  # ignore changes done by _set_loc
def _set_loc(self, loc):
    # find_offset function will be provided to _legend_box and
    # _legend_box will draw itself at the location of the return
    # value of the find_offset.
    # Also the setter half of the ``_loc`` property, so any direct
    # assignment to ``self._loc`` lands here.
    self._loc_used_default = False
    self._loc_real = loc
    self.stale = True
    self._legend_box.set_offset(self._findoffset)
def set_ncols(self, ncols):
    """Set the number of columns the legend entries are arranged into."""
    self._ncols = ncols
def _get_loc(self):
    # Getter half of the ``_loc`` property below.
    return self._loc_real

# ``_loc`` routes every write through `_set_loc` (which marks the legend
# stale and re-registers the offset callback) and every read through
# `_get_loc`.
_loc = property(_get_loc, _set_loc)
def _findoffset(self, width, height, xdescent, ydescent, renderer):
    """Helper function to locate the legend.

    Registered as the offset callback of ``_legend_box``; returns the
    display-space (x, y) origin for a legend box of the given size.
    """
    if self._loc == 0:  # "best".
        x, y = self._find_best_position(width, height, renderer)
    elif self._loc in Legend.codes.values():  # Fixed location.
        bbox = Bbox.from_bounds(0, 0, width, height)
        x, y = self._get_anchored_bbox(self._loc, bbox,
                                       self.get_bbox_to_anchor(),
                                       renderer)
    else:  # Axes or figure coordinates.
        # self._loc is an (fx, fy) fraction of the anchor bbox.
        fx, fy = self._loc
        bbox = self.get_bbox_to_anchor()
        x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy

    return x + xdescent, y + ydescent
@allow_rasterization
def draw(self, renderer):
    # docstring inherited
    if not self.get_visible():
        return

    renderer.open_group('legend', gid=self.get_gid())

    fontsize = renderer.points_to_pixels(self._fontsize)

    # if mode == fill, set the width of the legend_box to the
    # width of the parent (minus pads)
    if self._mode in ["expand"]:
        pad = 2 * (self.borderaxespad + self.borderpad) * fontsize
        self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)

    # update the location and size of the legend. This needs to
    # be done in any case to clip the figure right.
    bbox = self._legend_box.get_window_extent(renderer)
    self.legendPatch.set_bounds(bbox.bounds)
    self.legendPatch.set_mutation_scale(fontsize)

    # self.shadow is validated in __init__
    # So by here it is a bool and self._shadow_props contains any configs
    # The shadow is drawn first so it sits underneath the frame patch.
    if self.shadow:
        Shadow(self.legendPatch, **self._shadow_props).draw(renderer)

    self.legendPatch.draw(renderer)
    self._legend_box.draw(renderer)

    renderer.close_group('legend')
    self.stale = False
# _default_handler_map defines the default mapping between plot
# elements and the legend handlers.  Lookup happens in
# `get_legend_handler`, which walks the handle's MRO, so an entry for a
# base class (e.g. `Patch`) also covers its subclasses unless a more
# specific entry exists.
_default_handler_map = {
    StemContainer: legend_handler.HandlerStem(),
    ErrorbarContainer: legend_handler.HandlerErrorbar(),
    Line2D: legend_handler.HandlerLine2D(),
    Patch: legend_handler.HandlerPatch(),
    StepPatch: legend_handler.HandlerStepPatch(),
    LineCollection: legend_handler.HandlerLineCollection(),
    RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),
    CircleCollection: legend_handler.HandlerCircleCollection(),
    BarContainer: legend_handler.HandlerPatch(
        update_func=legend_handler.update_from_first_child),
    tuple: legend_handler.HandlerTuple(),
    PathCollection: legend_handler.HandlerPathCollection(),
    PolyCollection: legend_handler.HandlerPolyCollection()
}
# (get|set|update)_default_handler_maps are public interfaces to
# modify the default handler map.

@classmethod
def get_default_handler_map(cls):
    """Return the global default handler map, shared by all legends."""
    return cls._default_handler_map
@classmethod
def set_default_handler_map(cls, handler_map):
    """Set the global default handler map, shared by all legends."""
    # Replaces the mapping wholesale; affects all legends created afterwards.
    cls._default_handler_map = handler_map
@classmethod
def update_default_handler_map(cls, handler_map):
    """Update the global default handler map, shared by all legends."""
    # In-place dict update: merges new entries, keeps existing ones.
    cls._default_handler_map.update(handler_map)
def get_legend_handler_map(self):
    """Return this legend instance's handler map.

    This is the global default map, overlaid with any per-instance
    handler map supplied via the *handler_map* keyword.
    """
    base_map = self.get_default_handler_map()
    custom_map = self._custom_handler_map
    if not custom_map:
        return base_map
    return {**base_map, **custom_map}
@staticmethod
def get_legend_handler(legend_handler_map, orig_handle):
"""
Return a legend handler from *legend_handler_map* that
corresponds to *orig_handler*.
*legend_handler_map* should be a dictionary object (that is
returned by the get_legend_handler_map method).
It first checks if the *orig_handle* itself is a key in the
*legend_handler_map* and return the associated value.
Otherwise, it checks for each of the classes in its
method-resolution-order. If no matching key is found, it
returns ``None``.
"""
try:
return legend_handler_map[orig_handle]
except (TypeError, KeyError): # TypeError if unhashable.
pass
for handle_type in type(orig_handle).mro():
try:
return legend_handler_map[handle_type]
except KeyError:
pass
return None
def _init_legend_box(self, handles, labels, markerfirst=True):
    """
    Initialize the legend_box. The legend_box is an instance of
    the OffsetBox, which is packed with legend handles and
    texts. Once packed, their location is calculated during the
    drawing time.
    """
    fontsize = self._fontsize

    # legend_box is a HPacker, horizontally packed with columns.
    # Each column is a VPacker, vertically packed with legend items.
    # Each legend item is a HPacker packed with:
    # - handlebox: a DrawingArea which contains the legend handle.
    # - labelbox: a TextArea which contains the legend text.

    text_list = []  # the list of text instances
    handle_list = []  # the list of handle instances
    handles_and_labels = []

    # The approximate height and descent of text. These values are
    # only used for plotting the legend handle.
    descent = 0.35 * fontsize * (self.handleheight - 0.7)  # heuristic.
    height = fontsize * self.handleheight - descent
    # each handle needs to be drawn inside a box of (x, y, w, h) =
    # (0, -descent, width, height). And their coordinates should
    # be given in the display coordinates.

    # The transformation of each handle will be automatically set
    # to self.get_transform(). If the artist does not use its
    # default transform (e.g., Collections), you need to
    # manually set their transform to the self.get_transform().
    legend_handler_map = self.get_legend_handler_map()

    for orig_handle, label in zip(handles, labels):
        handler = self.get_legend_handler(legend_handler_map, orig_handle)
        if handler is None:
            _api.warn_external(
                "Legend does not support handles for "
                f"{type(orig_handle).__name__} "
                "instances.\nA proxy artist may be used "
                "instead.\nSee: https://matplotlib.org/"
                "stable/users/explain/axes/legend_guide.html"
                "#controlling-the-legend-entries")
            # No handle for this artist, so we just defer to None.
            handle_list.append(None)
        else:
            textbox = TextArea(label, multilinebaseline=True,
                               textprops=dict(
                                   verticalalignment='baseline',
                                   horizontalalignment='left',
                                   fontproperties=self.prop))
            handlebox = DrawingArea(width=self.handlelength * fontsize,
                                    height=height,
                                    xdescent=0., ydescent=descent)

            text_list.append(textbox._text)
            # Create the artist for the legend which represents the
            # original artist/handle.
            handle_list.append(handler.legend_artist(self, orig_handle,
                                                     fontsize, handlebox))
            handles_and_labels.append((handlebox, textbox))

    columnbox = []
    # array_split splits n handles_and_labels into ncols columns, with the
    # first n%ncols columns having an extra entry.  filter(len, ...)
    # handles the case where n < ncols: the last ncols-n columns are empty
    # and get filtered out.
    for handles_and_labels_column in filter(
            len, np.array_split(handles_and_labels, self._ncols)):
        # pack handlebox and labelbox into itembox
        itemboxes = [HPacker(pad=0,
                             sep=self.handletextpad * fontsize,
                             children=[h, t] if markerfirst else [t, h],
                             align="baseline")
                     for h, t in handles_and_labels_column]
        # pack columnbox
        alignment = "baseline" if markerfirst else "right"
        columnbox.append(VPacker(pad=0,
                                 sep=self.labelspacing * fontsize,
                                 align=alignment,
                                 children=itemboxes))

    mode = "expand" if self._mode == "expand" else "fixed"
    sep = self.columnspacing * fontsize
    self._legend_handle_box = HPacker(pad=0,
                                      sep=sep, align="baseline",
                                      mode=mode,
                                      children=columnbox)
    # Title starts empty; set_title() fills it in and toggles visibility.
    self._legend_title_box = TextArea("")
    self._legend_box = VPacker(pad=self.borderpad * fontsize,
                               sep=self.labelspacing * fontsize,
                               align=self._alignment,
                               children=[self._legend_title_box,
                                         self._legend_handle_box])
    self._legend_box.set_figure(self.get_figure(root=False))
    self._legend_box.axes = self.axes
    self.texts = text_list
    self.legend_handles = handle_list
def _auto_legend_data(self, renderer):
    """
    Return display coordinates for hit testing for "best" positioning.

    Returns
    -------
    bboxes
        List of bounding boxes of all patches.
    lines
        List of `.Path` corresponding to each line.
    offsets
        List of (x, y) offsets of all collection.
    """
    assert self.isaxes  # always holds, as this is only called internally
    bboxes = []
    lines = []
    offsets = []
    # NOTE: branch order matters — e.g. Rectangle is matched before the
    # generic Patch branch so its bbox is used instead of its path.
    for artist in self.parent._children:
        if isinstance(artist, Line2D):
            lines.append(
                artist.get_transform().transform_path(artist.get_path()))
        elif isinstance(artist, Rectangle):
            bboxes.append(
                artist.get_bbox().transformed(artist.get_data_transform()))
        elif isinstance(artist, Patch):
            lines.append(
                artist.get_transform().transform_path(artist.get_path()))
        elif isinstance(artist, PolyCollection):
            lines.extend(artist.get_transform().transform_path(path)
                         for path in artist.get_paths())
        elif isinstance(artist, Collection):
            transform, transOffset, hoffsets, _ = artist._prepare_points()
            if len(hoffsets):
                offsets.extend(transOffset.transform(hoffsets))
        elif isinstance(artist, Text):
            bboxes.append(artist.get_window_extent(renderer))
    return bboxes, lines, offsets
def get_children(self):
    # docstring inherited
    # The legend's drawable children: the packed entries box and the frame.
    return [self._legend_box, self.get_frame()]
def get_frame(self):
    """Return the `~.patches.Rectangle` used to frame the legend."""
    # This is the FancyBboxPatch created in __init__ (``legendPatch``).
    return self.legendPatch
def get_lines(self):
    r"""Return the list of `~.lines.Line2D`\s in the legend."""
    # Entries without a Line2D handle (including None placeholders for
    # unsupported artists) are skipped.
    return [h for h in self.legend_handles if isinstance(h, Line2D)]
def get_patches(self):
    r"""Return the list of `~.patches.Patch`\s in the legend."""
    return silent_list('Patch',
                       [h for h in self.legend_handles
                        if isinstance(h, Patch)])
def get_texts(self):
    r"""Return the list of `~.text.Text`\s in the legend."""
    # Wrapped in silent_list — presumably for a compact repr; confirm.
    return silent_list('Text', self.texts)
def set_alignment(self, alignment):
    """
    Set the alignment of the legend title and the box of entries.

    The entries are aligned as a single block, so that markers always
    lined up.

    Parameters
    ----------
    alignment : {'center', 'left', 'right'}.
    """
    _api.check_in_list(["center", "left", "right"], alignment=alignment)
    self._alignment = alignment
    # Propagate to the already-built VPacker so the change takes effect.
    self._legend_box.align = alignment
def get_alignment(self):
    """Get the alignment value of the legend box."""
    box = self._legend_box
    return box.align
def set_title(self, title, prop=None):
    """
    Set legend title and title style.

    Parameters
    ----------
    title : str
        The legend title.

    prop : `.font_manager.FontProperties` or `str` or `pathlib.Path`
        The font properties of the legend title.
        If a `str`, it is interpreted as a fontconfig pattern parsed by
        `.FontProperties`. If a `pathlib.Path`, it is interpreted as the
        absolute path to a font file.
    """
    title_text = self._legend_title_box._text
    title_text.set_text(title)

    # Show the title box only when there is actual (truthy) title text.
    show = bool(title)
    title_text.set_visible(show)
    self._legend_title_box.set_visible(show)

    if prop is not None:
        title_text.set_fontproperties(prop)

    self.stale = True
def get_title(self):
    """Return the `.Text` instance for the legend title."""
    return self._legend_title_box._text
def get_window_extent(self, renderer=None):
    # docstring inherited
    if renderer is None:
        # Fall back to the root figure's cached renderer.
        renderer = self.get_figure(root=True)._get_renderer()
    return self._legend_box.get_window_extent(renderer=renderer)
def get_tightbbox(self, renderer=None):
    # docstring inherited
    # NOTE(review): unlike get_window_extent above, a None *renderer* is
    # passed straight through — presumably the offsetbox resolves it;
    # confirm against the OffsetBox API.
    return self._legend_box.get_window_extent(renderer)
def get_frame_on(self):
    """Get whether the legend box patch is drawn."""
    return self.legendPatch.get_visible()
def set_frame_on(self, b):
    """
    Set whether the legend box patch is drawn.

    Parameters
    ----------
    b : bool
    """
    self.legendPatch.set_visible(b)
    self.stale = True

draw_frame = set_frame_on  # Backcompat alias.
def get_bbox_to_anchor(self):
    """Return the bbox that the legend will be anchored to."""
    anchor = self._bbox_to_anchor
    # With no explicit anchor, the parent (Axes/Figure) bbox is used.
    return self.parent.bbox if anchor is None else anchor
def set_bbox_to_anchor(self, bbox, transform=None):
    """
    Set the bbox that the legend will be anchored to.

    Parameters
    ----------
    bbox : `~matplotlib.transforms.BboxBase` or tuple
        The bounding box can be specified in the following ways:

        - A `.BboxBase` instance
        - A tuple of ``(left, bottom, width, height)`` in the given
          transform (normalized axes coordinate if None)
        - A tuple of ``(left, bottom)`` where the width and height will be
          assumed to be zero.
        - *None*, to remove the bbox anchoring, and use the parent bbox.

    transform : `~matplotlib.transforms.Transform`, optional
        A transform to apply to the bounding box. If not specified, this
        will use a transform to the bounding box of the parent.
    """
    if bbox is None:
        self._bbox_to_anchor = None
        return  # no transform wrapping when anchoring is removed
    elif isinstance(bbox, BboxBase):
        self._bbox_to_anchor = bbox
    else:
        try:
            l = len(bbox)
        except TypeError as err:
            raise ValueError(f"Invalid bbox: {bbox}") from err

        if l == 2:
            # A (left, bottom) pair: treat as a zero-size box.
            bbox = [bbox[0], bbox[1], 0, 0]

        self._bbox_to_anchor = Bbox.from_bounds(*bbox)

    # Both the BboxBase and the tuple branches fall through here, so the
    # anchor is always wrapped in a TransformedBbox.
    if transform is None:
        transform = BboxTransformTo(self.parent.bbox)

    self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,
                                           transform)
    self.stale = True
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
    """
    Place the *bbox* inside the *parentbbox* according to a given
    location code. Return the (x, y) coordinate of the bbox.

    Parameters
    ----------
    loc : int
        A location code in range(1, 11). This corresponds to the possible
        values for ``self._loc``, excluding "best".
    bbox : `~matplotlib.transforms.Bbox`
        bbox to be placed, in display coordinates.
    parentbbox : `~matplotlib.transforms.Bbox`
        A parent box which will contain the bbox, in display coordinates.
    """
    # Padding is borderaxespad in font-size units, converted to pixels.
    return offsetbox._get_anchored_bbox(
        loc, bbox, parentbbox,
        self.borderaxespad * renderer.points_to_pixels(self._fontsize))
def _find_best_position(self, width, height, renderer):
    """Determine the best location to place the legend."""
    assert self.isaxes  # always holds, as this is only called internally

    start_time = time.perf_counter()

    bboxes, lines, offsets = self._auto_legend_data(renderer)

    bbox = Bbox.from_bounds(0, 0, width, height)

    candidates = []
    for idx in range(1, len(self.codes)):
        l, b = self._get_anchored_bbox(idx, bbox,
                                       self.get_bbox_to_anchor(),
                                       renderer)
        legendBox = Bbox.from_bounds(l, b, width, height)
        # XXX TODO: If markers are present, it would be good to take them
        # into account when checking vertex overlaps in the next line.
        # "badness" counts data vertices/offsets/bboxes/line-crossings
        # overlapped by this candidate position; lower is better.
        badness = (sum(legendBox.count_contains(line.vertices)
                       for line in lines)
                   + legendBox.count_contains(offsets)
                   + legendBox.count_overlaps(bboxes)
                   + sum(line.intersects_bbox(legendBox, filled=False)
                         for line in lines))
        # Include the index to favor lower codes in case of a tie.
        candidates.append((badness, idx, (l, b)))
        if badness == 0:
            break  # perfect spot found; no need to try the rest

    _, _, (l, b) = min(candidates)

    if self._loc_used_default and time.perf_counter() - start_time > 1:
        _api.warn_external(
            'Creating legend with loc="best" can be slow with large '
            'amounts of data.')

    return l, b
def contains(self, mouseevent):
    # docstring inherited
    # Hit-testing is delegated to the frame patch only, not to the
    # individual handle/text children.
    return self.legendPatch.contains(mouseevent)
def set_draggable(self, state, use_blit=False, update='loc'):
    """
    Enable or disable mouse dragging support of the legend.

    Parameters
    ----------
    state : bool
        Whether mouse dragging is enabled.
    use_blit : bool, optional
        Use blitting for faster image composition. For details see
        :ref:`func-animation`.
    update : {'loc', 'bbox'}, optional
        The legend parameter to be changed when dragged:

        - 'loc': update the *loc* parameter of the legend
        - 'bbox': update the *bbox_to_anchor* parameter of the legend

    Returns
    -------
    `.DraggableLegend` or *None*
        If *state* is ``True`` this returns the `.DraggableLegend` helper
        instance. Otherwise this returns *None*.
    """
    # Enabling twice keeps the existing helper; disabling twice is a no-op.
    if state and self._draggable is None:
        self._draggable = DraggableLegend(self,
                                          use_blit,
                                          update=update)
    elif not state and self._draggable is not None:
        self._draggable.disconnect()
        self._draggable = None
    return self._draggable
def get_draggable(self):
    """Return ``True`` if the legend is draggable, ``False`` otherwise."""
    # A DraggableLegend helper is stored only while dragging is enabled.
    return self._draggable is not None
# Helper functions to parse legend arguments for both `figure.legend` and
# `axes.legend`:
def _get_legend_handles(axs, legend_handler_map=None):
    """Yield artists that can be used as handles in a legend."""
    handles_original = []
    for ax in axs:
        handles_original += [
            *(a for a in ax._children
              if isinstance(a, (Line2D, Patch, Collection, Text))),
            *ax.containers]
        # support parasite Axes:
        if hasattr(ax, 'parasites'):
            for axx in ax.parasites:
                handles_original += [
                    *(a for a in axx._children
                      if isinstance(a, (Line2D, Patch, Collection, Text))),
                    *axx.containers]

    handler_map = {**Legend.get_default_handler_map(),
                   **(legend_handler_map or {})}
    # Despite the name, `has_handler` returns the handler (or None); it is
    # only used for its truthiness below.
    has_handler = Legend.get_legend_handler
    for handle in handles_original:
        label = handle.get_label()
        if label != '_nolegend_' and has_handler(handler_map, handle):
            yield handle
        elif (label and not label.startswith('_') and
                not has_handler(handler_map, handle)):
            # Visibly-labelled artist with no handler: warn, don't yield.
            _api.warn_external(
                "Legend does not support handles for "
                f"{type(handle).__name__} "
                "instances.\nSee: https://matplotlib.org/stable/"
                "tutorials/intermediate/legend_guide.html"
                "#implementing-a-custom-legend-handler")
            continue
def _get_legend_handles_labels(axs, legend_handler_map=None):
    """Return handles and labels for legend."""
    handles = []
    labels = []
    for candidate in _get_legend_handles(axs, legend_handler_map):
        text = candidate.get_label()
        # Skip unlabelled artists and private ('_'-prefixed) labels.
        if not text or text.startswith('_'):
            continue
        handles.append(candidate)
        labels.append(text)
    return handles, labels
def _parse_legend_args(axs, *args, handles=None, labels=None, **kwargs):
    """
    Get the handles and labels from the calls to either ``figure.legend``
    or ``axes.legend``.

    The parser is a bit involved because we support::

        legend()
        legend(labels)
        legend(handles, labels)
        legend(labels=labels)
        legend(handles=handles)
        legend(handles=handles, labels=labels)

    The behavior for a mixture of positional and keyword handles and labels
    is undefined and issues a warning; it will be an error in the future.

    Parameters
    ----------
    axs : list of `.Axes`
        If handles are not given explicitly, the artists in these Axes are
        used as handles.
    *args : tuple
        Positional parameters passed to ``legend()``.
    handles
        The value of the keyword argument ``legend(handles=...)``, or *None*
        if that keyword argument was not used.
    labels
        The value of the keyword argument ``legend(labels=...)``, or *None*
        if that keyword argument was not used.
    **kwargs
        All other keyword arguments passed to ``legend()``.

    Returns
    -------
    handles : list of (`.Artist` or tuple of `.Artist`)
        The legend handles.
    labels : list of str
        The legend labels.
    kwargs : dict
        *kwargs* with keywords handles and labels removed.
    """
    # (Fix: removed an unused local ``log = logging.getLogger(__name__)``
    # that was never referenced anywhere in this function.)
    handlers = kwargs.get('handler_map')

    if (handles is not None or labels is not None) and args:
        _api.warn_deprecated("3.9", message=(
            "You have mixed positional and keyword arguments, some input may "
            "be discarded. This is deprecated since %(since)s and will "
            "become an error in %(removal)s."))

    if (hasattr(handles, "__len__") and
            hasattr(labels, "__len__") and
            len(handles) != len(labels)):
        _api.warn_external(f"Mismatched number of handles and labels: "
                           f"len(handles) = {len(handles)} "
                           f"len(labels) = {len(labels)}")

    # if got both handles and labels as kwargs, make same length
    if handles and labels:
        # zip-of-zip truncates both sequences to the shorter length.
        handles, labels = zip(*zip(handles, labels))
    elif handles is not None and labels is None:
        labels = [handle.get_label() for handle in handles]
    elif labels is not None and handles is None:
        # Get as many handles as there are labels.
        handles = [handle for handle, label
                   in zip(_get_legend_handles(axs, handlers), labels)]
    elif len(args) == 0:  # 0 args: automatically detect labels and handles.
        handles, labels = _get_legend_handles_labels(axs, handlers)
        if not handles:
            _api.warn_external(
                "No artists with labels found to put in legend. Note that "
                "artists whose label start with an underscore are ignored "
                "when legend() is called with no argument.")
    elif len(args) == 1:  # 1 arg: user defined labels, automatic handle detection.
        labels, = args
        if any(isinstance(l, Artist) for l in labels):
            raise TypeError("A single argument passed to legend() must be a "
                            "list of labels, but found an Artist in there.")
        # Get as many handles as there are labels.
        handles = [handle for handle, label
                   in zip(_get_legend_handles(axs, handlers), labels)]
    elif len(args) == 2:  # 2 args: user defined handles and labels.
        handles, labels = args[:2]
    else:
        raise _api.nargs_error('legend', '0-2', len(args))

    return handles, labels, kwargs
"""
Default legend handlers.
.. important::
This is a low-level legend API, which most end users do not need.
We recommend that you are familiar with the :ref:`legend guide
<legend_guide>` before reading this documentation.
Legend handlers are expected to be a callable object with a following
signature::
legend_handler(legend, orig_handle, fontsize, handlebox)
Where *legend* is the legend itself, *orig_handle* is the original
plot, *fontsize* is the fontsize in pixels, and *handlebox* is an
`.OffsetBox` instance. Within the call, you should create relevant
artists (using relevant properties from the *legend* and/or
*orig_handle*) and add them into the *handlebox*. The artists need to
be scaled according to the *fontsize* (note that the size is in pixels,
i.e., this is dpi-scaled value).
This module includes definition of several legend handler classes
derived from the base class (HandlerBase) with the following method::
def legend_artist(self, legend, orig_handle, fontsize, handlebox)
"""
from itertools import cycle
import numpy as np
from matplotlib import cbook
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
import matplotlib.collections as mcoll
def update_from_first_child(tgt, src):
    """Copy properties to *tgt* from the first child of *src*, if any."""
    children = iter(src.get_children())
    try:
        first_child = next(children)
    except StopIteration:
        # *src* has no children; leave *tgt* untouched.
        return
    tgt.update_from(first_child)
class HandlerBase:
"""
A base class for default legend handlers.
The derived classes are meant to override *create_artists* method, which
has the following signature::
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
The overridden method needs to create artists of the given
transform that fits in the given dimension (xdescent, ydescent,
width, height) that are scaled by fontsize if necessary.
"""
def __init__(self, xpad=0., ypad=0., update_func=None):
    """
    Parameters
    ----------
    xpad : float, optional
        Padding in x-direction.
    ypad : float, optional
        Padding in y-direction.
    update_func : callable, optional
        Function for updating the legend handler properties from another
        legend handler, used by `~HandlerBase.update_prop`.
    """
    self._xpad = xpad
    self._ypad = ypad
    self._update_prop_func = update_func
def _update_prop(self, legend_handle, orig_handle):
    # Prefer the user-supplied updater; fall back to the default
    # property-copying behavior when none was given.
    updater = self._update_prop_func
    if updater is None:
        self._default_update_prop(legend_handle, orig_handle)
    else:
        updater(legend_handle, orig_handle)
def _default_update_prop(self, legend_handle, orig_handle):
    # Default behavior: copy the original artist's properties wholesale.
    legend_handle.update_from(orig_handle)
def update_prop(self, legend_handle, orig_handle, legend):
    # Copy styling from the original artist, then attach the legend's
    # figure/transform props and disable clipping so the proxy artist is
    # always fully visible inside its handle box.
    self._update_prop(legend_handle, orig_handle)

    legend._set_artist_props(legend_handle)
    legend_handle.set_clip_box(None)
    legend_handle.set_clip_path(None)
def adjust_drawing_area(self, legend, orig_handle,
                        xdescent, ydescent, width, height, fontsize,
                        ):
    """Shrink the drawing area by this handler's padding, scaled by *fontsize*."""
    dx = self._xpad * fontsize
    dy = self._ypad * fontsize
    return (xdescent - dx, ydescent - dy, width - dx, height - dy)
def legend_artist(self, legend, orig_handle,
fontsize, handlebox):
"""
Return the artist that this HandlerBase generates for the given
original artist/handle.
Parameters
----------
legend : `~matplotlib.legend.Legend`
The legend for which these legend artists are being created.
orig_handle : :class:`matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
fontsize : int
The fontsize in pixels. The artists being created should
be scaled according to the given fontsize.
handlebox : `~matplotlib.offsetbox.OffsetBox`
The box which has been created to hold this legend entry's
artists. Artists created in the `legend_artist` method must
be added to this handlebox inside this method.
"""
xdescent, ydescent, width, height = self.adjust_drawing_area(
legend, orig_handle,
handlebox.xdescent, handlebox.ydescent,
handlebox.width, handlebox.height,
fontsize)
artists = self.create_artists(legend, orig_handle,
xdescent, ydescent, width, height,
fontsize, handlebox.get_transform())
# create_artists will return a list of artists.
for a in artists:
handlebox.add_artist(a)
# we only return the first artist
return artists[0]
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
"""
Return the legend artists generated.
Parameters
----------
legend : `~matplotlib.legend.Legend`
The legend for which these legend artists are being created.
orig_handle : `~matplotlib.artist.Artist` or similar
The object for which these legend artists are being created.
xdescent, ydescent, width, height : int
The rectangle (*xdescent*, *ydescent*, *width*, *height*) that the
legend artists being created should fit within.
fontsize : int
The fontsize in pixels. The legend artists being created should
be scaled according to the given fontsize.
trans : `~matplotlib.transforms.Transform`
The transform that is applied to the legend artists being created.
Typically from unit coordinates in the handler box to screen
coordinates.
"""
raise NotImplementedError('Derived must override')
class HandlerNpoints(HandlerBase):
    """
    A legend handler that shows *numpoints* points in the legend entry.
    """

    def __init__(self, marker_pad=0.3, numpoints=None, **kwargs):
        """
        Parameters
        ----------
        marker_pad : float
            Padding between points in legend entry.
        numpoints : int
            Number of points to show in legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        super().__init__(**kwargs)
        self._numpoints = numpoints
        self._marker_pad = marker_pad

    def get_numpoints(self, legend):
        # A per-handler setting wins; otherwise use the legend's default.
        return legend.numpoints if self._numpoints is None else self._numpoints

    def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
        npts = self.get_numpoints(legend)
        if npts > 1:
            # Inset the end points to compensate for the marker size.
            inset = self._marker_pad * fontsize
            xdata = np.linspace(-xdescent + inset,
                                -xdescent + width - inset,
                                npts)
            return xdata, xdata
        # Single point: line spans the full box, marker sits in the center.
        return ([-xdescent, -xdescent + width],
                [-xdescent + 0.5 * width])
class HandlerNpointsYoffsets(HandlerNpoints):
    """
    A legend handler that shows *numpoints* in the legend, and allows them to
    be individually offset in the y-direction.
    """

    def __init__(self, numpoints=None, yoffsets=None, **kwargs):
        """
        Parameters
        ----------
        numpoints : int
            Number of points to show in legend entry.
        yoffsets : array of floats
            Length *numpoints* list of y offsets for each point in
            legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerNpoints`.
        """
        super().__init__(numpoints=numpoints, **kwargs)
        self._yoffsets = yoffsets

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Offsets are fractions of the entry height; fall back on the
        # legend-wide scatter offsets when none were given explicitly.
        offsets = (legend._scatteryoffsets if self._yoffsets is None
                   else np.asarray(self._yoffsets))
        return height * offsets
class HandlerLine2DCompound(HandlerNpoints):
    """
    Original handler for `.Line2D` instances, that relies on combining
    a line-only with a marker-only artist. May be deprecated in the future.
    """

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = np.full_like(xdata, ((height - ydescent) / 2))
        # Line part: copy the handle's properties, then strip the markers.
        line_artist = Line2D(xdata, ydata)
        self.update_prop(line_artist, orig_handle, legend)
        line_artist.set_drawstyle('default')
        line_artist.set_marker("")
        # Marker part: copy again, then strip the line.
        marker_artist = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(marker_artist, orig_handle, legend)
        marker_artist.set_linestyle('None')
        if legend.markerscale != 1:
            marker_artist.set_markersize(
                marker_artist.get_markersize() * legend.markerscale)
        # Stash the marker artist on the line artist rather than relying
        # solely on the return list, because the texts and handles are
        # assumed to be in one-to-one correspondence.
        line_artist._legmarker = marker_artist
        line_artist.set_transform(trans)
        marker_artist.set_transform(trans)
        return [line_artist, marker_artist]
class HandlerLine2D(HandlerNpoints):
    """
    Handler for `.Line2D` instances.

    See Also
    --------
    HandlerLine2DCompound : An earlier handler implementation, which used one
        artist for the line and another for the marker(s).
    """

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        if self.get_numpoints(legend) == 1:
            # Special case: a single centered marker with a line extending
            # on both sides -- draw a 3-point line but mark only point #1
            # (the middle one).
            xdata = np.linspace(xdata[0], xdata[-1], 3)
            markevery = [1]
        else:
            markevery = None
        ydata = np.full_like(xdata, (height - ydescent) / 2)
        line = Line2D(xdata, ydata, markevery=markevery)
        self.update_prop(line, orig_handle, legend)
        if legend.markerscale != 1:
            line.set_markersize(line.get_markersize() * legend.markerscale)
        line.set_transform(trans)
        return [line]
class HandlerPatch(HandlerBase):
    """
    Handler for `.Patch` instances.
    """

    def __init__(self, patch_func=None, **kwargs):
        """
        Parameters
        ----------
        patch_func : callable, optional
            The function that creates the legend key artist.
            *patch_func* should have the signature::

                def patch_func(legend=legend, orig_handle=orig_handle,
                               xdescent=xdescent, ydescent=ydescent,
                               width=width, height=height, fontsize=fontsize)

            Subsequently, the created artist will have its ``update_prop``
            method called and the appropriate transform will be applied.
        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        super().__init__(**kwargs)
        self._patch_func = patch_func

    def _create_patch(self, legend, orig_handle,
                      xdescent, ydescent, width, height, fontsize):
        # Without a factory, default to a rectangle filling the legend box.
        if self._patch_func is None:
            return Rectangle(xy=(-xdescent, -ydescent),
                             width=width, height=height)
        return self._patch_func(legend=legend, orig_handle=orig_handle,
                                xdescent=xdescent, ydescent=ydescent,
                                width=width, height=height,
                                fontsize=fontsize)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        patch = self._create_patch(legend, orig_handle,
                                   xdescent, ydescent, width, height,
                                   fontsize)
        self.update_prop(patch, orig_handle, legend)
        patch.set_transform(trans)
        return [patch]
class HandlerStepPatch(HandlerBase):
    """
    Handler for `~.matplotlib.patches.StepPatch` instances.
    """

    @staticmethod
    def _create_patch(orig_handle, xdescent, ydescent, width, height):
        # A filled StepPatch is represented by a filled rectangle.
        return Rectangle(xy=(-xdescent, -ydescent), width=width,
                         height=height, color=orig_handle.get_facecolor())

    @staticmethod
    def _create_line(orig_handle, width, height):
        # Unfilled StepPatch should show as a line
        line = Line2D([0, width], [height/2, height/2],
                      color=orig_handle.get_edgecolor(),
                      linestyle=orig_handle.get_linestyle(),
                      linewidth=orig_handle.get_linewidth(),
                      )
        # Overwrite manually because patch and line properties don't mix
        line.set_drawstyle('default')
        line.set_marker("")
        return line

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        filled = orig_handle.get_fill() or orig_handle.get_hatch() is not None
        if filled:
            artist = self._create_patch(orig_handle, xdescent, ydescent,
                                        width, height)
            self.update_prop(artist, orig_handle, legend)
        else:
            artist = self._create_line(orig_handle, width, height)
        artist.set_transform(trans)
        return [artist]
class HandlerLineCollection(HandlerLine2D):
    """
    Handler for `.LineCollection` instances.
    """

    def get_numpoints(self, legend):
        # Unlike HandlerNpoints, defer to the legend's *scatterpoints*.
        return (legend.scatterpoints if self._numpoints is None
                else self._numpoints)

    def _default_update_prop(self, legend_handle, orig_handle):
        # Copy the first line's color/style/width from the collection onto
        # the single proxy Line2D.
        legend_handle.set_color(orig_handle.get_colors()[0])
        legend_handle.set_linestyle(orig_handle._us_linestyles[0])
        legend_handle.set_linewidth(orig_handle.get_linewidths()[0])

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        xdata, _ = self.get_xdata(legend, xdescent, ydescent,
                                  width, height, fontsize)
        ydata = np.full_like(xdata, (height - ydescent) / 2)
        proxy = Line2D(xdata, ydata)
        self.update_prop(proxy, orig_handle, legend)
        proxy.set_transform(trans)
        return [proxy]
class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
    r"""Handler for `.RegularPolyCollection`\s."""

    def __init__(self, yoffsets=None, sizes=None, **kwargs):
        super().__init__(yoffsets=yoffsets, **kwargs)
        self._sizes = sizes

    def get_numpoints(self, legend):
        # Defer to the legend's *scatterpoints* unless overridden.
        return (legend.scatterpoints if self._numpoints is None
                else self._numpoints)

    def get_sizes(self, legend, orig_handle,
                  xdescent, ydescent, width, height, fontsize):
        if self._sizes is not None:
            return self._sizes
        handle_sizes = orig_handle.get_sizes()
        if not len(handle_sizes):
            handle_sizes = [1]
        # Scale the extremes by markerscale squared (sizes are areas).
        scale = legend.markerscale ** 2
        size_max = max(handle_sizes) * scale
        size_min = min(handle_sizes) * scale
        numpoints = self.get_numpoints(legend)
        if numpoints < 4:
            # Mean, max, min -- truncated to the requested count.
            return [.5 * (size_max + size_min), size_max,
                    size_min][:numpoints]
        # Otherwise interpolate linearly between the extremes.
        return (size_max - size_min) * np.linspace(0, 1, numpoints) + size_min

    def update_prop(self, legend_handle, orig_handle, legend):
        self._update_prop(legend_handle, orig_handle)
        legend_handle.set_figure(legend.get_figure(root=False))
        # legend._set_artist_props(legend_handle)
        legend_handle.set_clip_box(None)
        legend_handle.set_clip_path(None)

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        return type(orig_handle)(
            orig_handle.get_numsides(),
            rotation=orig_handle.get_rotation(), sizes=sizes,
            offsets=offsets, offset_transform=offset_transform,
        )

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)
        sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
                               width, height, fontsize)
        collection = self.create_collection(
            orig_handle, sizes,
            offsets=list(zip(xdata_marker, ydata)), offset_transform=trans)
        self.update_prop(collection, orig_handle, legend)
        collection.set_offset_transform(trans)
        return [collection]
class HandlerPathCollection(HandlerRegularPolyCollection):
    r"""Handler for `.PathCollection`\s, which are used by `~.Axes.scatter`."""

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        # Reuse only the first path of the original collection.
        first_path = orig_handle.get_paths()[0]
        return type(orig_handle)(
            [first_path], sizes=sizes,
            offsets=offsets, offset_transform=offset_transform,
        )
class HandlerCircleCollection(HandlerRegularPolyCollection):
    r"""Handler for `.CircleCollection`\s."""

    def create_collection(self, orig_handle, sizes, offsets, offset_transform):
        # CircleCollection takes the sizes positionally.
        return type(orig_handle)(sizes, offsets=offsets,
                                 offset_transform=offset_transform)
class HandlerErrorbar(HandlerLine2D):
    """Handler for Errorbars."""

    def __init__(self, xerr_size=0.5, yerr_size=None,
                 marker_pad=0.3, numpoints=None, **kwargs):
        # Error-bar half-lengths, in multiples of *fontsize*; *yerr_size*
        # defaults to *xerr_size* (see get_err_size).
        self._xerr_size = xerr_size
        self._yerr_size = yerr_size
        super().__init__(marker_pad=marker_pad, numpoints=numpoints, **kwargs)

    def get_err_size(self, legend, xdescent, ydescent,
                     width, height, fontsize):
        """Return the (x, y) error-bar half-lengths scaled by *fontsize*."""
        xerr_size = self._xerr_size * fontsize
        if self._yerr_size is None:
            yerr_size = xerr_size
        else:
            yerr_size = self._yerr_size * fontsize
        return xerr_size, yerr_size

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        # orig_handle unpacks into (data line, cap lines, bar line
        # collections) and also exposes has_xerr/has_yerr -- presumably an
        # ErrorbarContainer; confirm against caller.
        plotlines, caplines, barlinecols = orig_handle
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = np.full_like(xdata, (height - ydescent) / 2)
        legline = Line2D(xdata, ydata)
        xdata_marker = np.asarray(xdata_marker)
        ydata_marker = np.asarray(ydata[:len(xdata_marker)])
        xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
                                                 width, height, fontsize)
        legline_marker = Line2D(xdata_marker, ydata_marker)
        # when plotlines are None (only errorbars are drawn), we just
        # make legline invisible.
        if plotlines is None:
            legline.set_visible(False)
            legline_marker.set_visible(False)
        else:
            self.update_prop(legline, plotlines, legend)
            legline.set_drawstyle('default')
            legline.set_marker('none')
            self.update_prop(legline_marker, plotlines, legend)
            legline_marker.set_linestyle('None')
            if legend.markerscale != 1:
                newsz = legline_marker.get_markersize() * legend.markerscale
                legline_marker.set_markersize(newsz)
        handle_barlinecols = []
        handle_caplines = []
        if orig_handle.has_xerr:
            # Horizontal error bars: one short segment through each marker...
            verts = [((x - xerr_size, y), (x + xerr_size, y))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)
            if caplines:
                # ...plus "|" caps at both ends.
                capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
                capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("|")
                capline_right.set_marker("|")
                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)
        if orig_handle.has_yerr:
            # Vertical error bars, with "_" caps.
            verts = [((x, y - yerr_size), (x, y + yerr_size))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)
            if caplines:
                capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
                capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("_")
                capline_right.set_marker("_")
                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)
        artists = [
            *handle_barlinecols, *handle_caplines, legline, legline_marker,
        ]
        for artist in artists:
            artist.set_transform(trans)
        return artists
class HandlerStem(HandlerNpointsYoffsets):
    """
    Handler for plots produced by `~.Axes.stem`.
    """

    def __init__(self, marker_pad=0.3, numpoints=None,
                 bottom=None, yoffsets=None, **kwargs):
        """
        Parameters
        ----------
        marker_pad : float, default: 0.3
            Padding between points in legend entry.
        numpoints : int, optional
            Number of points to show in legend entry.
        bottom : float, optional
            Baseline y-position for the stems; defaults to 0 in
            ``create_artists``.
        yoffsets : array of floats, optional
            Length *numpoints* list of y offsets for each point in
            legend entry.
        **kwargs
            Keyword arguments forwarded to `.HandlerNpointsYoffsets`.
        """
        super().__init__(marker_pad=marker_pad, numpoints=numpoints,
                         yoffsets=yoffsets, **kwargs)
        self._bottom = bottom

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Unlike the base class, remap the legend-wide scatter offsets into
        # the upper half of the entry.
        if self._yoffsets is None:
            ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
        else:
            ydata = height * np.asarray(self._yoffsets)
        return ydata

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        # orig_handle unpacks like a StemContainer: (markerline, stemlines,
        # baseline).
        markerline, stemlines, baseline = orig_handle
        # Check to see if the stemcontainer is storing lines as a list or a
        # LineCollection. Eventually using a list will be removed, and this
        # logic can also be removed.
        using_linecoll = isinstance(stemlines, mcoll.LineCollection)
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)
        if self._bottom is None:
            bottom = 0.
        else:
            bottom = self._bottom
        leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(leg_markerline, markerline, legend)
        # One vertical stem per marker position.
        leg_stemlines = [Line2D([x, x], [bottom, y])
                         for x, y in zip(xdata_marker, ydata)]
        if using_linecoll:
            # change the function used by update_prop() from the default
            # to one that handles LineCollection
            with cbook._setattr_cm(
                    self, _update_prop_func=self._copy_collection_props):
                for line in leg_stemlines:
                    self.update_prop(line, stemlines, legend)
        else:
            for lm, m in zip(leg_stemlines, stemlines):
                self.update_prop(lm, m, legend)
        leg_baseline = Line2D([np.min(xdata), np.max(xdata)],
                              [bottom, bottom])
        self.update_prop(leg_baseline, baseline, legend)
        artists = [*leg_stemlines, leg_baseline, leg_markerline]
        for artist in artists:
            artist.set_transform(trans)
        return artists

    def _copy_collection_props(self, legend_handle, orig_handle):
        """
        Copy properties from the `.LineCollection` *orig_handle* to the
        `.Line2D` *legend_handle*.
        """
        legend_handle.set_color(orig_handle.get_color()[0])
        legend_handle.set_linestyle(orig_handle.get_linestyle()[0])
class HandlerTuple(HandlerBase):
    """
    Handler for Tuple.
    """

    def __init__(self, ndivide=1, pad=None, **kwargs):
        """
        Parameters
        ----------
        ndivide : int or None, default: 1
            The number of sections to divide the legend area into. If None,
            use the length of the input tuple.
        pad : float, default: :rc:`legend.borderpad`
            Padding in units of fraction of font size.
        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        self._ndivide = ndivide
        self._pad = pad
        super().__init__(**kwargs)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # docstring inherited
        handler_map = legend.get_legend_handler_map()
        ndivide = (len(orig_handle) if self._ndivide is None
                   else self._ndivide)
        pad = ((legend.borderpad if self._pad is None else self._pad)
               * fontsize)
        if ndivide > 1:
            # Shrink each section to make room for the inter-section pads.
            width = (width - pad * (ndivide - 1)) / ndivide
        # Each sub-handle gets its own slot; cycling lets extra handles wrap
        # around when the tuple is longer than ndivide.
        slot_xdescents = cycle(xdescent - (width + pad) * np.arange(ndivide))
        artists = []
        for sub_handle in orig_handle:
            handler = legend.get_legend_handler(handler_map, sub_handle)
            artists.extend(handler.create_artists(
                legend, sub_handle,
                next(slot_xdescents), ydescent, width, height, fontsize,
                trans))
        return artists
class HandlerPolyCollection(HandlerBase):
    """
    Handler for `.PolyCollection` used in `~.Axes.fill_between` and
    `~.Axes.stackplot`.
    """

    def _update_prop(self, legend_handle, orig_handle):
        def first_color(colors):
            # Reduce a color array to its first entry as an RGBA tuple;
            # fully transparent when the collection defines no colors.
            if colors.size == 0:
                return (0, 0, 0, 0)
            return tuple(colors[0])

        def get_first(prop_array):
            # First element of a property sequence, or None when empty.
            if len(prop_array):
                return prop_array[0]
            else:
                return None

        # orig_handle is a PolyCollection and legend_handle is a Patch.
        # Directly set Patch color attributes (must be RGBA tuples).
        legend_handle._facecolor = first_color(orig_handle.get_facecolor())
        legend_handle._edgecolor = first_color(orig_handle.get_edgecolor())
        legend_handle._original_facecolor = orig_handle._original_facecolor
        legend_handle._original_edgecolor = orig_handle._original_edgecolor
        legend_handle._fill = orig_handle.get_fill()
        legend_handle._hatch = orig_handle.get_hatch()
        # Hatch color is anomalous in having no getters and setters.
        legend_handle._hatch_color = orig_handle._hatch_color
        # Setters are fine for the remaining attributes.
        legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
        legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
        legend_handle.set_transform(get_first(orig_handle.get_transforms()))
        # Alpha is already taken into account by the color attributes.

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        p = Rectangle(xy=(-xdescent, -ydescent),
                      width=width, height=height)
        self.update_prop(p, orig_handle, legend)
        p.set_transform(trans)
        return [p]
venv\Lib\site-packages\matplotlib\lines.py
"""
2D lines with support for a variety of line styles, markers, colors, etc.
"""
import copy
from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors as mcolors, _docstring
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
# Imported here for backward compatibility, even though they don't
# really belong.
from . import _path
from .markers import ( # noqa
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
_log = logging.getLogger(__name__)
def _get_dash_pattern(style):
    """Convert linestyle to dash pattern."""
    if isinstance(style, str):
        # Expand shorthand ('--', ':', ...) to the full style name.
        style = ls_mapper.get(style, style)
    if style in ['solid', 'None']:
        # Un-dashed styles.
        offset, dashes = 0, None
    elif style in ['dashed', 'dashdot', 'dotted']:
        # Named dashed styles: the pattern comes from rcParams.
        offset, dashes = 0, tuple(mpl.rcParams[f'lines.{style}_pattern'])
    elif isinstance(style, tuple):
        # Already an (offset, dashes) pair.
        offset, dashes = style
        if offset is None:
            raise ValueError(f'Unrecognized linestyle: {style!r}')
    else:
        raise ValueError(f'Unrecognized linestyle: {style!r}')
    # normalize offset to be positive and shorter than the dash cycle
    if dashes is not None:
        dsum = sum(dashes)
        if dsum:
            offset %= dsum
    return offset, dashes
def _get_dash_patterns(styles):
    """Convert linestyle or sequence of linestyles to list of dash patterns."""
    try:
        # Common case: *styles* is a single linestyle.
        return [_get_dash_pattern(styles)]
    except ValueError:
        try:
            # Otherwise treat it as a sequence of linestyles.
            return [_get_dash_pattern(style) for style in styles]
        except ValueError as err:
            raise ValueError(
                f'Do not know how to convert {styles!r} to dashes') from err
def _get_inverse_dash_pattern(offset, dashes):
"""Return the inverse of the given dash pattern, for filling the gaps."""
# Define the inverse pattern by moving the last gap to the start of the
# sequence.
gaps = dashes[-1:] + dashes[:-1]
# Set the offset so that this new first segment is skipped
# (see backend_bases.GraphicsContextBase.set_dashes for offset definition).
offset_gaps = offset + dashes[-1]
return offset_gaps, gaps
def _scale_dashes(offset, dashes, lw):
    # Dash scaling can be disabled globally via rcParams.
    if not mpl.rcParams['lines.scale_dashes']:
        return offset, dashes
    # Scale the offset and every dash segment by the linewidth, leaving
    # None entries untouched.
    if dashes is None:
        scaled_dashes = None
    else:
        scaled_dashes = [None if seg is None else seg * lw for seg in dashes]
    return offset * lw, scaled_dashes
def segment_hits(cx, cy, x, y, radius):
    """
    Return the indices of the segments in the polyline with coordinates (*x*,
    *y*) that are within a distance *radius* of the point (*cx*, *cy*).

    Note: the original docstring had the roles of (cx, cy) and (x, y)
    swapped; as the implementation (and the caller in `Line2D.contains`)
    shows, *cx*, *cy* is the query point and *x*, *y* are the polyline
    coordinate arrays.
    """
    # Process single points specially: no segments exist, so test only the
    # point itself.
    if len(x) <= 1:
        res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)
        return res
    # Segment start points (we need to lop the last element off a lot).
    xr, yr = x[:-1], y[:-1]
    # Only look at line segments whose nearest point to (cx, cy) on the line
    # lies within the segment, i.e. 0 <= u <= 1 for the projection
    # parameter u.
    dx, dy = x[1:] - xr, y[1:] - yr
    Lnorm_sq = dx ** 2 + dy ** 2  # Possibly want to eliminate Lnorm==0
    u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq
    candidates = (u >= 0) & (u <= 1)
    # Note that there is a little area near one side of each point
    # which will be near neither segment, and another which will
    # be near both, depending on the angle of the lines.  The
    # following radius test eliminates these ambiguities.
    point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2
    candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
    # For those candidates which remain, determine how far they lie away
    # from the line.
    px, py = xr + u * dx, yr + u * dy
    line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2
    line_hits = line_hits & candidates
    # Point hits come first, then segment hits.
    points, = point_hits.ravel().nonzero()
    lines, = line_hits.ravel().nonzero()
    return np.concatenate((points, lines))
def _mark_every_path(markevery, tpath, affine, ax):
    """
    Helper function that sorts out how to deal the input
    `markevery` and returns the points where markers should be drawn.

    Takes in the `markevery` value and the line path and returns the
    sub-sampled path.
    """
    # pull out the two bits of data we want from the path
    codes, verts = tpath.codes, tpath.vertices

    def _slice_or_none(in_v, slc):
        """Helper function to cope with `codes` being an ndarray or `None`."""
        if in_v is None:
            return None
        return in_v[slc]

    # if just an int, assume starting at 0 and make a tuple
    if isinstance(markevery, Integral):
        markevery = (0, markevery)
    # if just a float, assume starting at 0.0 and make a tuple
    elif isinstance(markevery, Real):
        markevery = (0.0, markevery)

    if isinstance(markevery, tuple):
        if len(markevery) != 2:
            raise ValueError('`markevery` is a tuple but its len is not 2; '
                             f'markevery={markevery}')
        start, step = markevery
        # if step is an int, old behavior: plain strided slicing of vertices
        if isinstance(step, Integral):
            # tuple of 2 int is for backwards compatibility,
            if not isinstance(start, Integral):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'an int, but the first element is not an int; '
                    f'markevery={markevery}')
            # just return, we are done here
            return Path(verts[slice(start, None, step)],
                        _slice_or_none(codes, slice(start, None, step)))
        elif isinstance(step, Real):
            # float step: markers equally spaced by *distance along the
            # path* in display space, relative to the Axes diagonal.
            if not isinstance(start, Real):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'a float, but the first element is not a float or an int; '
                    f'markevery={markevery}')
            if ax is None:
                raise ValueError(
                    "markevery is specified relative to the Axes size, but "
                    "the line does not have a Axes as parent")

            # calc cumulative distance along path (in display coords):
            fin = np.isfinite(verts).all(axis=1)
            fverts = verts[fin]  # drop non-finite vertices
            disp_coords = affine.transform(fverts)

            delta = np.empty((len(disp_coords), 2))
            delta[0, :] = 0
            delta[1:, :] = disp_coords[1:, :] - disp_coords[:-1, :]
            delta = np.hypot(*delta.T).cumsum()
            # calc distance between markers along path based on the Axes
            # bounding box diagonal being a distance of unity:
            (x0, y0), (x1, y1) = ax.transAxes.transform([[0, 0], [1, 1]])
            scale = np.hypot(x1 - x0, y1 - y0)
            marker_delta = np.arange(start * scale, delta[-1], step * scale)
            # find closest actual data point that is closest to
            # the theoretical distance along the path:
            inds = np.abs(delta[np.newaxis, :] - marker_delta[:, np.newaxis])
            inds = inds.argmin(axis=1)
            inds = np.unique(inds)
            # return, we are done here
            return Path(fverts[inds], _slice_or_none(codes, inds))
        else:
            raise ValueError(
                f"markevery={markevery!r} is a tuple with len 2, but its "
                f"second element is not an int or a float")

    elif isinstance(markevery, slice):
        # mazol tov, it's already a slice, just return
        return Path(verts[markevery], _slice_or_none(codes, markevery))

    elif np.iterable(markevery):
        # fancy indexing
        try:
            return Path(verts[markevery], _slice_or_none(codes, markevery))
        except (ValueError, IndexError) as err:
            raise ValueError(
                f"markevery={markevery!r} is iterable but not a valid numpy "
                f"fancy index") from err
    else:
        raise ValueError(f"markevery={markevery!r} is not a recognized value")
@_docstring.interpd
@_api.define_aliases({
"antialiased": ["aa"],
"color": ["c"],
"drawstyle": ["ds"],
"linestyle": ["ls"],
"linewidth": ["lw"],
"markeredgecolor": ["mec"],
"markeredgewidth": ["mew"],
"markerfacecolor": ["mfc"],
"markerfacecoloralt": ["mfcalt"],
"markersize": ["ms"],
})
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, e.g., one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-': '_draw_solid',
'--': '_draw_dashed',
'-.': '_draw_dash_dot',
':': '_draw_dotted',
'None': '_draw_nothing',
' ': '_draw_nothing',
'': '_draw_nothing',
}
_drawStyles_l = {
'default': '_draw_lines',
'steps-mid': '_draw_steps_mid',
'steps-pre': '_draw_steps_pre',
'steps-post': '_draw_steps_post',
}
_drawStyles_s = {
'steps': '_draw_steps_pre',
}
# drawStyles should now be deprecated.
drawStyles = {**_drawStyles_l, **_drawStyles_s}
# Need a list ordered with long names first:
drawStyleKeys = [*_drawStyles_l, *_drawStyles_s]
# Referenced here to maintain API. These are defined in
# MarkerStyle
markers = MarkerStyle.markers
filled_markers = MarkerStyle.filled_markers
fillStyles = MarkerStyle.fillstyles
zorder = 2
_subslice_optim_min_size = 1000
def __str__(self):
    """Return a compact string representation of this line."""
    if self._label != "":
        return f"Line2D({self._label})"
    if self._x is None:
        return "Line2D()"
    if len(self._x) > 3:
        # Too many points to list: show the first two and the last.
        shown = (self._x[0], self._y[0],
                 self._x[1], self._y[1],
                 self._x[-1], self._y[-1])
        return "Line2D(({:g},{:g}),({:g},{:g}),...,({:g},{:g}))".format(
            *shown)
    # Few points: show them all.
    coords = map("({:g},{:g})".format, self._x, self._y)
    return "Line2D(%s)" % ",".join(coords)
def __init__(self, xdata, ydata, *,
             linewidth=None,  # all Nones default to rc
             linestyle=None,
             color=None,
             gapcolor=None,
             marker=None,
             markersize=None,
             markeredgewidth=None,
             markeredgecolor=None,
             markerfacecolor=None,
             markerfacecoloralt='none',
             fillstyle=None,
             antialiased=None,
             dash_capstyle=None,
             solid_capstyle=None,
             dash_joinstyle=None,
             solid_joinstyle=None,
             pickradius=5,
             drawstyle=None,
             markevery=None,
             **kwargs
             ):
    """
    Create a `.Line2D` instance with *x* and *y* data in sequences of
    *xdata*, *ydata*.

    Additional keyword arguments are `.Line2D` properties:

    %(Line2D:kwdoc)s

    See :meth:`set_linestyle` for a description of the line styles,
    :meth:`set_marker` for a description of the markers, and
    :meth:`set_drawstyle` for a description of the draw styles.
    """
    super().__init__()
    # Convert sequences to NumPy arrays.
    if not np.iterable(xdata):
        raise RuntimeError('xdata must be a sequence')
    if not np.iterable(ydata):
        raise RuntimeError('ydata must be a sequence')
    # Resolve all rc-dependent defaults up front.
    if linewidth is None:
        linewidth = mpl.rcParams['lines.linewidth']
    if linestyle is None:
        linestyle = mpl.rcParams['lines.linestyle']
    if marker is None:
        marker = mpl.rcParams['lines.marker']
    if color is None:
        color = mpl.rcParams['lines.color']
    if markersize is None:
        markersize = mpl.rcParams['lines.markersize']
    if antialiased is None:
        antialiased = mpl.rcParams['lines.antialiased']
    if dash_capstyle is None:
        dash_capstyle = mpl.rcParams['lines.dash_capstyle']
    if dash_joinstyle is None:
        dash_joinstyle = mpl.rcParams['lines.dash_joinstyle']
    if solid_capstyle is None:
        solid_capstyle = mpl.rcParams['lines.solid_capstyle']
    if solid_joinstyle is None:
        solid_joinstyle = mpl.rcParams['lines.solid_joinstyle']
    if drawstyle is None:
        drawstyle = 'default'
    # Cap/join styles are validated by their setters.
    self._dashcapstyle = None
    self._dashjoinstyle = None
    self._solidjoinstyle = None
    self._solidcapstyle = None
    self.set_dash_capstyle(dash_capstyle)
    self.set_dash_joinstyle(dash_joinstyle)
    self.set_solid_capstyle(solid_capstyle)
    self.set_solid_joinstyle(solid_joinstyle)
    # Dash patterns are stored both unscaled and scaled by linewidth.
    self._linestyles = None
    self._drawstyle = None
    self._linewidth = linewidth
    self._unscaled_dash_pattern = (0, None)  # offset, dash
    self._dash_pattern = (0, None)  # offset, dash (scaled by linewidth)
    self.set_linewidth(linewidth)
    self.set_linestyle(linestyle)
    self.set_drawstyle(drawstyle)
    self._color = None
    self.set_color(color)
    if marker is None:
        marker = 'none'  # Default.
    if not isinstance(marker, MarkerStyle):
        self._marker = MarkerStyle(marker, fillstyle)
    else:
        self._marker = marker
    self._gapcolor = None
    self.set_gapcolor(gapcolor)
    # Marker-related properties; initialized to None then set through the
    # setters so validation/normalization happens in one place.
    self._markevery = None
    self._markersize = None
    self._antialiased = None
    self.set_markevery(markevery)
    self.set_antialiased(antialiased)
    self.set_markersize(markersize)
    self._markeredgecolor = None
    self._markeredgewidth = None
    self._markerfacecolor = None
    self._markerfacecoloralt = None
    self.set_markerfacecolor(markerfacecolor)  # Normalizes None to rc.
    self.set_markerfacecoloralt(markerfacecoloralt)
    self.set_markeredgecolor(markeredgecolor)  # Normalizes None to rc.
    self.set_markeredgewidth(markeredgewidth)
    # update kwargs before updating data to give the caller a
    # chance to init axes (and hence unit support)
    self._internal_update(kwargs)
    self.pickradius = pickradius
    self.ind_offset = 0
    # A numeric (non-bool) picker also sets the pick radius.
    if (isinstance(self._picker, Number) and
            not isinstance(self._picker, bool)):
        self._pickradius = self._picker
    # Cached data/path state; filled lazily (see the set_data call below).
    self._xorig = np.asarray([])
    self._yorig = np.asarray([])
    self._invalidx = True
    self._invalidy = True
    self._x = None
    self._y = None
    self._xy = None
    self._path = None
    self._transformed_path = None
    self._subslice = False
    self._x_filled = None  # used in subslicing; only x is needed
    self.set_data(xdata, ydata)
def contains(self, mouseevent):
    """
    Test whether *mouseevent* occurred on the line.

    An event is deemed to have occurred "on" the line if it is less
    than ``self.pickradius`` (default: 5 points) away from it. Use
    `~.Line2D.get_pickradius` or `~.Line2D.set_pickradius` to get or set
    the pick radius.

    Parameters
    ----------
    mouseevent : `~matplotlib.backend_bases.MouseEvent`

    Returns
    -------
    contains : bool
        Whether any values are within the radius.
    details : dict
        A dictionary ``{'ind': pointlist}``, where *pointlist* is a
        list of points of the line that are within the pickradius around
        the event position.

    TODO: sort returned indices by distance
    """
    if self._different_canvas(mouseevent):
        return False, {}
    # Make sure we have data to plot
    if self._invalidy or self._invalidx:
        self.recache()
    if len(self._xy) == 0:
        return False, {}
    # Convert points to pixels
    transformed_path = self._get_transformed_path()
    path, affine = transformed_path.get_transformed_path_and_affine()
    path = affine.transform_path(path)
    xy = path.vertices
    xt = xy[:, 0]
    yt = xy[:, 1]
    # Convert pick radius from points to pixels
    fig = self.get_figure(root=True)
    if fig is None:
        _log.warning('no figure set when check if mouse is on line')
        pixels = self._pickradius  # no dpi available: treat points as pixels
    else:
        pixels = fig.dpi / 72. * self._pickradius
    # The math involved in checking for containment (here and inside of
    # segment_hits) assumes that it is OK to overflow, so temporarily set
    # the error flags accordingly.
    with np.errstate(all='ignore'):
        # Check for collision
        if self._linestyle in ['None', None]:
            # If no line, return the nearby point(s)
            ind, = np.nonzero(
                (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2
                <= pixels ** 2)
        else:
            # If line, return the nearby segment(s)
            ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)
            if self._drawstyle.startswith("steps"):
                # Step drawstyles double the vertex count; map back to data.
                ind //= 2
    ind += self.ind_offset  # compensate for subslicing done in draw()
    # Return the point(s) within radius
    return len(ind) > 0, dict(ind=ind)
def get_pickradius(self):
    """
    Return the pick radius used for containment tests, in points.

    See `.contains` for details on how the radius is used.
    """
    return self._pickradius
def set_pickradius(self, pickradius):
    """
    Set the pick radius used for containment tests.

    See `.contains` for more details.

    Parameters
    ----------
    pickradius : float
        Pick radius, in points.

    Raises
    ------
    ValueError
        If *pickradius* is not a non-negative real number.
    """
    # Reject non-numbers first so the comparison below is always valid.
    if not isinstance(pickradius, Real):
        raise ValueError("pick radius should be a distance")
    if pickradius < 0:
        raise ValueError("pick radius should be a distance")
    self._pickradius = pickradius
# Expose the pick radius as a read/write property wrapping the getter/setter.
pickradius = property(get_pickradius, set_pickradius)
def get_fillstyle(self):
    """
    Return the marker fill style (delegates to the marker object).

    See also `~.Line2D.set_fillstyle`.
    """
    return self._marker.get_fillstyle()
def set_fillstyle(self, fs):
    """
    Set the marker fill style.

    Parameters
    ----------
    fs : {'full', 'left', 'right', 'bottom', 'top', 'none'}
        Possible values:

        - 'full': Fill the whole marker with the *markerfacecolor*.
        - 'left', 'right', 'bottom', 'top': Fill the marker half at
          the given side with the *markerfacecolor*. The other
          half of the marker is filled with *markerfacecoloralt*.
        - 'none': No filling.

        For examples see :ref:`marker_fill_styles`.
    """
    # MarkerStyle is immutable w.r.t. fill style, so build a fresh one
    # carrying over the current marker shape.
    restyled = MarkerStyle(self._marker.get_marker(), fs)
    self.set_marker(restyled)
    self.stale = True
def set_markevery(self, every):
    """
    Set the markevery property to subsample the plot when using markers.

    e.g., if ``every=5``, every 5-th marker will be plotted.

    Parameters
    ----------
    every : None or int or (int, int) or slice or list[int] or float or \
(float, float) or list[bool]
        Which markers to plot.

        - ``every=None``: every point will be plotted.
        - ``every=N``: every N-th marker will be plotted starting with
          marker 0.
        - ``every=(start, N)``: every N-th marker, starting at index
          *start*, will be plotted.
        - ``every=slice(start, end, N)``: every N-th marker, starting at
          index *start*, up to but not including index *end*, will be
          plotted.
        - ``every=[i, j, m, ...]``: only markers at the given indices
          will be plotted.
        - ``every=[True, False, True, ...]``: only positions that are True
          will be plotted. The list must have the same length as the data
          points.
        - ``every=0.1``, (i.e. a float): markers will be spaced at
          approximately equal visual distances along the line; the distance
          along the line between markers is determined by multiplying the
          display-coordinate distance of the Axes bounding-box diagonal
          by the value of *every*.
        - ``every=(0.5, 0.1)`` (i.e. a length-2 tuple of float): similar
          to ``every=0.1`` but the first marker will be offset along the
          line by 0.5 multiplied by the
          display-coordinate-diagonal-distance along the line.

        For examples see
        :doc:`/gallery/lines_bars_and_markers/markevery_demo`.

    Notes
    -----
    Setting *markevery* will still only draw markers at actual data points.
    While the float argument form aims for uniform visual spacing, it has
    to coerce from the ideal spacing to the nearest available data point.
    Depending on the number and distribution of data points, the result
    may still not look evenly spaced.

    When using a start offset to specify the first marker, the offset will
    be from the first data point which may be different from the first
    the visible data point if the plot is zoomed in.

    If zooming in on a plot when using float arguments then the actual
    data points that have markers will change because the distance between
    markers is always determined from the display-coordinates
    axes-bounding-box-diagonal regardless of the actual axes data limits.
    """
    # The value is only validated/interpreted at draw time (_mark_every_path).
    self._markevery = every
    self.stale = True
def get_markevery(self):
    """
    Return the markevery setting used for marker subsampling.

    See also `~.Line2D.set_markevery`.
    """
    return self._markevery
def set_picker(self, p):
    """
    Set the event picker details for the line.

    Parameters
    ----------
    p : float or callable[[Artist, Event], tuple[bool, dict]]
        If a float, it is used as the pick radius in points.
    """
    if not callable(p):
        # A numeric picker doubles as the pick radius.
        self.set_pickradius(p)
    self._picker = p
def get_bbox(self):
    """Return the `.Bbox` enclosing this line's data points."""
    box = Bbox([[0, 0], [0, 0]])
    box.update_from_data_xy(self.get_xydata())
    return box
def get_window_extent(self, renderer=None):
    """
    Return the line's bounding box in display (pixel) space.

    Parameters
    ----------
    renderer : `.RendererBase`, optional
        Unused; accepted for API compatibility.

    Returns
    -------
    `.Bbox`
        The transformed data extent, padded by half the marker size (in
        pixels) when a marker is set.
    """
    bbox = Bbox([[0, 0], [0, 0]])
    trans_data_to_xy = self.get_transform().transform
    bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()),
                             ignore=True)
    # correct for marker size, if any
    if self._marker:
        fig = self.get_figure(root=True)
        # Guard against artists not attached to a figure (mirrors the
        # fallback used in `.contains`): treat points as pixels (dpi=72).
        dpi = fig.dpi if fig is not None else 72
        ms = (self._markersize / 72.0 * dpi) * 0.5
        bbox = bbox.padded(ms)
    return bbox
def set_data(self, *args):
    """
    Set the x and y data.

    Parameters
    ----------
    *args : (2, N) array or two 1D arrays

    See Also
    --------
    set_xdata
    set_ydata
    """
    if len(args) == 1:
        # A single (2, N) array-like was passed; unpack it.
        (x, y), = args
    else:
        x, y = args
    self.set_xdata(x)
    self.set_ydata(y)
def recache_always(self):
    """Force `recache` to re-convert both x and y, even if still valid."""
    self.recache(always=True)
def recache(self, always=False):
    """
    Rebuild the cached unit-converted ``_xy`` array and the `.Path`.

    Parameters
    ----------
    always : bool, default: False
        If True, re-convert x and y even when they are not flagged invalid.
    """
    if always or self._invalidx:
        xconv = self.convert_xunits(self._xorig)
        x = _to_unmasked_float_array(xconv).ravel()
    else:
        x = self._x
    if always or self._invalidy:
        yconv = self.convert_yunits(self._yorig)
        y = _to_unmasked_float_array(yconv).ravel()
    else:
        y = self._y
    self._xy = np.column_stack(np.broadcast_arrays(x, y)).astype(float)
    self._x, self._y = self._xy.T  # views
    self._subslice = False
    if (self.axes
            and len(x) > self._subslice_optim_min_size
            and _path.is_sorted_and_has_non_nan(x)
            and self.axes.name == 'rectilinear'
            and self.axes.get_xscale() == 'linear'
            and self._markevery is None
            and self.get_clip_on()
            and self.get_transform() == self.axes.transData):
        # Long, x-sorted data drawn in plain data coordinates: enable the
        # subslice optimization (draw() then renders only the visible span).
        self._subslice = True
        nanmask = np.isnan(x)
        if nanmask.any():
            # Fill NaNs by interpolation so searchsorted in draw() works.
            self._x_filled = self._x.copy()
            indices = np.arange(len(x))
            self._x_filled[nanmask] = np.interp(
                indices[nanmask], indices[~nanmask], self._x[~nanmask])
        else:
            self._x_filled = self._x
    if self._path is not None:
        interpolation_steps = self._path._interpolation_steps
    else:
        interpolation_steps = 1
    # Apply the drawstyle (e.g. steps) before building the path.
    xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy.T)
    self._path = Path(np.asarray(xy).T,
                      _interpolation_steps=interpolation_steps)
    self._transformed_path = None
    self._invalidx = False
    self._invalidy = False
def _transform_path(self, subslice=None):
    """
    Put a TransformedPath instance at self._transformed_path;
    all invalidation of the transform is then handled by the
    TransformedPath instance.

    Parameters
    ----------
    subslice : slice, optional
        If given, build the path from only that slice of the data (used
        by the draw-time subslice optimization).
    """
    # Masked arrays are now handled by the Path class itself
    if subslice is not None:
        xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy[subslice, :].T)
        _path = Path(np.asarray(xy).T,
                     _interpolation_steps=self._path._interpolation_steps)
    else:
        _path = self._path
    self._transformed_path = TransformedPath(_path, self.get_transform())
def _get_transformed_path(self):
"""Return this line's `~matplotlib.transforms.TransformedPath`."""
if self._transformed_path is None:
self._transform_path()
return self._transformed_path
def set_transform(self, t):
    # docstring inherited
    # Data must be re-processed under the new transform before next draw.
    self._invalidx = True
    self._invalidy = True
    super().set_transform(t)
@allow_rasterization
def draw(self, renderer):
    # docstring inherited
    if not self.get_visible():
        return
    if self._invalidy or self._invalidx:
        self.recache()
    self.ind_offset = 0  # Needed for contains() method.
    if self._subslice and self.axes:
        # Restrict drawing to the x-range currently in view (data must be
        # x-sorted; see recache() for when _subslice is enabled).
        x0, x1 = self.axes.get_xbound()
        i0 = self._x_filled.searchsorted(x0, 'left')
        i1 = self._x_filled.searchsorted(x1, 'right')
        subslice = slice(max(i0 - 1, 0), i1 + 1)
        self.ind_offset = subslice.start
        self._transform_path(subslice)
    else:
        subslice = None
    if self.get_path_effects():
        from matplotlib.patheffects import PathEffectRenderer
        renderer = PathEffectRenderer(self.get_path_effects(), renderer)
    renderer.open_group('line2d', self.get_gid())
    # --- Draw the line itself (unless the linestyle draws nothing). ---
    if self._lineStyles[self._linestyle] != '_draw_nothing':
        tpath, affine = (self._get_transformed_path()
                         .get_transformed_path_and_affine())
        if len(tpath.vertices):
            gc = renderer.new_gc()
            self._set_gc_clip(gc)
            gc.set_url(self.get_url())
            gc.set_antialiased(self._antialiased)
            gc.set_linewidth(self._linewidth)
            if self.is_dashed():
                cap = self._dashcapstyle
                join = self._dashjoinstyle
            else:
                cap = self._solidcapstyle
                join = self._solidjoinstyle
            gc.set_joinstyle(join)
            gc.set_capstyle(cap)
            gc.set_snap(self.get_snap())
            if self.get_sketch_params() is not None:
                gc.set_sketch_params(*self.get_sketch_params())
            # We first draw a path within the gaps if needed.
            if self.is_dashed() and self._gapcolor is not None:
                lc_rgba = mcolors.to_rgba(self._gapcolor, self._alpha)
                gc.set_foreground(lc_rgba, isRGBA=True)
                offset_gaps, gaps = _get_inverse_dash_pattern(
                    *self._dash_pattern)
                gc.set_dashes(offset_gaps, gaps)
                renderer.draw_path(gc, tpath, affine.frozen())
            lc_rgba = mcolors.to_rgba(self._color, self._alpha)
            gc.set_foreground(lc_rgba, isRGBA=True)
            gc.set_dashes(*self._dash_pattern)
            renderer.draw_path(gc, tpath, affine.frozen())
            gc.restore()
    # --- Draw the markers, if any. ---
    if self._marker and self._markersize > 0:
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_url(self.get_url())
        gc.set_linewidth(self._markeredgewidth)
        gc.set_antialiased(self._antialiased)
        ec_rgba = mcolors.to_rgba(
            self.get_markeredgecolor(), self._alpha)
        fc_rgba = mcolors.to_rgba(
            self._get_markerfacecolor(), self._alpha)
        fcalt_rgba = mcolors.to_rgba(
            self._get_markerfacecolor(alt=True), self._alpha)
        # If the edgecolor is "auto", it is set according to the *line*
        # color but inherits the alpha value of the *face* color, if any.
        if (cbook._str_equal(self._markeredgecolor, "auto")
                and not cbook._str_lower_equal(
                    self.get_markerfacecolor(), "none")):
            ec_rgba = ec_rgba[:3] + (fc_rgba[3],)
        gc.set_foreground(ec_rgba, isRGBA=True)
        if self.get_sketch_params() is not None:
            scale, length, randomness = self.get_sketch_params()
            gc.set_sketch_params(scale/2, length/2, 2*randomness)
        marker = self._marker
        # Markers *must* be drawn ignoring the drawstyle (but don't pay the
        # recaching if drawstyle is already "default").
        if self.get_drawstyle() != "default":
            with cbook._setattr_cm(
                    self, _drawstyle="default", _transformed_path=None):
                self.recache()
                self._transform_path(subslice)
                tpath, affine = (self._get_transformed_path()
                                 .get_transformed_points_and_affine())
        else:
            tpath, affine = (self._get_transformed_path()
                             .get_transformed_points_and_affine())
        if len(tpath.vertices):
            # subsample the markers if markevery is not None
            markevery = self.get_markevery()
            if markevery is not None:
                subsampled = _mark_every_path(
                    markevery, tpath, affine, self.axes)
            else:
                subsampled = tpath
            snap = marker.get_snap_threshold()
            if isinstance(snap, Real):
                snap = renderer.points_to_pixels(self._markersize) >= snap
            gc.set_snap(snap)
            gc.set_joinstyle(marker.get_joinstyle())
            gc.set_capstyle(marker.get_capstyle())
            marker_path = marker.get_path()
            marker_trans = marker.get_transform()
            w = renderer.points_to_pixels(self._markersize)
            if cbook._str_equal(marker.get_marker(), ","):
                gc.set_linewidth(0)
            else:
                # Don't scale for pixels, and don't stroke them
                marker_trans = marker_trans.scale(w)
            renderer.draw_markers(gc, marker_path, marker_trans,
                                  subsampled, affine.frozen(),
                                  fc_rgba)
            # Fill the alternate half for half-filled fill styles.
            alt_marker_path = marker.get_alt_path()
            if alt_marker_path:
                alt_marker_trans = marker.get_alt_transform()
                alt_marker_trans = alt_marker_trans.scale(w)
                renderer.draw_markers(
                    gc, alt_marker_path, alt_marker_trans, subsampled,
                    affine.frozen(), fcalt_rgba)
        gc.restore()
    renderer.close_group('line2d')
    self.stale = False
def get_antialiased(self):
    """Return whether antialiased rendering is enabled for this line."""
    return self._antialiased
def get_color(self):
    """
    Return the color of the line.

    See also `~.Line2D.set_color`.
    """
    return self._color
def get_drawstyle(self):
    """
    Return the drawstyle of the line.

    See also `~.Line2D.set_drawstyle`.
    """
    return self._drawstyle
def get_gapcolor(self):
    """
    Return the gapcolor of the line (or None if gaps are unfilled).

    See also `~.Line2D.set_gapcolor`.
    """
    return self._gapcolor
def get_linestyle(self):
    """
    Return the linestyle of the line.

    See also `~.Line2D.set_linestyle`.
    """
    return self._linestyle
def get_linewidth(self):
    """
    Return the linewidth of the line, in points.

    See also `~.Line2D.set_linewidth`.
    """
    return self._linewidth
def get_marker(self):
    """
    Return the marker of the line (delegates to the marker object).

    See also `~.Line2D.set_marker`.
    """
    return self._marker.get_marker()
def get_markeredgecolor(self):
    """
    Return the marker edge color.

    See also `~.Line2D.set_markeredgecolor`.
    """
    mec = self._markeredgecolor
    if cbook._str_equal(mec, 'auto'):
        # 'auto': in classic mode, point/pixel markers follow the line
        # color and filled markers get a black edge; otherwise the edge
        # always follows the line color.
        if mpl.rcParams['_internal.classic_mode']:
            if self._marker.get_marker() in ('.', ','):
                return self._color
            if (self._marker.is_filled()
                    and self._marker.get_fillstyle() != 'none'):
                return 'k'  # Bad hard-wired default...
        return self._color
    else:
        return mec
def get_markeredgewidth(self):
    """
    Return the marker edge width, in points.

    See also `~.Line2D.set_markeredgewidth`.
    """
    return self._markeredgewidth
def _get_markerfacecolor(self, alt=False):
if self._marker.get_fillstyle() == 'none':
return 'none'
fc = self._markerfacecoloralt if alt else self._markerfacecolor
if cbook._str_lower_equal(fc, 'auto'):
return self._color
else:
return fc
def get_markerfacecolor(self):
    """
    Return the marker face color.

    See also `~.Line2D.set_markerfacecolor`.
    """
    return self._get_markerfacecolor(alt=False)
def get_markerfacecoloralt(self):
    """
    Return the alternate marker face color (used for half-filled markers).

    See also `~.Line2D.set_markerfacecoloralt`.
    """
    return self._get_markerfacecolor(alt=True)
def get_markersize(self):
    """
    Return the marker size, in points.

    See also `~.Line2D.set_markersize`.
    """
    return self._markersize
def get_data(self, orig=True):
    """
    Return the line data as an ``(xdata, ydata)`` pair.

    If *orig* is *True*, return the original (unprocessed) data.
    """
    return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
    """
    Return the xdata.

    If *orig* is *True*, return the original data; otherwise return the
    processed (unit-converted) data, recaching first if stale.
    """
    if orig:
        return self._xorig
    if self._invalidx:
        self.recache()  # refresh the processed values
    return self._x
def get_ydata(self, orig=True):
    """
    Return the ydata.

    If *orig* is *True*, return the original data; otherwise return the
    processed (unit-converted) data, recaching first if stale.
    """
    if orig:
        return self._yorig
    if self._invalidy:
        self.recache()  # refresh the processed values
    return self._y
def get_path(self):
    """Return the `~matplotlib.path.Path` associated with this line."""
    if self._invalidy or self._invalidx:
        self.recache()  # rebuild the path from (possibly new) data
    return self._path
def get_xydata(self):
    """Return the *xy* data as a (N, 2) array."""
    if self._invalidy or self._invalidx:
        self.recache()  # rebuild from (possibly new) data
    return self._xy
def set_antialiased(self, b):
    """
    Set whether to use antialiased rendering.

    Parameters
    ----------
    b : bool
    """
    # Only mark stale on an actual change, to avoid spurious redraws.
    if self._antialiased != b:
        self.stale = True
    self._antialiased = b
def set_color(self, color):
    """
    Set the color of the line.

    Parameters
    ----------
    color : :mpltype:`color`
    """
    # Fail fast on invalid colors before touching state.
    mcolors._check_color_like(color=color)
    self._color = color
    self.stale = True
def set_drawstyle(self, drawstyle):
    """
    Set the drawstyle of the plot.

    The drawstyle determines how the points are connected.

    Parameters
    ----------
    drawstyle : {'default', 'steps', 'steps-pre', 'steps-mid', \
'steps-post'}, default: 'default'
        For 'default', the points are connected with straight lines.

        The steps variants connect the points with step-like lines,
        i.e. horizontal lines with vertical steps. They differ in the
        location of the step:

        - 'steps-pre': The step is at the beginning of the line segment,
          i.e. the line will be at the y-value of point to the right.
        - 'steps-mid': The step is halfway between the points.
        - 'steps-post: The step is at the end of the line segment,
          i.e. the line will be at the y-value of the point to the left.
        - 'steps' is equal to 'steps-pre' and is maintained for
          backward-compatibility.

        For examples see :doc:`/gallery/lines_bars_and_markers/step_demo`.
    """
    if drawstyle is None:
        drawstyle = 'default'
    _api.check_in_list(self.drawStyles, drawstyle=drawstyle)
    if self._drawstyle != drawstyle:
        self.stale = True
        # invalidate to trigger a recache of the path
        self._invalidx = True
    self._drawstyle = drawstyle
def set_gapcolor(self, gapcolor):
    """
    Set a color to fill the gaps in the dashed line style.

    .. note::

        Striped lines are created by drawing two interleaved dashed lines.
        There can be overlaps between those two, which may result in
        artifacts when using transparency.

        This functionality is experimental and may change.

    Parameters
    ----------
    gapcolor : :mpltype:`color` or None
        The color with which to fill the gaps. If None, the gaps are
        unfilled.
    """
    if gapcolor is not None:
        # Validate real colors only; None means "leave gaps unfilled".
        mcolors._check_color_like(color=gapcolor)
    self._gapcolor = gapcolor
    self.stale = True
def set_linewidth(self, w):
    """
    Set the line width in points.

    Parameters
    ----------
    w : float
        Line width, in points.
    """
    w = float(w)
    if self._linewidth != w:
        self.stale = True
    self._linewidth = w
    # Dash pattern lengths are specified relative to the linewidth, so
    # rescale the cached pattern whenever the width is set.
    self._dash_pattern = _scale_dashes(*self._unscaled_dash_pattern, w)
def set_linestyle(self, ls):
    """
    Set the linestyle of the line.

    Parameters
    ----------
    ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
        Possible values:

        - A string:

          ========================================== =================
          linestyle                                  description
          ========================================== =================
          ``'-'`` or ``'solid'``                     solid line
          ``'--'`` or ``'dashed'``                   dashed line
          ``'-.'`` or ``'dashdot'``                  dash-dotted line
          ``':'`` or ``'dotted'``                    dotted line
          ``'none'``, ``'None'``, ``' '``, or ``''`` draw nothing
          ========================================== =================

        - Alternatively a dash tuple of the following form can be
          provided::

              (offset, onoffseq)

          where ``onoffseq`` is an even length tuple of on and off ink
          in points. See also :meth:`set_dashes`.

        For examples see :doc:`/gallery/lines_bars_and_markers/linestyles`.
    """
    if isinstance(ls, str):
        if ls in [' ', '', 'none']:
            ls = 'None'
        _api.check_in_list([*self._lineStyles, *ls_mapper_r], ls=ls)
        if ls not in self._lineStyles:
            # Map long names (e.g. 'dashed') to the short form ('--').
            ls = ls_mapper_r[ls]
        self._linestyle = ls
    else:
        # A dash tuple: record a generic dashed style; the concrete
        # pattern is stored in _unscaled_dash_pattern below.
        self._linestyle = '--'
    self._unscaled_dash_pattern = _get_dash_pattern(ls)
    self._dash_pattern = _scale_dashes(
        *self._unscaled_dash_pattern, self._linewidth)
    self.stale = True
@_docstring.interpd
def set_marker(self, marker):
    """
    Set the line marker.

    Parameters
    ----------
    marker : marker style string, `~.path.Path` or `~.markers.MarkerStyle`
        See `~matplotlib.markers` for full description of possible
        arguments.
    """
    # Rebuild the MarkerStyle, preserving the current fill style.
    self._marker = MarkerStyle(marker, self._marker.get_fillstyle())
    self.stale = True
def _set_markercolor(self, name, has_rcdefault, val):
    # Shared backend for the set_marker*color setters.  *name* is the
    # attribute/rcParams suffix (e.g. "markerfacecolor"); *has_rcdefault*
    # selects the fallback for val=None: the rc value, or the string "auto".
    if val is None:
        val = mpl.rcParams[f"lines.{name}"] if has_rcdefault else "auto"
    attr = f"_{name}"
    current = getattr(self, attr)
    if current is None:
        self.stale = True
    else:
        neq = current != val
        # Much faster than `np.any(current != val)` if no arrays are used.
        if neq.any() if isinstance(neq, np.ndarray) else neq:
            self.stale = True
    setattr(self, attr, val)
def set_markeredgecolor(self, ec):
    """
    Set the marker edge color.  None resets to :rc:`lines.markeredgecolor`.

    Parameters
    ----------
    ec : :mpltype:`color`
    """
    self._set_markercolor("markeredgecolor", True, ec)
def set_markerfacecolor(self, fc):
    """
    Set the marker face color.  None resets to :rc:`lines.markerfacecolor`.

    Parameters
    ----------
    fc : :mpltype:`color`
    """
    self._set_markercolor("markerfacecolor", True, fc)
def set_markerfacecoloralt(self, fc):
    """
    Set the alternate marker face color (used for half-filled markers).

    Parameters
    ----------
    fc : :mpltype:`color`
    """
    # No rc default for the alternate face color: None maps to "auto".
    self._set_markercolor("markerfacecoloralt", False, fc)
def set_markeredgewidth(self, ew):
    """
    Set the marker edge width in points.

    Parameters
    ----------
    ew : float
        Marker edge width, in points.  None resets to
        :rc:`lines.markeredgewidth`.
    """
    if ew is None:
        ew = mpl.rcParams['lines.markeredgewidth']
    # Only mark stale on an actual change.
    if self._markeredgewidth != ew:
        self.stale = True
    self._markeredgewidth = ew
def set_markersize(self, sz):
    """
    Set the marker size in points.

    Parameters
    ----------
    sz : float
        Marker size, in points.
    """
    sz = float(sz)  # normalize early so comparison and storage agree
    if self._markersize != sz:
        self.stale = True
    self._markersize = sz
def set_xdata(self, x):
    """
    Set the data array for x.

    Parameters
    ----------
    x : 1D array

    Raises
    ------
    RuntimeError
        If *x* is not iterable.

    See Also
    --------
    set_data
    set_ydata
    """
    if not np.iterable(x):
        raise RuntimeError('x must be a sequence')
    # Shallow-copy so later mutation of the caller's object is isolated.
    self._xorig = copy.copy(x)
    self._invalidx = True
    self.stale = True
def set_ydata(self, y):
    """
    Set the data array for y.

    Parameters
    ----------
    y : 1D array

    Raises
    ------
    RuntimeError
        If *y* is not iterable.

    See Also
    --------
    set_data
    set_xdata
    """
    if not np.iterable(y):
        raise RuntimeError('y must be a sequence')
    # Shallow-copy so later mutation of the caller's object is isolated.
    self._yorig = copy.copy(y)
    self._invalidy = True
    self.stale = True
def set_dashes(self, seq):
    """
    Set the dash sequence.

    The dash sequence is a sequence of floats of even length describing
    the length of dashes and spaces in points.

    For example, (5, 2, 1, 2) describes a sequence of 5 point and 1 point
    dashes separated by 2 point spaces.

    See also `~.Line2D.set_gapcolor`, which allows those spaces to be
    filled with a color.

    Parameters
    ----------
    seq : sequence of floats (on/off ink in points) or (None, None)
        If *seq* is empty or ``(None, None)``, the linestyle will be set
        to solid.
    """
    # An empty or (None, None) sequence means "solid"; otherwise forward
    # the sequence as a zero-offset dash tuple.
    self.set_linestyle(
        '-' if seq == (None, None) or len(seq) == 0 else (0, seq))
def update_from(self, other):
    """
    Copy properties from *other* to self.

    Parameters
    ----------
    other : `.Line2D`
        The line to copy line, dash, and marker properties from.
    """
    super().update_from(other)
    self._linestyle = other._linestyle
    self._linewidth = other._linewidth
    self._color = other._color
    self._gapcolor = other._gapcolor
    self._markersize = other._markersize
    self._markerfacecolor = other._markerfacecolor
    self._markerfacecoloralt = other._markerfacecoloralt
    self._markeredgecolor = other._markeredgecolor
    self._markeredgewidth = other._markeredgewidth
    self._unscaled_dash_pattern = other._unscaled_dash_pattern
    self._dash_pattern = other._dash_pattern
    self._dashcapstyle = other._dashcapstyle
    self._dashjoinstyle = other._dashjoinstyle
    self._solidcapstyle = other._solidcapstyle
    self._solidjoinstyle = other._solidjoinstyle
    # (A redundant second ``self._linestyle = other._linestyle`` that
    # previously appeared here has been removed; no behavior change.)
    self._marker = MarkerStyle(marker=other._marker)
    self._drawstyle = other._drawstyle
@_docstring.interpd
def set_dash_joinstyle(self, s):
    """
    How to join segments of the line if it `~Line2D.is_dashed`.

    The default joinstyle is :rc:`lines.dash_joinstyle`.

    Parameters
    ----------
    s : `.JoinStyle` or %(JoinStyle)s
    """
    js = JoinStyle(s)  # validates *s*, raising on unknown styles
    if self._dashjoinstyle != js:
        self.stale = True
    self._dashjoinstyle = js
@_docstring.interpd
def set_solid_joinstyle(self, s):
    """
    How to join segments if the line is solid (not `~Line2D.is_dashed`).

    The default joinstyle is :rc:`lines.solid_joinstyle`.

    Parameters
    ----------
    s : `.JoinStyle` or %(JoinStyle)s
    """
    js = JoinStyle(s)  # validates *s*, raising on unknown styles
    if self._solidjoinstyle != js:
        self.stale = True
    self._solidjoinstyle = js
def get_dash_joinstyle(self):
    """
    Return the name of the `.JoinStyle` used for dashed lines.

    See also `~.Line2D.set_dash_joinstyle`.
    """
    return self._dashjoinstyle.name
def get_solid_joinstyle(self):
    """
    Return the name of the `.JoinStyle` used for solid lines.

    See also `~.Line2D.set_solid_joinstyle`.
    """
    return self._solidjoinstyle.name
@_docstring.interpd
def set_dash_capstyle(self, s):
    """
    How to draw the end caps if the line is `~Line2D.is_dashed`.

    The default capstyle is :rc:`lines.dash_capstyle`.

    Parameters
    ----------
    s : `.CapStyle` or %(CapStyle)s
    """
    cs = CapStyle(s)  # validates *s*, raising on unknown styles
    if self._dashcapstyle != cs:
        self.stale = True
    self._dashcapstyle = cs
@_docstring.interpd
def set_solid_capstyle(self, s):
    """
    How to draw the end caps if the line is solid (not `~Line2D.is_dashed`)

    The default capstyle is :rc:`lines.solid_capstyle`.

    Parameters
    ----------
    s : `.CapStyle` or %(CapStyle)s
    """
    cs = CapStyle(s)  # validates *s*, raising on unknown styles
    if self._solidcapstyle != cs:
        self.stale = True
    self._solidcapstyle = cs
def get_dash_capstyle(self):
    """
    Return the name of the `.CapStyle` used for dashed lines.

    See also `~.Line2D.set_dash_capstyle`.
    """
    return self._dashcapstyle.name
def get_solid_capstyle(self):
    """
    Return the name of the `.CapStyle` used for solid lines.

    See also `~.Line2D.set_solid_capstyle`.
    """
    return self._solidcapstyle.name
def is_dashed(self):
    """
    Return whether line has a dashed linestyle.

    A custom linestyle is assumed to be dashed, we do not inspect the
    ``onoffseq`` directly.

    See also `~.Line2D.set_linestyle`.
    """
    return self._linestyle in ('--', '-.', ':')
class AxLine(Line2D):
    """
    A helper class that implements `~.Axes.axline`, by recomputing the artist
    transform at draw time.
    """

    def __init__(self, xy1, xy2, slope, **kwargs):
        """
        Parameters
        ----------
        xy1 : (float, float)
            The first set of (x, y) coordinates for the line to pass through.
        xy2 : (float, float) or None
            The second set of (x, y) coordinates for the line to pass through.
            Both *xy2* and *slope* must be passed, but one of them must be None.
        slope : float or None
            The slope of the line. Both *xy2* and *slope* must be passed, but one of
            them must be None.

        Raises
        ------
        TypeError
            If neither or both of *xy2* and *slope* are given.
        """
        # The ([0, 1], [0, 1]) data is a placeholder; the real geometry is
        # produced by get_transform() at draw time.
        super().__init__([0, 1], [0, 1], **kwargs)
        if (xy2 is None and slope is None or
                xy2 is not None and slope is not None):
            raise TypeError(
                "Exactly one of 'xy2' and 'slope' must be given")
        self._slope = slope
        self._xy1 = xy1
        self._xy2 = xy2

    def get_transform(self):
        # Map the placeholder unit data onto the infinite line clipped to
        # the current view limits; recomputed on every draw.
        ax = self.axes
        points_transform = self._transform - ax.transData + ax.transScale
        if self._xy2 is not None:
            # two points were given
            (x1, y1), (x2, y2) = \
                points_transform.transform([self._xy1, self._xy2])
            dx = x2 - x1
            dy = y2 - y1
            if dx == 0:
                if dy == 0:
                    raise ValueError(
                        f"Cannot draw a line through two identical points "
                        f"(x={(x1, x2)}, y={(y1, y2)})")
                slope = np.inf
            else:
                slope = dy / dx
        else:
            # one point and a slope were given
            x1, y1 = points_transform.transform(self._xy1)
            slope = self._slope
        (vxlo, vylo), (vxhi, vyhi) = ax.transScale.transform(ax.viewLim)
        # General case: find intersections with view limits in either
        # direction, and draw between the middle two points.
        if slope == 0:
            start = vxlo, y1
            stop = vxhi, y1
        elif np.isinf(slope):
            start = x1, vylo
            stop = x1, vyhi
        else:
            # Of the four border intersections, sorting and keeping the
            # middle two yields the segment inside the view box.
            _, start, stop, _ = sorted([
                (vxlo, y1 + (vxlo - x1) * slope),
                (vxhi, y1 + (vxhi - x1) * slope),
                (x1 + (vylo - y1) / slope, vylo),
                (x1 + (vyhi - y1) / slope, vyhi),
            ])
        return (BboxTransformTo(Bbox([start, stop]))
                + ax.transLimits + ax.transAxes)

    def draw(self, renderer):
        # docstring inherited
        self._transformed_path = None  # Force regen.
        super().draw(renderer)

    def get_xy1(self):
        """Return the *xy1* value of the line."""
        return self._xy1

    def get_xy2(self):
        """Return the *xy2* value of the line."""
        return self._xy2

    def get_slope(self):
        """Return the *slope* value of the line."""
        return self._slope

    def set_xy1(self, *args, **kwargs):
        """
        Set the *xy1* value of the line.

        Parameters
        ----------
        xy1 : tuple[float, float]
            Points for the line to pass through.
        """
        # Accept both the tuple form and the deprecated separate (x, y) form.
        params = _api.select_matching_signature([
            lambda self, x, y: locals(), lambda self, xy1: locals(),
        ], self, *args, **kwargs)
        if "x" in params:
            _api.warn_deprecated("3.10", message=(
                "Passing x and y separately to AxLine.set_xy1 is deprecated since "
                "%(since)s; pass them as a single tuple instead."))
            xy1 = params["x"], params["y"]
        else:
            xy1 = params["xy1"]
        self._xy1 = xy1

    def set_xy2(self, *args, **kwargs):
        """
        Set the *xy2* value of the line.

        .. note::

            You can only set *xy2* if the line was created using the *xy2*
            parameter. If the line was created using *slope*, please use
            `~.AxLine.set_slope`.

        Parameters
        ----------
        xy2 : tuple[float, float]
            Points for the line to pass through.
        """
        if self._slope is None:
            # Accept both the tuple form and the deprecated (x, y) form.
            params = _api.select_matching_signature([
                lambda self, x, y: locals(), lambda self, xy2: locals(),
            ], self, *args, **kwargs)
            if "x" in params:
                _api.warn_deprecated("3.10", message=(
                    "Passing x and y separately to AxLine.set_xy2 is deprecated since "
                    "%(since)s; pass them as a single tuple instead."))
                xy2 = params["x"], params["y"]
            else:
                xy2 = params["xy2"]
            self._xy2 = xy2
        else:
            raise ValueError("Cannot set an 'xy2' value while 'slope' is set;"
                             " they differ but their functionalities overlap")

    def set_slope(self, slope):
        """
        Set the *slope* value of the line.

        .. note::

            You can only set *slope* if the line was created using the *slope*
            parameter. If the line was created using *xy2*, please use
            `~.AxLine.set_xy2`.

        Parameters
        ----------
        slope : float
            The slope of the line.
        """
        if self._xy2 is None:
            self._slope = slope
        else:
            raise ValueError("Cannot set a 'slope' value while 'xy2' is set;"
                             " they differ but their functionalities overlap")
class VertexSelector:
    """
    Manage the callbacks to maintain a list of selected vertices for `.Line2D`.
    Derived classes should override the `process_selected` method to do
    something with the picks.

    Here is an example which highlights the selected verts with red circles::

        import numpy as np
        import matplotlib.pyplot as plt
        import matplotlib.lines as lines

        class HighlightSelected(lines.VertexSelector):
            def __init__(self, line, fmt='ro', **kwargs):
                super().__init__(line)
                self.markers, = self.axes.plot([], [], fmt, **kwargs)

            def process_selected(self, ind, xs, ys):
                self.markers.set_data(xs, ys)
                self.canvas.draw()

        fig, ax = plt.subplots()
        x, y = np.random.rand(2, 30)
        line, = ax.plot(x, y, 'bs-', picker=5)

        selector = HighlightSelected(line)
        plt.show()
    """

    def __init__(self, line):
        """
        Parameters
        ----------
        line : `~matplotlib.lines.Line2D`
            The line must already have been added to an `~.axes.Axes` and must
            have its picker property set.

        Raises
        ------
        RuntimeError
            If the line is not attached to an Axes or has no picker set.
        """
        if line.axes is None:
            raise RuntimeError('You must first add the line to the Axes')
        if line.get_picker() is None:
            raise RuntimeError('You must first set the picker property '
                               'of the line')
        self.axes = line.axes
        self.line = line
        # _connect_picklable keeps the callback connection across pickling.
        self.cid = self.canvas.callbacks._connect_picklable(
            'pick_event', self.onpick)
        self.ind = set()  # indices of currently selected vertices

    canvas = property(lambda self: self.axes.get_figure(root=True).canvas)

    def process_selected(self, ind, xs, ys):
        """
        Default "do nothing" implementation of the `process_selected` method.

        Parameters
        ----------
        ind : list of int
            The indices of the selected vertices.
        xs, ys : array-like
            The coordinates of the selected vertices.
        """
        pass

    def onpick(self, event):
        """When the line is picked, update the set of selected indices."""
        if event.artist is not self.line:
            return
        # Symmetric difference: picking a vertex toggles its selection.
        self.ind ^= set(event.ind)
        ind = sorted(self.ind)
        xdata, ydata = self.line.get_data()
        self.process_selected(ind, xdata[ind], ydata[ind])
# Module-level re-exports of the Line2D / MarkerStyle style tables.
lineStyles = Line2D._lineStyles
lineMarkers = MarkerStyle.markers
drawStyles = Line2D.drawStyles
fillStyles = MarkerStyle.fillstyles
venv\Lib\site-packages\matplotlib\markers.py
r"""
Functions to handle markers; used by the marker functionality of
`~matplotlib.axes.Axes.plot`, `~matplotlib.axes.Axes.scatter`, and
`~matplotlib.axes.Axes.errorbar`.
All possible markers are defined here:
============================== ====== =========================================
marker symbol description
============================== ====== =========================================
``"."`` |m00| point
``","`` |m01| pixel
``"o"`` |m02| circle
``"v"`` |m03| triangle_down
``"^"`` |m04| triangle_up
``"<"`` |m05| triangle_left
``">"`` |m06| triangle_right
``"1"`` |m07| tri_down
``"2"`` |m08| tri_up
``"3"`` |m09| tri_left
``"4"`` |m10| tri_right
``"8"`` |m11| octagon
``"s"`` |m12| square
``"p"`` |m13| pentagon
``"P"`` |m23| plus (filled)
``"*"`` |m14| star
``"h"`` |m15| hexagon1
``"H"`` |m16| hexagon2
``"+"`` |m17| plus
``"x"`` |m18| x
``"X"`` |m24| x (filled)
``"D"`` |m19| diamond
``"d"`` |m20| thin_diamond
``"|"`` |m21| vline
``"_"`` |m22| hline
``0`` (``TICKLEFT``) |m25| tickleft
``1`` (``TICKRIGHT``) |m26| tickright
``2`` (``TICKUP``) |m27| tickup
``3`` (``TICKDOWN``) |m28| tickdown
``4`` (``CARETLEFT``) |m29| caretleft
``5`` (``CARETRIGHT``) |m30| caretright
``6`` (``CARETUP``) |m31| caretup
``7`` (``CARETDOWN``) |m32| caretdown
``8`` (``CARETLEFTBASE``) |m33| caretleft (centered at base)
``9`` (``CARETRIGHTBASE``) |m34| caretright (centered at base)
``10`` (``CARETUPBASE``) |m35| caretup (centered at base)
``11`` (``CARETDOWNBASE``) |m36| caretdown (centered at base)
``"none"`` or ``"None"`` nothing
``" "`` or ``""`` nothing
``"$...$"`` |m37| Render the string using mathtext.
E.g ``"$f$"`` for marker showing the
letter ``f``.
``verts`` A list of (x, y) pairs used for Path
vertices. The center of the marker is
located at (0, 0) and the size is
normalized, such that the created path
is encapsulated inside the unit cell.
``path`` A `~matplotlib.path.Path` instance.
``(numsides, 0, angle)`` A regular polygon with ``numsides``
sides, rotated by ``angle``.
``(numsides, 1, angle)`` A star-like symbol with ``numsides``
sides, rotated by ``angle``.
``(numsides, 2, angle)`` An asterisk with ``numsides`` sides,
rotated by ``angle``.
============================== ====== =========================================
Note that special symbols can be defined via the
:ref:`STIX math font <mathtext>`,
e.g. ``"$\u266B$"``. For an overview over the STIX font symbols refer to the
`STIX font table <http://www.stixfonts.org/allGlyphs.html>`_.
Also see the :doc:`/gallery/text_labels_and_annotations/stix_fonts_demo`.
Integer numbers from ``0`` to ``11`` create lines and triangles. Those are
equally accessible via capitalized variables, like ``CARETDOWNBASE``.
Hence the following are equivalent::
plt.plot([1, 2, 3], marker=11)
plt.plot([1, 2, 3], marker=matplotlib.markers.CARETDOWNBASE)
Markers join and cap styles can be customized by creating a new instance of
MarkerStyle.
A MarkerStyle can also have a custom `~matplotlib.transforms.Transform`
allowing it to be arbitrarily rotated or offset.
Examples showing the use of markers:
* :doc:`/gallery/lines_bars_and_markers/marker_reference`
* :doc:`/gallery/lines_bars_and_markers/scatter_star_poly`
* :doc:`/gallery/lines_bars_and_markers/multivariate_marker_plot`
.. |m00| image:: /_static/markers/m00.png
.. |m01| image:: /_static/markers/m01.png
.. |m02| image:: /_static/markers/m02.png
.. |m03| image:: /_static/markers/m03.png
.. |m04| image:: /_static/markers/m04.png
.. |m05| image:: /_static/markers/m05.png
.. |m06| image:: /_static/markers/m06.png
.. |m07| image:: /_static/markers/m07.png
.. |m08| image:: /_static/markers/m08.png
.. |m09| image:: /_static/markers/m09.png
.. |m10| image:: /_static/markers/m10.png
.. |m11| image:: /_static/markers/m11.png
.. |m12| image:: /_static/markers/m12.png
.. |m13| image:: /_static/markers/m13.png
.. |m14| image:: /_static/markers/m14.png
.. |m15| image:: /_static/markers/m15.png
.. |m16| image:: /_static/markers/m16.png
.. |m17| image:: /_static/markers/m17.png
.. |m18| image:: /_static/markers/m18.png
.. |m19| image:: /_static/markers/m19.png
.. |m20| image:: /_static/markers/m20.png
.. |m21| image:: /_static/markers/m21.png
.. |m22| image:: /_static/markers/m22.png
.. |m23| image:: /_static/markers/m23.png
.. |m24| image:: /_static/markers/m24.png
.. |m25| image:: /_static/markers/m25.png
.. |m26| image:: /_static/markers/m26.png
.. |m27| image:: /_static/markers/m27.png
.. |m28| image:: /_static/markers/m28.png
.. |m29| image:: /_static/markers/m29.png
.. |m30| image:: /_static/markers/m30.png
.. |m31| image:: /_static/markers/m31.png
.. |m32| image:: /_static/markers/m32.png
.. |m33| image:: /_static/markers/m33.png
.. |m34| image:: /_static/markers/m34.png
.. |m35| image:: /_static/markers/m35.png
.. |m36| image:: /_static/markers/m36.png
.. |m37| image:: /_static/markers/m37.png
"""
import copy
from collections.abc import Sized
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .path import Path
from .transforms import IdentityTransform, Affine2D
from ._enums import JoinStyle, CapStyle
# special-purpose marker identifiers: integer marker codes 0-11 for the
# tick/caret markers (also accepted directly as `marker=` values).
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
 CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
 CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = range(12)

# Shared zero-vertex sentinel path; MarkerStyle._recache resets `_path` to
# this before running the marker builder.
_empty_path = Path(np.empty((0, 2)))
class MarkerStyle:
    """
    A class representing marker types.

    Instances are immutable. If you need to change anything, create a new
    instance.

    Attributes
    ----------
    markers : dict
        All known markers.
    filled_markers : tuple
        All known filled markers. This is a subset of *markers*.
    fillstyles : tuple
        The supported fillstyles.
    """

    # Maps each marker symbol (string, or one of the tick/caret integer codes
    # above) to a short name; `_set_marker` dispatches to the corresponding
    # ``_set_<name>`` builder method.
    markers = {
        '.': 'point',
        ',': 'pixel',
        'o': 'circle',
        'v': 'triangle_down',
        '^': 'triangle_up',
        '<': 'triangle_left',
        '>': 'triangle_right',
        '1': 'tri_down',
        '2': 'tri_up',
        '3': 'tri_left',
        '4': 'tri_right',
        '8': 'octagon',
        's': 'square',
        'p': 'pentagon',
        '*': 'star',
        'h': 'hexagon1',
        'H': 'hexagon2',
        '+': 'plus',
        'x': 'x',
        'D': 'diamond',
        'd': 'thin_diamond',
        '|': 'vline',
        '_': 'hline',
        'P': 'plus_filled',
        'X': 'x_filled',
        TICKLEFT: 'tickleft',
        TICKRIGHT: 'tickright',
        TICKUP: 'tickup',
        TICKDOWN: 'tickdown',
        CARETLEFT: 'caretleft',
        CARETRIGHT: 'caretright',
        CARETUP: 'caretup',
        CARETDOWN: 'caretdown',
        CARETLEFTBASE: 'caretleftbase',
        CARETRIGHTBASE: 'caretrightbase',
        CARETUPBASE: 'caretupbase',
        CARETDOWNBASE: 'caretdownbase',
        "None": 'nothing',
        "none": 'nothing',
        ' ': 'nothing',
        '': 'nothing'
    }

    # Just used for informational purposes.  is_filled()
    # is calculated in the _set_* functions.
    filled_markers = (
        '.', 'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
        'P', 'X')

    fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
    # Fillstyles that split the marker into a primary and an alternate half.
    _half_fillstyles = ('left', 'right', 'bottom', 'top')

    def __init__(self, marker,
                 fillstyle=None, transform=None, capstyle=None, joinstyle=None):
        """
        Parameters
        ----------
        marker : str, array-like, Path, MarkerStyle
            - Another instance of `MarkerStyle` copies the details of that *marker*.
            - For other possible marker values, see the module docstring
              `matplotlib.markers`.

        fillstyle : str, default: :rc:`markers.fillstyle`
            One of 'full', 'left', 'right', 'bottom', 'top', 'none'.

        transform : `~matplotlib.transforms.Transform`, optional
            Transform that will be combined with the native transform of the
            marker.

        capstyle : `.CapStyle` or %(CapStyle)s, optional
            Cap style that will override the default cap style of the marker.

        joinstyle : `.JoinStyle` or %(JoinStyle)s, optional
            Join style that will override the default join style of the marker.
        """
        self._marker_function = None
        self._user_transform = transform
        self._user_capstyle = CapStyle(capstyle) if capstyle is not None else None
        self._user_joinstyle = JoinStyle(joinstyle) if joinstyle is not None else None
        # Order matters: the fillstyle must be known before the marker builder
        # runs (via _set_marker -> _recache).
        self._set_fillstyle(fillstyle)
        self._set_marker(marker)

    def _recache(self):
        # Reset all cached path/transform state and re-run the selected
        # ``_set_*`` builder; no-op until a marker function has been chosen.
        if self._marker_function is None:
            return
        self._path = _empty_path
        self._transform = IdentityTransform()
        self._alt_path = None
        self._alt_transform = None
        self._snap_threshold = None
        self._joinstyle = JoinStyle.round
        self._capstyle = self._user_capstyle or CapStyle.butt
        # Initial guess: Assume the marker is filled unless the fillstyle is
        # set to 'none'. The marker function will override this for unfilled
        # markers.
        self._filled = self._fillstyle != 'none'
        self._marker_function()

    def __bool__(self):
        # A marker is truthy iff its path has at least one vertex.
        return bool(len(self._path.vertices))

    def is_filled(self):
        return self._filled

    def get_fillstyle(self):
        return self._fillstyle

    def _set_fillstyle(self, fillstyle):
        """
        Set the fillstyle.

        Parameters
        ----------
        fillstyle : {'full', 'left', 'right', 'bottom', 'top', 'none'}
            The part of the marker surface that is colored with
            markerfacecolor.
        """
        if fillstyle is None:
            fillstyle = mpl.rcParams['markers.fillstyle']
        _api.check_in_list(self.fillstyles, fillstyle=fillstyle)
        self._fillstyle = fillstyle

    def get_joinstyle(self):
        return self._joinstyle.name

    def get_capstyle(self):
        return self._capstyle.name

    def get_marker(self):
        return self._marker

    def _set_marker(self, marker):
        """
        Set the marker.

        Parameters
        ----------
        marker : str, array-like, Path, MarkerStyle
            - Another instance of `MarkerStyle` copies the details of that *marker*.
            - For other possible marker values see the module docstring
              `matplotlib.markers`.
        """
        # Dispatch on the marker spec: mathtext string, known symbol, (N, 2)
        # vertex array, Path, (numsides, style[, angle]) tuple, another
        # MarkerStyle, or anything Path() itself accepts.
        if isinstance(marker, str) and cbook.is_math_text(marker):
            self._marker_function = self._set_mathtext_path
        elif isinstance(marker, (int, str)) and marker in self.markers:
            self._marker_function = getattr(self, '_set_' + self.markers[marker])
        elif (isinstance(marker, np.ndarray) and marker.ndim == 2 and
                marker.shape[1] == 2):
            self._marker_function = self._set_vertices
        elif isinstance(marker, Path):
            self._marker_function = self._set_path_marker
        elif (isinstance(marker, Sized) and len(marker) in (2, 3) and
                marker[1] in (0, 1, 2)):
            self._marker_function = self._set_tuple_marker
        elif isinstance(marker, MarkerStyle):
            # Copy-construction: take over the other marker's full state.
            self.__dict__ = copy.deepcopy(marker.__dict__)
        else:
            try:
                Path(marker)
                self._marker_function = self._set_vertices
            except ValueError as err:
                raise ValueError(
                    f'Unrecognized marker style {marker!r}') from err

        if not isinstance(marker, MarkerStyle):
            self._marker = marker
            self._recache()

    def get_path(self):
        """
        Return a `.Path` for the primary part of the marker.

        For unfilled markers this is the whole marker, for filled markers,
        this is the area to be drawn with *markerfacecolor*.
        """
        return self._path

    def get_transform(self):
        """
        Return the transform to be applied to the `.Path` from
        `MarkerStyle.get_path()`.
        """
        if self._user_transform is None:
            return self._transform.frozen()
        else:
            return (self._transform + self._user_transform).frozen()

    def get_alt_path(self):
        """
        Return a `.Path` for the alternate part of the marker.

        For unfilled markers, this is *None*; for filled markers, this is the
        area to be drawn with *markerfacecoloralt*.
        """
        return self._alt_path

    def get_alt_transform(self):
        """
        Return the transform to be applied to the `.Path` from
        `MarkerStyle.get_alt_path()`.
        """
        if self._user_transform is None:
            return self._alt_transform.frozen()
        else:
            return (self._alt_transform + self._user_transform).frozen()

    def get_snap_threshold(self):
        return self._snap_threshold

    def get_user_transform(self):
        """Return user supplied part of marker transform."""
        # Implicitly returns None when no user transform was supplied.
        if self._user_transform is not None:
            return self._user_transform.frozen()

    def transformed(self, transform):
        """
        Return a new version of this marker with the transform applied.

        Parameters
        ----------
        transform : `~matplotlib.transforms.Affine2D`
            Transform will be combined with current user supplied transform.
        """
        new_marker = MarkerStyle(self)
        if new_marker._user_transform is not None:
            new_marker._user_transform += transform
        else:
            new_marker._user_transform = transform
        return new_marker

    def rotated(self, *, deg=None, rad=None):
        """
        Return a new version of this marker rotated by specified angle.

        Parameters
        ----------
        deg : float, optional
            Rotation angle in degrees.
        rad : float, optional
            Rotation angle in radians.

        .. note:: You must specify exactly one of deg or rad.
        """
        if deg is None and rad is None:
            raise ValueError('One of deg or rad is required')
        if deg is not None and rad is not None:
            raise ValueError('Only one of deg and rad can be supplied')
        new_marker = MarkerStyle(self)
        if new_marker._user_transform is None:
            new_marker._user_transform = Affine2D()
        if deg is not None:
            new_marker._user_transform.rotate_deg(deg)
        if rad is not None:
            new_marker._user_transform.rotate(rad)
        return new_marker

    def scaled(self, sx, sy=None):
        """
        Return new marker scaled by specified scale factors.

        If *sy* is not given, the same scale is applied in both the *x*- and
        *y*-directions.

        Parameters
        ----------
        sx : float
            *X*-direction scaling factor.
        sy : float, optional
            *Y*-direction scaling factor.
        """
        if sy is None:
            sy = sx
        new_marker = MarkerStyle(self)
        _transform = new_marker._user_transform or Affine2D()
        new_marker._user_transform = _transform.scale(sx, sy)
        return new_marker

    # ----- ``_set_*`` builders: each fills in _path/_transform (and possibly
    # _alt_path/_alt_transform, _filled, _joinstyle, _snap_threshold) for one
    # marker kind; called from _recache.

    def _set_nothing(self):
        # The 'nothing' markers ("None", "none", " ", "") draw nothing.
        self._filled = False

    def _set_custom_marker(self, path):
        # Normalize an arbitrary path so it fits in the unit cell.
        rescale = np.max(np.abs(path.vertices))  # max of x's and y's.
        self._transform = Affine2D().scale(0.5 / rescale)
        self._path = path

    def _set_path_marker(self):
        self._set_custom_marker(self._marker)

    def _set_vertices(self):
        self._set_custom_marker(Path(self._marker))

    def _set_tuple_marker(self):
        # (numsides, symstyle[, rotation]): symstyle 0 = regular polygon,
        # 1 = star, 2 = asterisk (unfilled).
        marker = self._marker
        if len(marker) == 2:
            numsides, rotation = marker[0], 0.0
        elif len(marker) == 3:
            numsides, rotation = marker[0], marker[2]
        symstyle = marker[1]
        if symstyle == 0:
            self._path = Path.unit_regular_polygon(numsides)
            self._joinstyle = self._user_joinstyle or JoinStyle.miter
        elif symstyle == 1:
            self._path = Path.unit_regular_star(numsides)
            self._joinstyle = self._user_joinstyle or JoinStyle.bevel
        elif symstyle == 2:
            self._path = Path.unit_regular_asterisk(numsides)
            self._filled = False
            self._joinstyle = self._user_joinstyle or JoinStyle.bevel
        else:
            raise ValueError(f"Unexpected tuple marker: {marker}")
        self._transform = Affine2D().scale(0.5).rotate_deg(rotation)

    def _set_mathtext_path(self):
        """
        Draw mathtext markers '$...$' using `.TextPath` object.

        Submitted by tcb
        """
        from matplotlib.text import TextPath

        # again, the properties could be initialised just once outside
        # this function
        text = TextPath(xy=(0, 0), s=self.get_marker(),
                        usetex=mpl.rcParams['text.usetex'])
        if len(text.vertices) == 0:
            return

        # Center the glyph on (0, 0) and scale its larger extent to 1.
        bbox = text.get_extents()
        max_dim = max(bbox.width, bbox.height)
        self._transform = (
            Affine2D()
            .translate(-bbox.xmin + 0.5 * -bbox.width, -bbox.ymin + 0.5 * -bbox.height)
            .scale(1.0 / max_dim))
        self._path = text
        self._snap = False

    def _half_fill(self):
        # True when the current fillstyle only fills half the marker.
        return self.get_fillstyle() in self._half_fillstyles

    def _set_circle(self, size=1.0):
        self._transform = Affine2D().scale(0.5 * size)
        self._snap_threshold = np.inf
        if not self._half_fill():
            self._path = Path.unit_circle()
        else:
            # One half-circle path, rotated per fillstyle; the alternate half
            # is the same path rotated a further 180 degrees.
            self._path = self._alt_path = Path.unit_circle_righthalf()
            fs = self.get_fillstyle()
            self._transform.rotate_deg(
                {'right': 0, 'top': 90, 'left': 180, 'bottom': 270}[fs])
            self._alt_transform = self._transform.frozen().rotate_deg(180.)

    def _set_point(self):
        # A point is a half-size circle.
        self._set_circle(size=0.5)

    def _set_pixel(self):
        self._path = Path.unit_rectangle()
        # Ideally, you'd want -0.5, -0.5 here, but then the snapping
        # algorithm in the Agg backend will round this to a 2x2
        # rectangle from (-1, -1) to (1, 1).  By offsetting it
        # slightly, we can force it to be (0, 0) to (1, 1), which both
        # makes it only be a single pixel and places it correctly
        # aligned to 1-width stroking (i.e. the ticks).  This hack is
        # the best of a number of bad alternatives, mainly because the
        # backends are not aware of what marker is actually being used
        # beyond just its path data.
        self._transform = Affine2D().translate(-0.49999, -0.49999)
        self._snap_threshold = None

    _triangle_path = Path._create_closed([[0, 1], [-1, -1], [1, -1]])
    # Going down halfway looks too small.  Golden ratio is too far.
    _triangle_path_u = Path._create_closed([[0, 1], [-3/5, -1/5], [3/5, -1/5]])
    _triangle_path_d = Path._create_closed(
        [[-3/5, -1/5], [3/5, -1/5], [1, -1], [-1, -1]])
    _triangle_path_l = Path._create_closed([[0, 1], [0, -1], [-1, -1]])
    _triangle_path_r = Path._create_closed([[0, 1], [0, -1], [1, -1]])

    def _set_triangle(self, rot, skip):
        # *rot* rotates the triangle; *skip* rotates the half-path lookup so
        # that e.g. 'top' still means the visual top after rotation.
        self._transform = Affine2D().scale(0.5).rotate_deg(rot)
        self._snap_threshold = 5.0

        if not self._half_fill():
            self._path = self._triangle_path
        else:
            mpaths = [self._triangle_path_u,
                      self._triangle_path_l,
                      self._triangle_path_d,
                      self._triangle_path_r]

            fs = self.get_fillstyle()
            if fs == 'top':
                self._path = mpaths[(0 + skip) % 4]
                self._alt_path = mpaths[(2 + skip) % 4]
            elif fs == 'bottom':
                self._path = mpaths[(2 + skip) % 4]
                self._alt_path = mpaths[(0 + skip) % 4]
            elif fs == 'left':
                self._path = mpaths[(1 + skip) % 4]
                self._alt_path = mpaths[(3 + skip) % 4]
            else:
                self._path = mpaths[(3 + skip) % 4]
                self._alt_path = mpaths[(1 + skip) % 4]

            self._alt_transform = self._transform

        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_triangle_up(self):
        return self._set_triangle(0.0, 0)

    def _set_triangle_down(self):
        return self._set_triangle(180.0, 2)

    def _set_triangle_left(self):
        return self._set_triangle(90.0, 3)

    def _set_triangle_right(self):
        return self._set_triangle(270.0, 1)

    def _set_square(self):
        self._transform = Affine2D().translate(-0.5, -0.5)
        self._snap_threshold = 2.0
        if not self._half_fill():
            self._path = Path.unit_rectangle()
        else:
            # Build a bottom filled square out of two rectangles, one filled.
            self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
                               [0.0, 0.5], [0.0, 0.0]])
            self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
                                   [0.0, 1.0], [0.0, 0.5]])
            fs = self.get_fillstyle()
            rotate = {'bottom': 0, 'right': 90, 'top': 180, 'left': 270}[fs]
            self._transform.rotate_deg(rotate)
            self._alt_transform = self._transform
        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_diamond(self):
        # A diamond is the unit square rotated 45 degrees.
        self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
        self._snap_threshold = 5.0
        if not self._half_fill():
            self._path = Path.unit_rectangle()
        else:
            self._path = Path([[0, 0], [1, 0], [1, 1], [0, 0]])
            self._alt_path = Path([[0, 0], [0, 1], [1, 1], [0, 0]])
            fs = self.get_fillstyle()
            rotate = {'right': 0, 'top': 90, 'left': 180, 'bottom': 270}[fs]
            self._transform.rotate_deg(rotate)
            self._alt_transform = self._transform
        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_thin_diamond(self):
        self._set_diamond()
        self._transform.scale(0.6, 1.0)

    def _set_pentagon(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 5.0

        polypath = Path.unit_regular_polygon(5)

        if not self._half_fill():
            self._path = polypath
        else:
            verts = polypath.vertices
            y = (1 + np.sqrt(5)) / 4.
            top = Path(verts[[0, 1, 4, 0]])
            bottom = Path(verts[[1, 2, 3, 4, 1]])
            left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
            right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
            self._path, self._alt_path = {
                'top': (top, bottom), 'bottom': (bottom, top),
                'left': (left, right), 'right': (right, left),
            }[self.get_fillstyle()]
            self._alt_transform = self._transform

        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_star(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 5.0

        polypath = Path.unit_regular_star(5, innerCircle=0.381966)

        if not self._half_fill():
            self._path = polypath
        else:
            verts = polypath.vertices
            top = Path(np.concatenate([verts[0:4], verts[7:10], verts[0:1]]))
            bottom = Path(np.concatenate([verts[3:8], verts[3:4]]))
            left = Path(np.concatenate([verts[0:6], verts[0:1]]))
            right = Path(np.concatenate([verts[0:1], verts[5:10], verts[0:1]]))
            self._path, self._alt_path = {
                'top': (top, bottom), 'bottom': (bottom, top),
                'left': (left, right), 'right': (right, left),
            }[self.get_fillstyle()]
            self._alt_transform = self._transform

        self._joinstyle = self._user_joinstyle or JoinStyle.bevel

    def _set_hexagon1(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = None

        polypath = Path.unit_regular_polygon(6)

        if not self._half_fill():
            self._path = polypath
        else:
            verts = polypath.vertices
            # not drawing inside lines
            x = np.abs(np.cos(5 * np.pi / 6.))
            top = Path(np.concatenate([[(-x, 0)], verts[[1, 0, 5]], [(x, 0)]]))
            bottom = Path(np.concatenate([[(-x, 0)], verts[2:5], [(x, 0)]]))
            left = Path(verts[0:4])
            right = Path(verts[[0, 5, 4, 3]])
            self._path, self._alt_path = {
                'top': (top, bottom), 'bottom': (bottom, top),
                'left': (left, right), 'right': (right, left),
            }[self.get_fillstyle()]
            self._alt_transform = self._transform

        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_hexagon2(self):
        # Like hexagon1 but rotated 30 degrees (flat-top orientation).
        self._transform = Affine2D().scale(0.5).rotate_deg(30)
        self._snap_threshold = None

        polypath = Path.unit_regular_polygon(6)

        if not self._half_fill():
            self._path = polypath
        else:
            verts = polypath.vertices
            # not drawing inside lines
            x, y = np.sqrt(3) / 4, 3 / 4.
            top = Path(verts[[1, 0, 5, 4, 1]])
            bottom = Path(verts[1:5])
            left = Path(np.concatenate([
                [(x, y)], verts[:3], [(-x, -y), (x, y)]]))
            right = Path(np.concatenate([
                [(x, y)], verts[5:2:-1], [(-x, -y)]]))
            self._path, self._alt_path = {
                'top': (top, bottom), 'bottom': (bottom, top),
                'left': (left, right), 'right': (right, left),
            }[self.get_fillstyle()]
            self._alt_transform = self._transform

        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_octagon(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 5.0

        polypath = Path.unit_regular_polygon(8)

        if not self._half_fill():
            self._transform.rotate_deg(22.5)
            self._path = polypath
        else:
            x = np.sqrt(2.) / 4.
            self._path = self._alt_path = Path(
                [[0, -1], [0, 1], [-x, 1], [-1, x],
                 [-1, -x], [-x, -1], [0, -1]])
            fs = self.get_fillstyle()
            self._transform.rotate_deg(
                {'left': 0, 'bottom': 90, 'right': 180, 'top': 270}[fs])
            self._alt_transform = self._transform.frozen().rotate_deg(180.0)

        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    _line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])

    def _set_vline(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._line_marker_path

    def _set_hline(self):
        # A horizontal line is the vline rotated 90 degrees.
        self._set_vline()
        self._transform = self._transform.rotate_deg(90)

    _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])

    def _set_tickleft(self):
        # Mirror the rightward tick across x = 0.
        self._transform = Affine2D().scale(-1.0, 1.0)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._tickhoriz_path

    def _set_tickright(self):
        self._transform = Affine2D().scale(1.0, 1.0)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._tickhoriz_path

    _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])

    def _set_tickup(self):
        self._transform = Affine2D().scale(1.0, 1.0)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._tickvert_path

    def _set_tickdown(self):
        # Mirror the upward tick across y = 0.
        self._transform = Affine2D().scale(1.0, -1.0)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._tickvert_path

    # Three disjoint line segments radiating from the origin.
    _tri_path = Path([[0.0, 0.0], [0.0, -1.0],
                      [0.0, 0.0], [0.8, 0.5],
                      [0.0, 0.0], [-0.8, 0.5]],
                     [Path.MOVETO, Path.LINETO,
                      Path.MOVETO, Path.LINETO,
                      Path.MOVETO, Path.LINETO])

    def _set_tri_down(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 5.0
        self._filled = False
        self._path = self._tri_path

    def _set_tri_up(self):
        self._set_tri_down()
        self._transform = self._transform.rotate_deg(180)

    def _set_tri_left(self):
        self._set_tri_down()
        self._transform = self._transform.rotate_deg(270)

    def _set_tri_right(self):
        self._set_tri_down()
        self._transform = self._transform.rotate_deg(90)

    _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])

    def _set_caretdown(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 3.0
        self._filled = False
        self._path = self._caret_path
        self._joinstyle = self._user_joinstyle or JoinStyle.miter

    def _set_caretup(self):
        self._set_caretdown()
        self._transform = self._transform.rotate_deg(180)

    def _set_caretleft(self):
        self._set_caretdown()
        self._transform = self._transform.rotate_deg(270)

    def _set_caretright(self):
        self._set_caretdown()
        self._transform = self._transform.rotate_deg(90)

    # Caret variant whose apex sits at the marker position (the "base").
    _caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])

    def _set_caretdownbase(self):
        self._set_caretdown()
        self._path = self._caret_path_base

    def _set_caretupbase(self):
        self._set_caretdownbase()
        self._transform = self._transform.rotate_deg(180)

    def _set_caretleftbase(self):
        self._set_caretdownbase()
        self._transform = self._transform.rotate_deg(270)

    def _set_caretrightbase(self):
        self._set_caretdownbase()
        self._transform = self._transform.rotate_deg(90)

    _plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
                       [0.0, -1.0], [0.0, 1.0]],
                      [Path.MOVETO, Path.LINETO,
                       Path.MOVETO, Path.LINETO])

    def _set_plus(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 1.0
        self._filled = False
        self._path = self._plus_path

    _x_path = Path([[-1.0, -1.0], [1.0, 1.0],
                    [-1.0, 1.0], [1.0, -1.0]],
                   [Path.MOVETO, Path.LINETO,
                    Path.MOVETO, Path.LINETO])

    def _set_x(self):
        self._transform = Affine2D().scale(0.5)
        self._snap_threshold = 3.0
        self._filled = False
        self._path = self._x_path

    # Filled plus sign as a closed 12-vertex polygon.
    _plus_filled_path = Path._create_closed(np.array([
        (-1, -3), (+1, -3), (+1, -1), (+3, -1), (+3, +1), (+1, +1),
        (+1, +3), (-1, +3), (-1, +1), (-3, +1), (-3, -1), (-1, -1)]) / 6)
    _plus_filled_path_t = Path._create_closed(np.array([
        (+3, 0), (+3, +1), (+1, +1), (+1, +3),
        (-1, +3), (-1, +1), (-3, +1), (-3, 0)]) / 6)

    def _set_plus_filled(self):
        self._transform = Affine2D()
        self._snap_threshold = 5.0
        self._joinstyle = self._user_joinstyle or JoinStyle.miter
        if not self._half_fill():
            self._path = self._plus_filled_path
        else:
            # Rotate top half path to support all partitions
            self._path = self._alt_path = self._plus_filled_path_t
            fs = self.get_fillstyle()
            self._transform.rotate_deg(
                {'top': 0, 'left': 90, 'bottom': 180, 'right': 270}[fs])
            self._alt_transform = self._transform.frozen().rotate_deg(180)

    # Filled X as a closed 12-vertex polygon.
    _x_filled_path = Path._create_closed(np.array([
        (-1, -2), (0, -1), (+1, -2), (+2, -1), (+1, 0), (+2, +1),
        (+1, +2), (0, +1), (-1, +2), (-2, +1), (-1, 0), (-2, -1)]) / 4)
    _x_filled_path_t = Path._create_closed(np.array([
        (+1, 0), (+2, +1), (+1, +2), (0, +1),
        (-1, +2), (-2, +1), (-1, 0)]) / 4)

    def _set_x_filled(self):
        self._transform = Affine2D()
        self._snap_threshold = 5.0
        self._joinstyle = self._user_joinstyle or JoinStyle.miter
        if not self._half_fill():
            self._path = self._x_filled_path
        else:
            # Rotate top half path to support all partitions
            self._path = self._alt_path = self._x_filled_path_t
            fs = self.get_fillstyle()
            self._transform.rotate_deg(
                {'top': 0, 'left': 90, 'bottom': 180, 'right': 270}[fs])
            self._alt_transform = self._transform.frozen().rotate_deg(180)
venv\Lib\site-packages\matplotlib\mathtext.py
r"""
A module for parsing a subset of the TeX math syntax and rendering it to a
Matplotlib backend.
For a tutorial of its usage, see :ref:`mathtext`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: https://pypi.org/project/pyparsing/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
"""
import functools
import logging
import matplotlib as mpl
from matplotlib import _api, _mathtext
from matplotlib.ft2font import LoadFlags
from matplotlib.font_manager import FontProperties
from ._mathtext import ( # noqa: F401, reexported API
RasterParse, VectorParse, get_unicode_index)
# Module-level logger.
_log = logging.getLogger(__name__)

# Present the re-exported helper as belonging to this public module rather
# than the private `matplotlib._mathtext`.
get_unicode_index.__module__ = __name__
##############################################################################
# MAIN
class MathTextParser:
    """Parse TeX math expressions into vector ("path") or raster ("agg") output."""

    # Parser instance shared by all MathTextParser objects; created lazily in
    # `_parse_cached` and cached on the class.
    _parser = None

    # Maps math-fontfamily names (``prop.get_math_fontfamily()``) to fontset
    # classes.
    _font_type_mapping = {
        'cm': _mathtext.BakomaFonts,
        'dejavuserif': _mathtext.DejaVuSerifFonts,
        'dejavusans': _mathtext.DejaVuSansFonts,
        'stix': _mathtext.StixFonts,
        'stixsans': _mathtext.StixSansFonts,
        'custom': _mathtext.UnicodeFonts,
    }

    def __init__(self, output):
        """
        Create a MathTextParser for the given backend *output*.

        Parameters
        ----------
        output : {"path", "agg"}
            Whether to return a `VectorParse` ("path") or a
            `RasterParse` ("agg", or its synonym "macosx").
        """
        self._output_type = _api.check_getitem(
            {"path": "vector", "agg": "raster", "macosx": "raster"},
            output=output.lower())

    def parse(self, s, dpi=72, prop=None, *, antialiased=None):
        """
        Parse the given math expression *s* at the given *dpi*.  If *prop* is
        provided, it is a `.FontProperties` object specifying the "default"
        font to use in the math expression, used for all non-math text.

        The results are cached, so multiple calls to `parse`
        with the same expression should be fast.

        Depending on the *output* type, this returns either a `VectorParse` or
        a `RasterParse`.
        """
        # lru_cache can't decorate parse() directly because prop is
        # mutable, so we key the cache using an internal copy (see
        # Text._get_text_metrics_with_cache for a similar case); likewise,
        # we need to check the mutable state of the text.antialiased and
        # text.hinting rcParams.
        prop = prop.copy() if prop is not None else None
        antialiased = mpl._val_or_rc(antialiased, 'text.antialiased')
        from matplotlib.backends import backend_agg
        load_glyph_flags = {
            "vector": LoadFlags.NO_HINTING,
            "raster": backend_agg.get_hinting_flag(),
        }[self._output_type]
        return self._parse_cached(s, dpi, prop, antialiased, load_glyph_flags)

    @functools.lru_cache(50)
    def _parse_cached(self, s, dpi, prop, antialiased, load_glyph_flags):
        # NOTE(review): lru_cache on a method also keys on (and keeps alive)
        # self; presumably acceptable given the bounded cache size — confirm.
        if prop is None:
            prop = FontProperties()
        fontset_class = _api.check_getitem(
            self._font_type_mapping, fontset=prop.get_math_fontfamily())
        fontset = fontset_class(prop, load_glyph_flags)
        fontsize = prop.get_size_in_points()
        if self._parser is None:  # Cache the parser globally.
            self.__class__._parser = _mathtext.Parser()
        box = self._parser.parse(s, fontset, fontsize, dpi)
        output = _mathtext.ship(box)
        if self._output_type == "vector":
            return output.to_vector()
        elif self._output_type == "raster":
            return output.to_raster(antialiased=antialiased)
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None,
                  *, color=None):
    """
    Given a math expression, renders it in a closely-clipped bounding
    box to an image file.

    Parameters
    ----------
    s : str
        A math expression. The math portion must be enclosed in dollar signs.
    filename_or_obj : str or path-like or file-like
        Where to write the image data.
    prop : `.FontProperties`, optional
        The size and style of the text.
    dpi : float, optional
        The output dpi. If not set, the dpi is determined as for
        `.Figure.savefig`.
    format : str, optional
        The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not set, the
        format is determined as for `.Figure.savefig`.
    color : str, optional
        Foreground color, defaults to :rc:`text.color`.

    Returns
    -------
    float
        The distance of the baseline from the bottom of the image (the
        *depth* reported by the math text parser).
    """
    from matplotlib import figure

    # Measure the expression at 72 dpi so the reported width/height/depth
    # come out in points (1 pt = 1/72 inch).
    width, height, depth, _, _ = MathTextParser('path').parse(s, dpi=72, prop=prop)

    # Size the figure exactly to the text extents, then place the text so the
    # baseline is *depth* above the bottom edge (keeping descenders visible).
    fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
    fig.text(0, depth / height, s, fontproperties=prop, color=color)
    fig.savefig(filename_or_obj, dpi=dpi, format=format)

    return depth
venv\Lib\site-packages\matplotlib\mlab.py
"""
Numerical Python functions written for compatibility with MATLAB
commands with the same names. Most numerical Python functions can be found in
the `NumPy`_ and `SciPy`_ libraries. What remains here is code for performing
spectral computations and kernel density estimations.
.. _NumPy: https://numpy.org
.. _SciPy: https://www.scipy.org
Spectral functions
------------------
`cohere`
Coherence (normalized cross spectral density)
`csd`
Cross spectral density using Welch's average periodogram
`detrend`
Remove the mean or best fit line from an array
`psd`
Power spectral density using Welch's average periodogram
`specgram`
Spectrogram (spectrum over segments of time)
`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
`detrend_mean`
Remove the mean from a line.
`detrend_linear`
Remove the best fit line from a line.
`detrend_none`
Return the original line.
"""
import functools
from numbers import Number
import numpy as np
from matplotlib import _api, _docstring, cbook
def window_hanning(x):
    """
    Apply a Hanning (Hann) taper to *x*.

    Returns the elementwise product of *x* with a Hann window of the same
    length.

    See Also
    --------
    window_none : Another window algorithm.
    """
    taper = np.hanning(len(x))
    return taper * x
def window_none(x):
    """
    Identity window: return *x* unchanged.

    See Also
    --------
    window_hanning : Another window algorithm.
    """
    return x
def detrend(x, key=None, axis=None):
    """
    Return *x* with its trend removed.

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data.
    key : {'default', 'constant', 'mean', 'linear', 'none'} or function
        The detrending algorithm to use. 'default', 'mean', and 'constant'
        all map to `detrend_mean`; 'linear' maps to `detrend_linear`;
        'none' maps to `detrend_none`. A callable may also be given, in
        which case it performs the detrend itself. Defaults to 'mean'.
    axis : int
        The axis along which to do the detrending.

    See Also
    --------
    detrend_mean : Implementation of the 'mean' algorithm.
    detrend_linear : Implementation of the 'linear' algorithm.
    detrend_none : Implementation of the 'none' algorithm.
    """
    # Resolve string aliases by recursing with the concrete function.
    if key is None or key in ('constant', 'mean', 'default'):
        return detrend(x, key=detrend_mean, axis=axis)
    if key == 'linear':
        return detrend(x, key=detrend_linear, axis=axis)
    if key == 'none':
        return detrend(x, key=detrend_none, axis=axis)
    if not callable(key):
        raise ValueError(
            f"Unknown value for key: {key!r}, must be one of: 'default', "
            f"'constant', 'mean', 'linear', or a function")
    data = np.asarray(x)
    if axis is not None and axis + 1 > data.ndim:
        raise ValueError(f'axis(={axis}) out of bounds')
    # Scalars and the simple 1-D/axis-0 case can be handed straight to key.
    if (axis is None and data.ndim == 0) or (not axis and data.ndim == 1):
        return key(data)
    # Prefer the function's own axis support; fall back to apply_along_axis
    # for callables that don't accept an *axis* keyword.
    try:
        return key(data, axis=axis)
    except TypeError:
        return np.apply_along_axis(key, axis=axis, arr=data)
def detrend_mean(x, axis=None):
    """
    Return *x* minus the mean(*x*).

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data; may have any dimensionality.
    axis : int
        The axis along which to take the mean.  See `numpy.mean` for a
        description of this argument.

    See Also
    --------
    detrend_linear : Another detrend algorithm.
    detrend_none : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    arr = np.asarray(x)
    if axis is not None and axis + 1 > arr.ndim:
        raise ValueError('axis(=%s) out of bounds' % axis)
    # keepdims=True lets the mean broadcast back against the original shape.
    return arr - arr.mean(axis, keepdims=True)
def detrend_none(x, axis=None):
    """
    Return *x* unchanged: no detrending.

    Parameters
    ----------
    x : any object
        An object containing the data.
    axis : int
        Ignored; accepted only for signature compatibility with
        `detrend_mean`.

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_linear : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    return x
def detrend_linear(y):
    """
    Return *y* minus its least-squares best fit line ('linear' detrending).

    Parameters
    ----------
    y : 0-D or 1-D array or sequence
        Array or sequence containing the data.

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_none : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    # Fitting via the 2x2 covariance matrix is faster than linalg.lstsq.
    data = np.asarray(y)
    if data.ndim > 1:
        raise ValueError('y cannot have ndim > 1')
    # A scalar has no trend: short-circuit 0-D input.
    if not data.ndim:
        return np.array(0., dtype=data.dtype)
    t = np.arange(data.size, dtype=float)
    cov = np.cov(t, data, bias=1)
    slope = cov[0, 1] / cov[0, 0]
    intercept = data.mean() - slope * t.mean()
    return data - (slope * t + intercept)
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
                     window=None, noverlap=None, pad_to=None,
                     sides=None, scale_by_freq=None, mode=None):
    """
    Private helper implementing the common parts between the psd, csd,
    spectrogram and complex, magnitude, angle, and phase spectrums.

    Parameters
    ----------
    x, y : 1-D array or sequence
        The signal(s). If *y* is None (or is *x* itself), the single-signal
        path is taken and the self-spectrum is computed.
    NFFT : int, default: 256
        Length of each segment.
    Fs : float, default: 2
        Sampling frequency; used to compute *freqs* and *t*.
    detrend_func : callable, default: `detrend_none`
        Applied to each segment before windowing.
    window : callable or ndarray, default: `window_hanning`
        Window function, or a precomputed window vector of length *NFFT*.
    noverlap : int, default: 0
        Points of overlap between segments; must be < *NFFT*.
    pad_to : int, default: *NFFT*
        FFT length (zero padding).
    sides : {'default', 'onesided', 'twosided'}, optional
        'default' selects one-sided for real *x*, two-sided for complex *x*.
    scale_by_freq : bool, optional
        Only honored when *mode* is 'psd'; defaults to True there.
    mode : {'default', 'psd', 'complex', 'magnitude', 'angle', 'phase'}

    Returns
    -------
    result : 2D ndarray
        One column per segment (unaveraged).
    freqs : 1D ndarray
        Frequencies corresponding to the rows of *result*.
    t : 1D ndarray
        Times of the segment midpoints.
    """
    if y is None:
        # if y is None use x for y
        same_data = True
    else:
        # The checks for if y is x are so that we can use the same function to
        # implement the core of psd(), csd(), and spectrogram() without doing
        # extra calculations. We return the unaveraged Pxy, freqs, and t.
        same_data = y is x
    if Fs is None:
        Fs = 2
    if noverlap is None:
        noverlap = 0
    if detrend_func is None:
        detrend_func = detrend_none
    if window is None:
        window = window_hanning
    # if NFFT is set to None use the whole signal
    if NFFT is None:
        NFFT = 256
    if noverlap >= NFFT:
        raise ValueError('noverlap must be less than NFFT')
    if mode is None or mode == 'default':
        mode = 'psd'
    _api.check_in_list(
        ['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'],
        mode=mode)
    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")
    # Make sure we're dealing with a numpy array. If y and x were the same
    # object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
    if sides is None or sides == 'default':
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
    _api.check_in_list(['default', 'onesided', 'twosided'], sides=sides)
    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, NFFT)
        x[n:] = 0
    if not same_data and len(y) < NFFT:
        n = len(y)
        y = np.resize(y, NFFT)
        y[n:] = 0
    if pad_to is None:
        pad_to = NFFT
    # scale_by_freq only makes sense for density (psd) output.
    if mode != 'psd':
        scale_by_freq = False
    elif scale_by_freq is None:
        scale_by_freq = True
    # For real x, ignore the negative frequencies unless told otherwise
    if sides == 'twosided':
        numFreqs = pad_to
        if pad_to % 2:
            freqcenter = (pad_to - 1)//2 + 1
        else:
            freqcenter = pad_to//2
        scaling_factor = 1.
    elif sides == 'onesided':
        if pad_to % 2:
            numFreqs = (pad_to + 1)//2
        else:
            numFreqs = pad_to//2 + 1
        # One-sided spectra double the non-DC/non-Nyquist bins to conserve
        # total power.
        scaling_factor = 2.
    # A callable window is materialized into a vector of length NFFT.
    if not np.iterable(window):
        window = window(np.ones(NFFT, x.dtype))
    if len(window) != NFFT:
        raise ValueError(
            "The window length must match the data's first dimension")
    # Segment x into overlapping NFFT-length columns without copying.
    result = np.lib.stride_tricks.sliding_window_view(
        x, NFFT, axis=0)[::NFFT - noverlap].T
    result = detrend(result, detrend_func, axis=0)
    result = result * window.reshape((-1, 1))
    result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
    freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
    if not same_data:
        # if same_data is False, mode must be 'psd'
        resultY = np.lib.stride_tricks.sliding_window_view(
            y, NFFT, axis=0)[::NFFT - noverlap].T
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = resultY * window.reshape((-1, 1))
        resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
        result = np.conj(result) * resultY
    elif mode == 'psd':
        result = np.conj(result) * result
    elif mode == 'magnitude':
        result = np.abs(result) / window.sum()
    elif mode == 'angle' or mode == 'phase':
        # we unwrap the phase later to handle the onesided vs. twosided case
        result = np.angle(result)
    elif mode == 'complex':
        result /= window.sum()
    if mode == 'psd':
        # Also include scaling factors for one-sided densities and dividing by
        # the sampling frequency, if desired. Scale everything, except the DC
        # component and the NFFT/2 component:
        # if we have a even number of frequencies, don't scale NFFT/2
        if not NFFT % 2:
            slc = slice(1, -1, None)
        # if we have an odd number, just don't scale DC
        else:
            slc = slice(1, None, None)
        result[slc] *= scaling_factor
        # MATLAB divides by the sampling frequency so that density function
        # has units of dB/Hz and can be integrated by the plotted frequency
        # values. Perform the same scaling here.
        if scale_by_freq:
            result /= Fs
            # Scale the spectrum by the norm of the window to compensate for
            # windowing loss; see Bendat & Piersol Sec 11.5.2.
            result /= (window**2).sum()
        else:
            # In this case, preserve power in the segment, not amplitude
            result /= window.sum()**2
    # Segment midpoint times, in units of 1/Fs.
    t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
    if sides == 'twosided':
        # center the frequency range at zero
        freqs = np.roll(freqs, -freqcenter, axis=0)
        result = np.roll(result, -freqcenter, axis=0)
    elif not pad_to % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1
    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=0)
    return result, freqs, t
def _single_spectrum_helper(
        mode, x, Fs=None, window=None, pad_to=None, sides=None):
    """
    Private helper shared by the complex, magnitude, angle, and phase
    spectrum functions: one whole-signal segment, no detrend, no overlap.
    """
    _api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
    n = len(x)
    if pad_to is None:
        pad_to = n
    spec, freqs, _ = _spectral_helper(
        x=x, y=None, NFFT=n, Fs=Fs, detrend_func=detrend_none,
        window=window, noverlap=0, pad_to=pad_to, sides=sides,
        scale_by_freq=False, mode=mode)
    if mode != 'complex':
        spec = spec.real
    # Collapse the single-segment 2D result down to 1-D.
    if spec.ndim == 2 and spec.shape[1] == 1:
        spec = spec[:, 0]
    return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
_docstring.interpd.register(
Spectral="""\
Fs : float, default: 2
The sampling frequency (samples per time unit). It is used to calculate
the Fourier frequencies, *freqs*, in cycles per time unit.
window : callable or ndarray, default: `.window_hanning`
A function or a vector of length *NFFT*. To create window vectors see
`.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
function is passed as the argument, it must take a data segment as an
argument and return the windowed version of the segment.
sides : {'default', 'onesided', 'twosided'}, optional
Which sides of the spectrum to return. 'default' is one-sided for real
data and two-sided for complex data. 'onesided' forces the return of a
one-sided spectrum, while 'twosided' forces two-sided.""",
Single_Spectrum="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. While not increasing the actual resolution of the spectrum (the
minimum distance between resolvable peaks), this can give more points in
the plot, allowing for more detail. This corresponds to the *n* parameter
in the call to `~numpy.fft.fft`. The default is None, which sets *pad_to*
equal to the length of the input signal (i.e. no padding).""",
PSD="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. This can be different from *NFFT*, which specifies the number
of data points used. While not increasing the actual resolution of the
spectrum (the minimum distance between resolvable peaks), this can give
more points in the plot, allowing for more detail. This corresponds to
the *n* parameter in the call to `~numpy.fft.fft`. The default is None,
which sets *pad_to* equal to *NFFT*
NFFT : int, default: 256
The number of data points used in each block for the FFT. A power 2 is
most efficient. This should *NOT* be used to get zero padding, or the
scaling of the result will be incorrect; use *pad_to* for this instead.
detrend : {'none', 'mean', 'linear'} or callable, default: 'none'
The function applied to each segment before fft-ing, designed to remove
the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
but you can use a custom function as well. You can also use a string to
choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
`.detrend_mean`. 'linear' calls `.detrend_linear`.
scale_by_freq : bool, default: True
Whether the resulting density values should be scaled by the scaling
frequency, which gives density in units of 1/Hz. This allows for
integration over the returned frequency values. The default is True for
MATLAB compatibility.""")
@_docstring.interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
        noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
    r"""
    Compute the power spectral density.

    The power spectral density :math:`P_{xx}` is computed with Welch's
    average periodogram method: *x* is split into *NFFT*-length segments,
    each segment is detrended by function *detrend* and windowed by function
    *window*, and the :math:`|\mathrm{fft}(i)|^2` of each segment :math:`i`
    are averaged. *noverlap* gives the length of the overlap between
    segments. If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.

    Parameters
    ----------
    x : 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : int, default: 0 (no overlap)
        The number of points of overlap between segments.

    Returns
    -------
    Pxx : 1-D array
        The values for the power spectrum :math:`P_{xx}` (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *Pxx*

    References
    ----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
    John Wiley & Sons (1986)

    See Also
    --------
    specgram
        `specgram` differs in the default overlap; in not returning the mean
        of the segment periodograms; and in returning the times of the
        segments.
    magnitude_spectrum : returns the magnitude spectrum.
    csd : returns the spectral density between two signals.
    """
    # psd is the self-spectrum special case of csd; the imaginary part is
    # zero up to rounding, so only the real part is returned.
    spec, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
                      window=window, noverlap=noverlap, pad_to=pad_to,
                      sides=sides, scale_by_freq=scale_by_freq)
    return spec.real, freqs
@_docstring.interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
        noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
    """
    Compute the cross-spectral density.

    The cross spectral density :math:`P_{xy}` is computed by Welch's average
    periodogram method: *x* and *y* are split into *NFFT*-length segments,
    each segment is detrended by function *detrend* and windowed by function
    *window*, and the products of the direct FFTs of *x* and *y* are
    averaged over the segments, with a scaling to correct for power loss due
    to windowing. *noverlap* gives the length of the overlap between
    segments. Inputs shorter than *NFFT* are zero padded to *NFFT*.

    Parameters
    ----------
    x, y : 1-D arrays or sequences
        Arrays or sequences containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : int, default: 0 (no overlap)
        The number of points of overlap between segments.

    Returns
    -------
    Pxy : 1-D array
        The values for the cross spectrum :math:`P_{xy}` before scaling
        (real valued)

    freqs : 1-D array
        The frequencies corresponding to the elements in *Pxy*

    References
    ----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
    John Wiley & Sons (1986)

    See Also
    --------
    psd : equivalent to setting ``y = x``.
    """
    if NFFT is None:
        NFFT = 256
    Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
                                     detrend_func=detrend, window=window,
                                     noverlap=noverlap, pad_to=pad_to,
                                     sides=sides, scale_by_freq=scale_by_freq,
                                     mode='psd')
    if Pxy.ndim == 2:
        # Average the per-segment periodograms; with a single segment just
        # drop the trailing axis.
        Pxy = Pxy.mean(axis=1) if Pxy.shape[1] > 1 else Pxy[:, 0]
    return Pxy, freqs
_single_spectrum_docs = """\
Compute the {quantity} of *x*.
Data is padded to a length of *pad_to* and the windowing function *window* is
applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
{Spectral}
{Single_Spectrum}
Returns
-------
spectrum : 1-D array
The {quantity}.
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
See Also
--------
psd
Returns the power spectral density.
complex_spectrum
Returns the complex-valued frequency spectrum.
magnitude_spectrum
Returns the absolute value of the `complex_spectrum`.
angle_spectrum
Returns the angle of the `complex_spectrum`.
phase_spectrum
Returns the phase (unwrapped angle) of the `complex_spectrum`.
specgram
Can return the complex spectrum of segments within the signal.
"""
complex_spectrum = functools.partial(_single_spectrum_helper, "complex")
complex_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="complex-valued frequency spectrum",
**_docstring.interpd.params)
magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude")
magnitude_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="magnitude (absolute value) of the frequency spectrum",
**_docstring.interpd.params)
angle_spectrum = functools.partial(_single_spectrum_helper, "angle")
angle_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="angle of the frequency spectrum (wrapped phase spectrum)",
**_docstring.interpd.params)
phase_spectrum = functools.partial(_single_spectrum_helper, "phase")
phase_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="phase of the frequency spectrum (unwrapped phase spectrum)",
**_docstring.interpd.params)
@_docstring.interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
             noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
             mode=None):
    """
    Compute a spectrogram.

    Data in *x* are split into *NFFT*-length segments and the spectrum of
    each section is computed. The windowing function *window* is applied to
    each segment, and the amount of overlap of each segment is specified
    with *noverlap*.

    Parameters
    ----------
    x : array-like
        1-D array or sequence.

    %(Spectral)s

    %(PSD)s

    noverlap : int, default: 128
        The number of points of overlap between blocks.
    mode : str, default: 'psd'
        What sort of spectrum to use:
            'psd'
                Returns the power spectral density.
            'complex'
                Returns the complex-valued frequency spectrum.
            'magnitude'
                Returns the magnitude spectrum.
            'angle'
                Returns the phase spectrum without unwrapping.
            'phase'
                Returns the phase spectrum with unwrapping.

    Returns
    -------
    spectrum : array-like
        2D array, columns are the periodograms of successive segments.

    freqs : array-like
        1-D array, frequencies corresponding to the rows in *spectrum*.

    t : array-like
        1-D array, the times corresponding to midpoints of segments
        (i.e the columns in *spectrum*).

    See Also
    --------
    psd : differs in the overlap and in the return values.
    complex_spectrum : similar, but with complex valued frequencies.
    magnitude_spectrum : similar single segment when *mode* is 'magnitude'.
    angle_spectrum : similar to single segment when *mode* is 'angle'.
    phase_spectrum : similar to single segment when *mode* is 'phase'.

    Notes
    -----
    *detrend* and *scale_by_freq* only apply when *mode* is set to 'psd'.
    """
    if NFFT is None:
        NFFT = 256  # same default as in _spectral_helper()
    if noverlap is None:
        noverlap = 128  # default in _spectral_helper() is noverlap = 0
    if len(x) <= NFFT:
        _api.warn_external("Only one segment is calculated since parameter "
                           f"NFFT (={NFFT}) >= signal length (={len(x)}).")
    spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
                                      detrend_func=detrend, window=window,
                                      noverlap=noverlap, pad_to=pad_to,
                                      sides=sides,
                                      scale_by_freq=scale_by_freq,
                                      mode=mode)
    if mode != 'complex':
        spec = spec.real  # Needed since helper implements generically
    return spec, freqs, t
@_docstring.interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
           noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    r"""
    The coherence between *x* and *y*. Coherence is the normalized
    cross spectral density:

    .. math::

        C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}

    Parameters
    ----------
    x, y
        Array or sequence containing the data

    %(Spectral)s

    %(PSD)s

    noverlap : int, default: 0 (no overlap)
        The number of points of overlap between segments.

    Returns
    -------
    Cxy : 1-D array
        The coherence vector.

    freqs : 1-D array
        The frequencies for the elements in *Cxy*.

    See Also
    --------
    :func:`psd`, :func:`csd` :
        For information about the methods used to compute :math:`P_{xy}`,
        :math:`P_{xx}` and :math:`P_{yy}`.
    """
    # At least two segments are required for a meaningful average.
    if len(x) < 2 * NFFT:
        raise ValueError(
            "Coherence is calculated by averaging over *NFFT* length "
            "segments. Your signal is too short for your choice of *NFFT*.")
    common = (NFFT, Fs, detrend, window, noverlap, pad_to, sides,
              scale_by_freq)
    Pxx, freqs = psd(x, *common)
    Pyy, _ = psd(y, *common)
    Pxy, _ = csd(x, y, *common)
    Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
    return Cxy, freqs
class GaussianKDE:
    """
    Representation of a kernel-density estimate using Gaussian kernels.

    Parameters
    ----------
    dataset : array-like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2D array with shape (# of dims, # of data).
    bw_method : {'scott', 'silverman'} or float or callable, optional
        The method used to calculate the estimator bandwidth. If a
        float, this will be used directly as `kde.factor`. If a
        callable, it should take a `GaussianKDE` instance as only
        parameter and return a float. If None (default), 'scott' is used.

    Attributes
    ----------
    dataset : ndarray
        The dataset passed to the constructor.
    dim : int
        Number of dimensions.
    num_dp : int
        Number of datapoints.
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`, with which
        the covariance matrix is multiplied.
    covariance : ndarray
        The covariance matrix of *dataset*, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of *covariance*.

    Methods
    -------
    kde.evaluate(points) : ndarray
        Evaluate the estimated pdf on a provided set of points.
    kde(points) : ndarray
        Same as kde.evaluate(points)
    """

    # This implementation with minor modification was too good to pass up.
    # from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py

    def __init__(self, dataset, bw_method=None):
        # Normalize 1-D input into the (dims, points) layout used throughout.
        self.dataset = np.atleast_2d(dataset)
        if not np.array(self.dataset).size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.dim, self.num_dp = np.array(self.dataset).shape

        # Select the bandwidth rule; None keeps the class default (scott).
        if bw_method is None:
            pass
        elif cbook._str_equal(bw_method, 'scott'):
            self.covariance_factor = self.scotts_factor
        elif cbook._str_equal(bw_method, 'silverman'):
            self.covariance_factor = self.silverman_factor
        elif isinstance(bw_method, Number):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            raise ValueError("`bw_method` should be 'scott', 'silverman', a "
                             "scalar or a callable")

        # Computes the covariance matrix for each Gaussian kernel using
        # covariance_factor().
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data
        # (the hasattr check lets re-initialization reuse a prior cache).
        if not hasattr(self, '_data_inv_cov'):
            self.data_covariance = np.atleast_2d(
                np.cov(
                    self.dataset,
                    rowvar=1,
                    bias=False))
            self.data_inv_cov = np.linalg.inv(self.data_covariance)

        self.covariance = self.data_covariance * self.factor ** 2
        self.inv_cov = self.data_inv_cov / self.factor ** 2
        # Normalization constant of the multivariate Gaussian, times the
        # number of data points (for the averaging in evaluate()).
        self.norm_factor = (np.sqrt(np.linalg.det(2 * np.pi * self.covariance))
                            * self.num_dp)

    def scotts_factor(self):
        # Scott's rule of thumb: n ** (-1 / (d + 4)).
        return np.power(self.num_dp, -1. / (self.dim + 4))

    def silverman_factor(self):
        # Silverman's rule of thumb.
        return np.power(
            self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))

    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor

    def evaluate(self, points):
        """
        Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different
                     than the dimensionality of the KDE.
        """
        points = np.atleast_2d(points)

        dim, num_m = np.array(points).shape
        if dim != self.dim:
            raise ValueError(f"points have dimension {dim}, dataset has "
                             f"dimension {self.dim}")

        result = np.zeros(num_m)

        # Loop over whichever of (data, points) is smaller to minimize the
        # number of Python-level iterations.
        if num_m >= self.num_dp:
            # there are more points than data, so loop over data
            for i in range(self.num_dp):
                diff = self.dataset[:, i, np.newaxis] - points
                tdiff = np.dot(self.inv_cov, diff)
                energy = np.sum(diff * tdiff, axis=0) / 2.0
                result = result + np.exp(-energy)
        else:
            # loop over points
            for i in range(num_m):
                diff = self.dataset - points[:, i, np.newaxis]
                tdiff = np.dot(self.inv_cov, diff)
                energy = np.sum(diff * tdiff, axis=0) / 2.0
                result[i] = np.sum(np.exp(-energy), axis=0)

        result = result / self.norm_factor

        return result

    __call__ = evaluate
venv\Lib\site-packages\matplotlib\offsetbox.py
r"""
Container classes for `.Artist`\s.
`OffsetBox`
The base of all container artists defined in this module.
`AnchoredOffsetbox`, `AnchoredText`
Anchor and align an arbitrary `.Artist` or a text relative to the parent
axes or a specific anchor point.
`DrawingArea`
A container with fixed width and height. Children have a fixed position
inside the container and may be clipped.
`HPacker`, `VPacker`
Containers for layouting their children vertically or horizontally.
`PaddedBox`
A container to add a padding around an `.Artist`.
`TextArea`
Contains a single `.Text` instance.
"""
import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
DEBUG = False
def _compat_get_offset(meth):
    """
    Decorator for the get_offset method of OffsetBox and subclasses, that
    allows supporting both the new signature (self, bbox, renderer) and the old
    signature (self, width, height, xdescent, ydescent, renderer).
    """
    # Two dummy lambdas whose signatures describe the accepted call forms;
    # select_matching_signature binds the actual args against one of them.
    sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(),
            lambda self, bbox, renderer: locals()]

    @functools.wraps(meth)
    def get_offset(self, *args, **kwargs):
        params = _api.select_matching_signature(sigs, self, *args, **kwargs)
        # Old-style extent arguments are converted into an equivalent Bbox
        # before delegating to the wrapped method.
        bbox = (params["bbox"] if "bbox" in params else
                Bbox.from_bounds(-params["xdescent"], -params["ydescent"],
                                 params["width"], params["height"]))
        return meth(params["self"], bbox, params["renderer"])
    return get_offset
# for debugging use
def _bbox_artist(*args, **kwargs):
    """Forward to `.patches.bbox_artist`, but only when DEBUG is enabled."""
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
def _get_packed_offsets(widths, total, sep, mode="fixed"):
r"""
Pack boxes specified by their *widths*.
For simplicity of the description, the terminology used here assumes a
horizontal layout, but the function works equally for a vertical layout.
There are three packing *mode*\s:
- 'fixed': The elements are packed tight to the left with a spacing of
*sep* in between. If *total* is *None* the returned total will be the
right edge of the last box. A non-*None* total will be passed unchecked
to the output. In particular this means that right edge of the last
box may be further to the right than the returned total.
- 'expand': Distribute the boxes with equal spacing so that the left edge
of the first box is at 0, and the right edge of the last box is at
*total*. The parameter *sep* is ignored in this mode. A total of *None*
is accepted and considered equal to 1. The total is returned unchanged
(except for the conversion *None* to 1). If the total is smaller than
the sum of the widths, the laid out boxes will overlap.
- 'equal': If *total* is given, the total space is divided in N equal
ranges and each box is left-aligned within its subspace.
Otherwise (*total* is *None*), *sep* must be provided and each box is
left-aligned in its subspace of width ``(max(widths) + sep)``. The
total width is then calculated to be ``N * (max(widths) + sep)``.
Parameters
----------
widths : list of float
Widths of boxes to be packed.
total : float or None
Intended total length. *None* if not used.
sep : float or None
Spacing between boxes.
mode : {'fixed', 'expand', 'equal'}
The packing mode.
Returns
-------
total : float
The total width needed to accommodate the laid out boxes.
offsets : array of float
The left offsets of the boxes.
"""
_api.check_in_list(["fixed", "expand", "equal"], mode=mode)
if mode == "fixed":
offsets_ = np.cumsum([0] + [w + sep for w in widths])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
# This is a bit of a hack to avoid a TypeError when *total*
# is None and used in conjugation with tight layout.
if total is None:
total = 1
if len(widths) > 1:
sep = (total - sum(widths)) / (len(widths) - 1)
else:
sep = 0
offsets_ = np.cumsum([0] + [w + sep for w in widths])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(widths)
if total is None:
if sep is None:
raise ValueError("total and sep cannot both be None when "
"using layout mode 'equal'")
total = (maxh + sep) * len(widths)
else:
sep = total / len(widths) - maxh
offsets = (maxh + sep) * np.arange(len(widths))
return total, offsets
def _get_aligned_offsets(yspans, height, align="baseline"):
"""
Align boxes each specified by their ``(y0, y1)`` spans.
For simplicity of the description, the terminology used here assumes a
horizontal layout (i.e., vertical alignment), but the function works
equally for a vertical layout.
Parameters
----------
yspans
List of (y0, y1) spans of boxes to be aligned.
height : float or None
Intended total height. If None, the maximum of the heights
(``y1 - y0``) in *yspans* is used.
align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}
The alignment anchor of the boxes.
Returns
-------
(y0, y1)
y range spanned by the packing. If a *height* was originally passed
in, then for all alignments other than "baseline", a span of ``(0,
height)`` is used without checking that it is actually large enough).
descent
The descent of the packing.
offsets
The bottom offsets of the boxes.
"""
_api.check_in_list(
["baseline", "left", "top", "right", "bottom", "center"], align=align)
if height is None:
height = max(y1 - y0 for y0, y1 in yspans)
if align == "baseline":
yspan = (min(y0 for y0, y1 in yspans), max(y1 for y0, y1 in yspans))
offsets = [0] * len(yspans)
elif align in ["left", "bottom"]:
yspan = (0, height)
offsets = [-y0 for y0, y1 in yspans]
elif align in ["right", "top"]:
yspan = (0, height)
offsets = [height - y1 for y0, y1 in yspans]
elif align == "center":
yspan = (0, height)
offsets = [(height - (y1 - y0)) * .5 - y0 for y0, y1 in yspans]
return yspan, offsets
class OffsetBox(martist.Artist):
"""
A simple container artist.
The child artists are meant to be drawn at a relative position to its
parent.
Being an artist itself, all parameters are passed on to `.Artist`.
"""
def __init__(self, *args, **kwargs):
    """
    Parameters
    ----------
    *args
        Forwarded to the `.Artist` base class.
    **kwargs
        Applied as artist properties via ``_internal_update``.
    """
    super().__init__(*args)
    self._internal_update(kwargs)
    # Clipping has not been implemented in the OffsetBox family, so
    # disable the clip flag for consistency. It can always be turned back
    # on to zero effect.
    self.set_clip_on(False)
    # Child artists to lay out; subclasses populate this list.
    self._children = []
    # Offset in display units, or a callable (see set_offset).
    self._offset = (0, 0)
def set_figure(self, fig):
    """
    Assign *fig* as the figure of this `.OffsetBox` and of all its children.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
    """
    super().set_figure(fig)
    for child in self.get_children():
        child.set_figure(fig)
@martist.Artist.axes.setter
def axes(self, ax):
    # TODO deal with this better
    # Propagate the axes assignment to all children as well.
    martist.Artist.axes.fset(self, ax)
    for c in self.get_children():
        if c is not None:
            c.axes = ax
def contains(self, mouseevent):
    """
    Delegate the mouse event contains-check to the children.

    As a container, the `.OffsetBox` does not respond itself to
    mouseevents; the first child reporting a hit wins.

    Parameters
    ----------
    mouseevent : `~matplotlib.backend_bases.MouseEvent`

    Returns
    -------
    contains : bool
        Whether any values are within the radius.
    details : dict
        An artist-specific dictionary of details of the event context,
        such as which points are contained in the pick radius. See the
        individual Artist subclasses for details.

    See Also
    --------
    .Artist.contains
    """
    if self._different_canvas(mouseevent):
        return False, {}
    for child in self.get_children():
        hit, info = child.contains(mouseevent)
        if hit:
            return hit, info
    return False, {}
def set_offset(self, xy):
    """
    Set the offset.

    Parameters
    ----------
    xy : (float, float) or callable
        The (x, y) coordinates of the offset in display units. These can
        either be given explicitly as a tuple (x, y), or by providing a
        function that converts the extent into the offset. This function
        must have the signature::

            def offset(width, height, xdescent, ydescent, renderer) \
-> (float, float)
    """
    self._offset = xy
    # Mark the artist as needing a redraw.
    self.stale = True
@_compat_get_offset
def get_offset(self, bbox, renderer):
"""
Return the offset as a tuple (x, y).
The extent parameters have to be provided to handle the case where the
offset is dynamically determined by a callable (see
`~.OffsetBox.set_offset`).
Parameters
----------
bbox : `.Bbox`
renderer : `.RendererBase` subclass
"""
return (
self._offset(bbox.width, bbox.height, -bbox.x0, -bbox.y0, renderer)
if callable(self._offset)
else self._offset)
def set_width(self, width):
"""
Set the width of the box.
Parameters
----------
width : float
"""
self.width = width
self.stale = True
def set_height(self, height):
"""
Set the height of the box.
Parameters
----------
height : float
"""
self.height = height
self.stale = True
def get_visible_children(self):
r"""Return a list of the visible child `.Artist`\s."""
return [c for c in self._children if c.get_visible()]
def get_children(self):
r"""Return a list of the child `.Artist`\s."""
return self._children
def _get_bbox_and_child_offsets(self, renderer):
"""
Return the bbox of the offsetbox and the child offsets.
The bbox should satisfy ``x0 <= x1 and y0 <= y1``.
Parameters
----------
renderer : `.RendererBase` subclass
Returns
-------
bbox
list of (xoffset, yoffset) pairs
"""
raise NotImplementedError(
"get_bbox_and_offsets must be overridden in derived classes")
def get_bbox(self, renderer):
"""Return the bbox of the offsetbox, ignoring parent offsets."""
bbox, offsets = self._get_bbox_and_child_offsets(renderer)
return bbox
def get_window_extent(self, renderer=None):
# docstring inherited
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
bbox = self.get_bbox(renderer)
try: # Some subclasses redefine get_offset to take no args.
px, py = self.get_offset(bbox, renderer)
except TypeError:
px, py = self.get_offset()
return bbox.translated(px, py)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
bbox, offsets = self._get_bbox_and_child_offsets(renderer)
px, py = self.get_offset(bbox, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
_bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class PackerBase(OffsetBox):
    """
    Base class for `HPacker` and `VPacker`, which lay their children out
    sequentially (horizontally resp. vertically) at draw time.
    """

    def __init__(self, pad=0., sep=0., width=None, height=None,
                 align="baseline", mode="fixed", children=None):
        """
        Parameters
        ----------
        pad : float, default: 0.0
            The boundary padding in points.
        sep : float, default: 0.0
            The spacing between items in points.
        width, height : float, optional
            Width and height of the container box in pixels; computed
            automatically when *None*.
        align : {'top', 'bottom', 'left', 'right', 'center', 'baseline'}, \
default: 'baseline'
            Alignment of boxes.
        mode : {'fixed', 'expand', 'equal'}, default: 'fixed'
            The packing mode.

            - 'fixed' packs the given `.Artist`\\s tight with *sep* spacing.
            - 'expand' uses the maximal available space to distribute the
              artists with equal spacing in between.
            - 'equal': Each artist an equal fraction of the available space
              and is left-aligned (or top-aligned) therein.
        children : list of `.Artist`
            The artists to pack.

        Notes
        -----
        *pad* and *sep* are in points and will be scaled with the renderer
        dpi, while *width* and *height* are in pixels.
        """
        super().__init__()
        # Spacing parameters (points; dpi-scaled at layout time).
        self.pad = pad
        self.sep = sep
        # Fixed container extents in pixels, or None for automatic sizing.
        self.width = width
        self.height = height
        # Layout policy.
        self.align = align
        self.mode = mode
        self._children = children
class VPacker(PackerBase):
    """
    VPacker packs its children vertically, automatically adjusting their
    relative positions at draw time.

    .. code-block:: none

       +---------+
       | Child 1 |
       | Child 2 |
       | Child 3 |
       +---------+
    """

    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        if self.width is not None:
            # Expanding sub-packers must fill this container's fixed width.
            for c in self.get_visible_children():
                if isinstance(c, PackerBase) and c.mode == "expand":
                    c.set_width(self.width)

        bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]
        if not bboxes:
            # No visible children: return an empty (padded) bbox instead of
            # letting ``yoffsets[0]`` below raise IndexError.  This mirrors
            # the identical guard in HPacker._get_bbox_and_child_offsets.
            return Bbox.from_bounds(0, 0, 0, 0).padded(pad), []

        (x0, x1), xoffsets = _get_aligned_offsets(
            [bbox.intervalx for bbox in bboxes], self.width, self.align)
        height, yoffsets = _get_packed_offsets(
            [bbox.height for bbox in bboxes], self.height, sep, self.mode)

        # Flip to top-down stacking: packed offsets are measured from the
        # bottom, but children are listed from the top of the box.
        yoffsets = height - (yoffsets + [bbox.y1 for bbox in bboxes])
        # Normalize so the first child's offset defines the box's descent.
        ydescent = yoffsets[0]
        yoffsets = yoffsets - ydescent

        return (
            Bbox.from_bounds(x0, -ydescent, x1 - x0, height).padded(pad),
            [*zip(xoffsets, yoffsets)])
class HPacker(PackerBase):
    """
    HPacker packs its children horizontally, automatically adjusting their
    relative positions at draw time.

    .. code-block:: none

       +-------------------------------+
       | Child 1    Child 2    Child 3 |
       +-------------------------------+
    """

    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor

        bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]
        if not bboxes:
            # No visible children: empty (padded) box, no offsets.
            return Bbox.from_bounds(0, 0, 0, 0).padded(pad), []

        (y0, y1), yoffsets = _get_aligned_offsets(
            [bbox.intervaly for bbox in bboxes], self.height, self.align)
        width, xoffsets = _get_packed_offsets(
            [bbox.width for bbox in bboxes], self.width, sep, self.mode)

        # Shift each child so its own x0 lands at its packed slot; anchor the
        # whole row at the first child's x0.  NOTE(review): ``list - x0`` only
        # works because Bbox coordinates are numpy scalars, which broadcast
        # the subtraction over the plain list — presumably xoffsets is then a
        # numpy array as well; confirm against _get_packed_offsets.
        x0 = bboxes[0].x0
        xoffsets -= ([bbox.x0 for bbox in bboxes] - x0)

        return (Bbox.from_bounds(x0, y0, width, y1 - y0).padded(pad),
                [*zip(xoffsets, yoffsets)])
class PaddedBox(OffsetBox):
    """
    A container that surrounds a single `.Artist` with padding.

    The `.PaddedBox` owns a `.FancyBboxPatch` that can be drawn as a visible
    frame around the padded region.

    .. code-block:: none

       +----------------------------+
       |                            |
       |                            |
       |                            |
       | <--pad--> Artist           |
       |             ^              |
       |            pad             |
       |             v              |
       +----------------------------+

    Attributes
    ----------
    pad : float
        The padding in points.
    patch : `.FancyBboxPatch`
        When *draw_frame* is True, this `.FancyBboxPatch` is made visible and
        creates a border around the box.
    """

    def __init__(self, child, pad=0., *, draw_frame=False, patch_attrs=None):
        """
        Parameters
        ----------
        child : `~matplotlib.artist.Artist`
            The contained `.Artist`.
        pad : float, default: 0.0
            The padding in points, scaled with the renderer dpi (unlike
            *width* and *height*, which are in pixels).
        draw_frame : bool
            Whether to draw the contained `.FancyBboxPatch`.
        patch_attrs : dict or None
            Additional parameters passed to the contained `.FancyBboxPatch`.
        """
        super().__init__()
        self.pad = pad
        self._children = [child]
        # The frame patch; its bounds are synced to the box extent at draw
        # time (see update_frame).
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1., facecolor='w', edgecolor='k',
            mutation_scale=1, snap=True, visible=draw_frame,
            boxstyle="square,pad=0")
        if patch_attrs is not None:
            self.patch.update(patch_attrs)

    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited.
        pad_px = self.pad * renderer.points_to_pixels(1.)
        child_bbox = self._children[0].get_bbox(renderer)
        return child_bbox.padded(pad_px), [(0, 0)]

    def draw(self, renderer):
        # docstring inherited
        bbox, offsets = self._get_bbox_and_child_offsets(renderer)
        px, py = self.get_offset(bbox, renderer)
        # Position every visible child before drawing the frame beneath them.
        for child, (dx, dy) in zip(self.get_visible_children(), offsets):
            child.set_offset((px + dx, py + dy))
        self.draw_frame(renderer)
        for child in self.get_visible_children():
            child.draw(renderer)
        self.stale = False

    def update_frame(self, bbox, fontsize=None):
        """Resize the frame patch to *bbox*; optionally rescale its style."""
        self.patch.set_bounds(bbox.bounds)
        if fontsize:
            self.patch.set_mutation_scale(fontsize)
        self.stale = True

    def draw_frame(self, renderer):
        # Sync the frame to the current extent, then render it.
        self.update_frame(self.get_window_extent(renderer))
        self.patch.draw(renderer)
class DrawingArea(OffsetBox):
    """
    The DrawingArea can contain any Artist as a child. The DrawingArea
    has a fixed width and height. The position of children relative to
    the parent is fixed. The children can be clipped at the
    boundaries of the parent.
    """

    def __init__(self, width, height, xdescent=0., ydescent=0., clip=False):
        """
        Parameters
        ----------
        width, height : float
            Width and height of the container box.
        xdescent, ydescent : float
            Descent of the box in x- and y-direction.
        clip : bool
            Whether to clip the children to the box.
        """
        super().__init__()
        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent
        self._clip_children = clip
        # Children are positioned via these two transforms: dpi_transform
        # scales from points to pixels, offset_transform translates to the
        # container's offset (see get_transform).
        self.offset_transform = mtransforms.Affine2D()
        self.dpi_transform = mtransforms.Affine2D()

    @property
    def clip_children(self):
        """
        If the children of this DrawingArea should be clipped
        by DrawingArea bounding box.
        """
        return self._clip_children

    @clip_children.setter
    def clip_children(self, val):
        self._clip_children = bool(val)
        self.stale = True

    def get_transform(self):
        """
        Return the `~matplotlib.transforms.Transform` applied to the children.
        """
        # dpi scaling first, then translation to the offset.
        return self.dpi_transform + self.offset_transform

    def set_transform(self, t):
        """
        set_transform is ignored.
        """

    def set_offset(self, xy):
        """
        Set the offset of the container.

        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        # Rebuild the offset transform from scratch for the new position.
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """Return offset of the container."""
        return self._offset

    def get_bbox(self, renderer):
        # docstring inherited
        dpi_cor = renderer.points_to_pixels(1.)
        # Extents are stored in points; convert to pixels for the bbox.
        return Bbox.from_bounds(
            -self.xdescent * dpi_cor, -self.ydescent * dpi_cor,
            self.width * dpi_cor, self.height * dpi_cor)

    def add_artist(self, a):
        """Add an `.Artist` to the container box."""
        self._children.append(a)
        # Respect a transform the caller set explicitly; otherwise draw the
        # artist in this container's coordinate system.
        if not a.is_transform_set():
            a.set_transform(self.get_transform())
        if self.axes is not None:
            a.axes = self.axes
        fig = self.get_figure(root=False)
        if fig is not None:
            a.set_figure(fig)

    def draw(self, renderer):
        # docstring inherited

        # Refresh the points-to-pixels scaling for the current renderer dpi.
        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear()
        self.dpi_transform.scale(dpi_cor)

        # At this point the DrawingArea has a transform
        # to the display space so the path created is
        # good for clipping children
        tpath = mtransforms.TransformedPath(
            mpath.Path([[0, 0], [0, self.height],
                        [self.width, self.height],
                        [self.width, 0]]),
            self.get_transform())
        for c in self._children:
            # Only clip children that have no clipping of their own.
            if self._clip_children and not (c.clipbox or c._clippath):
                c.set_clip_path(tpath)
            c.draw(renderer)

        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class TextArea(OffsetBox):
    """
    The TextArea is a container artist for a single Text instance.

    The text is placed at (0, 0) with baseline+left alignment, by default. The
    width and height of the TextArea instance is the width and height of its
    child text.
    """

    def __init__(self, s,
                 *,
                 textprops=None,
                 multilinebaseline=False,
                 ):
        """
        Parameters
        ----------
        s : str
            The text to be displayed.
        textprops : dict, default: {}
            Dictionary of keyword parameters to be passed to the `.Text`
            instance in the TextArea.
        multilinebaseline : bool, default: False
            Whether the baseline for multiline text is adjusted so that it
            is (approximately) center-aligned with single-line text.
        """
        if textprops is None:
            textprops = {}
        self._text = mtext.Text(0, 0, s, **textprops)
        super().__init__()
        self._children = [self._text]
        # The text is positioned via offset_transform (container offset)
        # composed with _baseline_transform (multiline baseline adjustment,
        # see get_bbox).
        self.offset_transform = mtransforms.Affine2D()
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)
        self._multilinebaseline = multilinebaseline

    def set_text(self, s):
        """Set the text of this area as a string."""
        self._text.set_text(s)
        self.stale = True

    def get_text(self):
        """Return the string representation of this area's text."""
        return self._text.get_text()

    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.

        If True, the baseline for multiline text is adjusted so that it is
        (approximately) center-aligned with single-line text.  This is used
        e.g. by the legend implementation so that single-line labels are
        baseline-aligned, but multiline labels are "center"-aligned with them.
        """
        self._multilinebaseline = t
        self.stale = True

    def get_multilinebaseline(self):
        """
        Get multilinebaseline.
        """
        return self._multilinebaseline

    def set_transform(self, t):
        """
        set_transform is ignored.
        """

    def set_offset(self, xy):
        """
        Set the offset of the container.

        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        # Rebuild the offset transform from scratch for the new position.
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """Return offset of the container."""
        return self._offset

    def get_bbox(self, renderer):
        # Reference metrics of the string "lp" — presumably a representative
        # ascent ("l") and descent ("p") sample for the current font; confirm
        # against mtext._get_text_metrics_with_cache.
        _, h_, d_ = mtext._get_text_metrics_with_cache(
            renderer, "lp", self._text._fontproperties,
            ismath="TeX" if self._text.get_usetex() else False,
            dpi=self.get_figure(root=True).dpi)
        bbox, info, yd = self._text._get_layout(renderer)
        w, h = bbox.size

        self._baseline_transform.clear()

        if len(info) > 1 and self._multilinebaseline:
            # Shift the reported baseline so multiline text appears
            # (approximately) center-aligned with single-line text.
            yd_new = 0.5 * h - 0.5 * (h_ - d_)
            self._baseline_transform.translate(0, yd - yd_new)
            yd = yd_new
        else:  # single line
            # Make the box at least as tall as the reference "lp" metrics.
            h_d = max(h_ - d_, h - yd)
            h = h_d + yd

        ha = self._text.get_horizontalalignment()
        # x0 places the box so the text's alignment anchor sits at x=0.
        x0 = {"left": 0, "center": -w / 2, "right": -w}[ha]

        return Bbox.from_bounds(x0, -yd, w, h)

    def draw(self, renderer):
        # docstring inherited
        self._text.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AuxTransformBox(OffsetBox):
    """
    An OffsetBox with an auxiliary transform.

    All child artists are first transformed with *aux_transform*, then
    translated with an offset (the same for all children) so the bounding
    box of the children matches the drawn box.  (In other words, adding an
    arbitrary translation to *aux_transform* has no effect as it will be
    cancelled out by the later offsetting.)

    `AuxTransformBox` is similar to `.DrawingArea`, except that the extent of
    the box is not predetermined but calculated from the window extent of its
    children, and the extent of the children will be calculated in the
    transformed coordinate.
    """

    def __init__(self, aux_transform):
        self.aux_transform = aux_transform
        super().__init__()
        self.offset_transform = mtransforms.Affine2D()
        # ref_offset_transform makes offset_transform always relative to the
        # lower-left corner of the bbox of its children.
        self.ref_offset_transform = mtransforms.Affine2D()

    def add_artist(self, a):
        """Add an `.Artist` to the container box."""
        self._children.append(a)
        a.set_transform(self.get_transform())
        self.stale = True

    def get_transform(self):
        """Return the `.Transform` applied to the children."""
        # Order matters: auxiliary transform first, then normalization to the
        # children's lower-left corner, then the container offset.
        return (self.aux_transform
                + self.ref_offset_transform
                + self.offset_transform)

    def set_transform(self, t):
        """
        set_transform is ignored.
        """

    def set_offset(self, xy):
        """
        Set the offset of the container.

        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        # Rebuild the offset transform from scratch for the new position.
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True

    def get_offset(self):
        """Return offset of the container."""
        return self._offset

    def get_bbox(self, renderer):
        # clear the offset transforms
        _off = self.offset_transform.get_matrix()  # to be restored later
        self.ref_offset_transform.clear()
        self.offset_transform.clear()
        # calculate the extent
        bboxes = [c.get_window_extent(renderer) for c in self._children]
        ub = Bbox.union(bboxes)
        # adjust ref_offset_transform so children are measured from their
        # own lower-left corner
        self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
        self.offset_transform.set_matrix(_off)

        return Bbox.from_bounds(0, 0, ub.width, ub.height)

    def draw(self, renderer):
        # docstring inherited
        for c in self._children:
            c.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AnchoredOffsetbox(OffsetBox):
    """
    An OffsetBox placed according to location *loc*.

    AnchoredOffsetbox has a single child.  When multiple children are needed,
    use an extra OffsetBox to enclose them.  By default, the offset box is
    anchored against its parent Axes.  You may explicitly specify the
    *bbox_to_anchor*.
    """

    zorder = 5  # zorder of the legend

    # Location codes
    codes = {'upper right': 1,
             'upper left': 2,
             'lower left': 3,
             'lower right': 4,
             'right': 5,
             'center left': 6,
             'center right': 7,
             'lower center': 8,
             'upper center': 9,
             'center': 10,
             }

    def __init__(self, loc, *,
                 pad=0.4, borderpad=0.5,
                 child=None, prop=None, frameon=True,
                 bbox_to_anchor=None,
                 bbox_transform=None,
                 **kwargs):
        """
        Parameters
        ----------
        loc : str
            The box location.  Valid locations are
            'upper left', 'upper center', 'upper right',
            'center left', 'center', 'center right',
            'lower left', 'lower center', 'lower right'.
            For backward compatibility, numeric values are accepted as well.
            See the parameter *loc* of `.Legend` for details.
        pad : float, default: 0.4
            Padding around the child as fraction of the fontsize.
        borderpad : float, default: 0.5
            Padding between the offsetbox frame and the *bbox_to_anchor*.
        child : `.OffsetBox`
            The box that will be anchored.
        prop : `.FontProperties`
            This is only used as a reference for paddings. If not given,
            :rc:`legend.fontsize` is used.
        frameon : bool
            Whether to draw a frame around the box.
        bbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats
            Box that is used to position the legend in conjunction with *loc*.
        bbox_transform : None or :class:`matplotlib.transforms.Transform`
            The transform for the bounding box (*bbox_to_anchor*).
        **kwargs
            All other parameters are passed on to `.OffsetBox`.

        Notes
        -----
        See `.Legend` for a detailed description of the anchoring mechanism.
        """
        super().__init__(**kwargs)
        self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
        self.set_child(child)
        # String locations are mapped to the numeric codes above.
        if isinstance(loc, str):
            loc = _api.check_getitem(self.codes, loc=loc)
        self.loc = loc
        self.borderpad = borderpad
        self.pad = pad

        if prop is None:
            self.prop = FontProperties(size=mpl.rcParams["legend.fontsize"])
        else:
            self.prop = FontProperties._from_any(prop)
            # A dict without an explicit size still gets the rc fontsize.
            if isinstance(prop, dict) and "size" not in prop:
                self.prop.set_size(mpl.rcParams["legend.fontsize"])

        # Frame patch; bounds are synced to the box extent in draw().
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True,
            visible=frameon,
            boxstyle="square,pad=0",
        )

    def set_child(self, child):
        """Set the child to be anchored."""
        self._child = child
        if child is not None:
            child.axes = self.axes
        self.stale = True

    def get_child(self):
        """Return the child."""
        return self._child

    def get_children(self):
        """Return the list of children."""
        return [self._child]

    def get_bbox(self, renderer):
        # docstring inherited
        # *pad* is a fraction of the reference fontsize, in pixels.
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        return self.get_child().get_bbox(renderer).padded(pad)

    def get_bbox_to_anchor(self):
        """Return the bbox that the box is anchored to."""
        if self._bbox_to_anchor is None:
            # Default: anchor against the parent Axes.
            return self.axes.bbox
        else:
            transform = self._bbox_to_anchor_transform
            if transform is None:
                return self._bbox_to_anchor
            else:
                return TransformedBbox(self._bbox_to_anchor, transform)

    def set_bbox_to_anchor(self, bbox, transform=None):
        """
        Set the bbox that the box is anchored to.

        *bbox* can be a Bbox instance, a list of [left, bottom, width,
        height], or a list of [left, bottom] where the width and
        height will be assumed to be zero. The bbox will be
        transformed to display coordinate by the given transform.
        """
        if bbox is None or isinstance(bbox, BboxBase):
            self._bbox_to_anchor = bbox
        else:
            try:
                l = len(bbox)
            except TypeError as err:
                raise ValueError(f"Invalid bbox: {bbox}") from err
            # A 2-tuple is a point: zero-size anchor box.
            if l == 2:
                bbox = [bbox[0], bbox[1], 0, 0]
            self._bbox_to_anchor = Bbox.from_bounds(*bbox)
        self._bbox_to_anchor_transform = transform
        self.stale = True

    @_compat_get_offset
    def get_offset(self, bbox, renderer):
        # docstring inherited
        pad = (self.borderpad
               * renderer.points_to_pixels(self.prop.get_size_in_points()))
        bbox_to_anchor = self.get_bbox_to_anchor()
        # Anchor a zero-origin copy of bbox, then translate back so the
        # result accounts for bbox's own origin.
        x0, y0 = _get_anchored_bbox(
            self.loc, Bbox.from_bounds(0, 0, bbox.width, bbox.height),
            bbox_to_anchor, pad)
        return x0 - bbox.x0, y0 - bbox.y0

    def update_frame(self, bbox, fontsize=None):
        # Resize the frame patch to *bbox*; optionally rescale its style.
        self.patch.set_bounds(bbox.bounds)
        if fontsize:
            self.patch.set_mutation_scale(fontsize)

    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            return

        # update the location and size of the legend
        bbox = self.get_window_extent(renderer)
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self.update_frame(bbox, fontsize)
        self.patch.draw(renderer)

        # Position and draw the child at the anchored offset.
        px, py = self.get_offset(self.get_bbox(renderer), renderer)
        self.get_child().set_offset((px, py))
        self.get_child().draw(renderer)
        self.stale = False
def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):
    """
    Return the (x, y) position of the *bbox* anchored at the *parentbbox* with
    the *loc* code with the *borderpad*.
    """
    # This is only called internally and *loc* should already have been
    # validated.  Location codes 1..10 map to compass-point anchor names;
    # index 0 (None) lets ``bbox.anchored`` raise for loc == 0.
    anchor_names = (None, "NE", "NW", "SW", "SE", "E", "W", "E", "S", "N", "C")
    shrunk_parent = parentbbox.padded(-borderpad)
    return bbox.anchored(anchor_names[loc], container=shrunk_parent).p0
class AnchoredText(AnchoredOffsetbox):
    """
    AnchoredOffsetbox with Text.
    """

    def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):
        """
        Parameters
        ----------
        s : str
            Text.
        loc : str
            Location code. See `AnchoredOffsetbox`.
        pad : float, default: 0.4
            Padding around the text as fraction of the fontsize.
        borderpad : float, default: 0.5
            Spacing between the offsetbox frame and the *bbox_to_anchor*.
        prop : dict, optional
            Dictionary of keyword parameters to be passed to the
            `~matplotlib.text.Text` instance contained inside AnchoredText.
        **kwargs
            All other parameters are passed to `AnchoredOffsetbox`.
        """
        prop = {} if prop is None else prop
        # Vertical alignment would conflict with the anchoring logic.
        if {'va', 'verticalalignment'} & set(prop):
            raise ValueError(
                'Mixing verticalalignment with AnchoredText is not supported.')

        self.txt = TextArea(s, textprops=prop)
        # Reuse the text's font as the padding reference font.
        super().__init__(
            loc, pad=pad, borderpad=borderpad, child=self.txt,
            prop=self.txt._text.get_fontproperties(), **kwargs)
class OffsetImage(OffsetBox):
    """
    An `OffsetBox` whose single child is a `.BboxImage` showing *arr*,
    scaled by *zoom* (and, if *dpi_cor* is set, by the renderer dpi).
    """

    def __init__(self, arr, *,
                 zoom=1,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 dpi_cor=True,
                 **kwargs
                 ):
        super().__init__()
        self._dpi_cor = dpi_cor
        # The image tracks this box's window extent via the bbox callable.
        self.image = BboxImage(bbox=self.get_window_extent, cmap=cmap,
                               norm=norm, interpolation=interpolation,
                               origin=origin, filternorm=filternorm,
                               filterrad=filterrad, resample=resample,
                               **kwargs)
        self._children = [self.image]
        self.set_zoom(zoom)
        self.set_data(arr)

    def set_data(self, arr):
        """Set the displayed array and forward it to the child image."""
        self._data = np.asarray(arr)
        self.image.set_data(self._data)
        self.stale = True

    def get_data(self):
        """Return the displayed array."""
        return self._data

    def set_zoom(self, zoom):
        """Set the zoom factor applied to the image extent."""
        self._zoom = zoom
        self.stale = True

    def get_zoom(self):
        """Return the zoom factor."""
        return self._zoom

    def get_offset(self):
        """Return offset of the container."""
        return self._offset

    def get_children(self):
        return [self.image]

    def get_bbox(self, renderer):
        # Pixel size = array size * zoom, optionally dpi-corrected.
        scale = renderer.points_to_pixels(1.) if self._dpi_cor else 1.
        factor = scale * self.get_zoom()
        nrows, ncols = self.get_data().shape[:2]
        return Bbox.from_bounds(0, 0, ncols * factor, nrows * factor)

    def draw(self, renderer):
        # docstring inherited
        self.image.draw(renderer)
        self.stale = False
class AnnotationBbox(martist.Artist, mtext._AnnotationBase):
    """
    Container for an `OffsetBox` referring to a specific position *xy*.

    Optionally an arrow pointing from the offsetbox to *xy* can be drawn.

    This is like `.Annotation`, but with `OffsetBox` instead of `.Text`.
    """

    zorder = 3

    def __str__(self):
        return f"AnnotationBbox({self.xy[0]:g},{self.xy[1]:g})"

    @_docstring.interpd
    def __init__(self, offsetbox, xy, xybox=None, xycoords='data', boxcoords=None, *,
                 frameon=True, pad=0.4,  # FancyBboxPatch boxstyle.
                 annotation_clip=None,
                 box_alignment=(0.5, 0.5),
                 bboxprops=None,
                 arrowprops=None,
                 fontsize=None,
                 **kwargs):
        """
        Parameters
        ----------
        offsetbox : `OffsetBox`

        xy : (float, float)
            The point *(x, y)* to annotate. The coordinate system is determined
            by *xycoords*.

        xybox : (float, float), default: *xy*
            The position *(x, y)* to place the text at. The coordinate system
            is determined by *boxcoords*.

        xycoords : single or two-tuple of str or `.Artist` or `.Transform` or \
callable, default: 'data'
            The coordinate system that *xy* is given in. See the parameter
            *xycoords* in `.Annotation` for a detailed description.

        boxcoords : single or two-tuple of str or `.Artist` or `.Transform` \
or callable, default: value of *xycoords*
            The coordinate system that *xybox* is given in. See the parameter
            *textcoords* in `.Annotation` for a detailed description.

        frameon : bool, default: True
            By default, the text is surrounded by a white `.FancyBboxPatch`
            (accessible as the ``patch`` attribute of the `.AnnotationBbox`).
            If *frameon* is set to False, this patch is made invisible.

        annotation_clip: bool or None, default: None
            Whether to clip (i.e. not draw) the annotation when the annotation
            point *xy* is outside the Axes area.

            - If *True*, the annotation will be clipped when *xy* is outside
              the Axes.
            - If *False*, the annotation will always be drawn.
            - If *None*, the annotation will be clipped when *xy* is outside
              the Axes and *xycoords* is 'data'.

        pad : float, default: 0.4
            Padding around the offsetbox.

        box_alignment : (float, float)
            A tuple of two floats for a vertical and horizontal alignment of
            the offset box w.r.t. the *boxcoords*.
            The lower-left corner is (0, 0) and upper-right corner is (1, 1).

        bboxprops : dict, optional
            A dictionary of properties to set for the annotation bounding box,
            for example *boxstyle* and *alpha*.  See `.FancyBboxPatch` for
            details.

        arrowprops: dict, optional
            Arrow properties, see `.Annotation` for description.

        fontsize: float or str, optional
            Translated to points and passed as *mutation_scale* into
            `.FancyBboxPatch` to scale attributes of the box style (e.g. pad
            or rounding_size).  The name is chosen in analogy to `.Text` where
            *fontsize* defines the mutation scale as well.  If not given,
            :rc:`legend.fontsize` is used.  See `.Text.set_fontsize` for valid
            values.

        **kwargs
            Other `AnnotationBbox` properties.  See `.AnnotationBbox.set` for
            a list.
        """

        martist.Artist.__init__(self)
        mtext._AnnotationBase.__init__(
            self, xy, xycoords=xycoords, annotation_clip=annotation_clip)

        self.offsetbox = offsetbox
        # Copy so popping "relpos" below doesn't mutate the caller's dict.
        self.arrowprops = arrowprops.copy() if arrowprops is not None else None
        self.set_fontsize(fontsize)
        self.xybox = xybox if xybox is not None else xy
        self.boxcoords = boxcoords if boxcoords is not None else xycoords
        self._box_alignment = box_alignment

        if arrowprops is not None:
            # relpos is the arrow's start point relative to the box (0-1).
            self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **self.arrowprops)
        else:
            self._arrow_relpos = None
            self.arrow_patch = None

        self.patch = FancyBboxPatch(  # frame
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True,
            visible=frameon,
        )
        self.patch.set_boxstyle("square", pad=pad)
        if bboxprops:
            self.patch.set(**bboxprops)

        self._internal_update(kwargs)

    @property
    def xyann(self):
        # Alias of xybox, mirroring `.Annotation.xyann`.
        return self.xybox

    @xyann.setter
    def xyann(self, xyann):
        self.xybox = xyann
        self.stale = True

    @property
    def anncoords(self):
        # Alias of boxcoords, mirroring `.Annotation.anncoords`.
        return self.boxcoords

    @anncoords.setter
    def anncoords(self, coords):
        self.boxcoords = coords
        self.stale = True

    def contains(self, mouseevent):
        # docstring inherited; delegates the hit-test to the offsetbox.
        if self._different_canvas(mouseevent):
            return False, {}
        if not self._check_xy(None):
            return False, {}
        return self.offsetbox.contains(mouseevent)
        # self.arrow_patch is currently not checked as this can be a line - JJ

    def get_children(self):
        children = [self.offsetbox, self.patch]
        if self.arrow_patch:
            children.append(self.arrow_patch)
        return children

    def set_figure(self, fig):
        # Propagate the figure to the owned artists as well.
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        self.offsetbox.set_figure(fig)
        martist.Artist.set_figure(self, fig)

    def set_fontsize(self, s=None):
        """
        Set the fontsize in points.

        If *s* is not given, reset to :rc:`legend.fontsize`.
        """
        if s is None:
            s = mpl.rcParams["legend.fontsize"]

        self.prop = FontProperties(size=s)
        self.stale = True

    def get_fontsize(self):
        """Return the fontsize in points."""
        return self.prop.get_size_in_points()

    def get_window_extent(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        self.update_positions(renderer)
        return Bbox.union([child.get_window_extent(renderer)
                           for child in self.get_children()])

    def get_tightbbox(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        self.update_positions(renderer)
        return Bbox.union([child.get_tightbbox(renderer)
                           for child in self.get_children()])

    def update_positions(self, renderer):
        """Update pixel positions for the annotated point, the text, and the arrow."""

        ox0, oy0 = self._get_xy(renderer, self.xybox, self.boxcoords)
        bbox = self.offsetbox.get_bbox(renderer)
        fw, fh = self._box_alignment
        # Place the offsetbox so its (fw, fh) alignment point sits at
        # (ox0, oy0).
        self.offsetbox.set_offset(
            (ox0 - fw*bbox.width - bbox.x0, oy0 - fh*bbox.height - bbox.y0))

        bbox = self.offsetbox.get_window_extent(renderer)
        self.patch.set_bounds(bbox.bounds)

        mutation_scale = renderer.points_to_pixels(self.get_fontsize())
        self.patch.set_mutation_scale(mutation_scale)

        if self.arrowprops:
            # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.

            # Adjust the starting point of the arrow relative to the textbox.
            # TODO: Rotation needs to be accounted.
            arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos
            arrow_end = self._get_position_xy(renderer)
            # The arrow (from arrow_begin to arrow_end) will be first clipped
            # by patchA and patchB, then shrunk by shrinkA and shrinkB (in
            # points).  If patch A is not set, self.bbox_patch is used.
            self.arrow_patch.set_positions(arrow_begin, arrow_end)

            if "mutation_scale" in self.arrowprops:
                mutation_scale = renderer.points_to_pixels(
                    self.arrowprops["mutation_scale"])
                # Else, use fontsize-based mutation_scale defined above.
            self.arrow_patch.set_mutation_scale(mutation_scale)

            patchA = self.arrowprops.get("patchA", self.patch)
            self.arrow_patch.set_patchA(patchA)

    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible() or not self._check_xy(renderer):
            return
        renderer.open_group(self.__class__.__name__, gid=self.get_gid())
        self.update_positions(renderer)
        if self.arrow_patch is not None:
            # Lazily adopt this artist's figure for the arrow if unset.
            if (self.arrow_patch.get_figure(root=False) is None and
                    (fig := self.get_figure(root=False)) is not None):
                self.arrow_patch.set_figure(fig)
            self.arrow_patch.draw(renderer)
        self.patch.draw(renderer)
        self.offsetbox.draw(renderer)
        renderer.close_group(self.__class__.__name__)
        self.stale = False
class DraggableBase:
    """
    Helper base class for a draggable artist (legend, offsetbox).

    Derived classes must override the following methods::

        def save_offset(self):
            '''
            Called when the object is picked for dragging; should save the
            reference position of the artist.
            '''

        def update_offset(self, dx, dy):
            '''
            Called during the dragging; (*dx*, *dy*) is the pixel offset from
            the point where the mouse drag started.
            '''

    Optionally, you may override the following method::

        def finalize_offset(self):
            '''Called when the mouse is released.'''

    In the current implementation of `.DraggableLegend` and
    `DraggableAnnotation`, `update_offset` places the artists in display
    coordinates, and `finalize_offset` recalculates their position in axes
    coordinate and set a relevant attribute.
    """

    def __init__(self, ref_artist, use_blit=False):
        self.ref_artist = ref_artist
        if not ref_artist.pickable():
            ref_artist.set_picker(self._picker)
        self.got_artist = False
        self._use_blit = use_blit and self.canvas.supports_blit
        callbacks = self.canvas.callbacks
        # Store one disconnect-closure per connected event so disconnect()
        # can tear everything down without keeping raw cids around.
        self._disconnectors = [
            functools.partial(
                callbacks.disconnect, callbacks._connect_picklable(name, func))
            for name, func in [
                ("pick_event", self.on_pick),
                ("button_release_event", self.on_release),
                ("motion_notify_event", self.on_motion),
            ]
        ]

    @staticmethod
    def _picker(artist, mouseevent):
        # A custom picker to prevent dragging on mouse scroll events
        if mouseevent.name == "scroll_event":
            return False, {}
        return artist.contains(mouseevent)

    # A property, not an attribute, to maintain picklability.
    canvas = property(lambda self: self.ref_artist.get_figure(root=True).canvas)

    # The connection ids of the pick and release events (not motion).
    cids = property(lambda self: [
        disconnect.args[0] for disconnect in self._disconnectors[:2]])

    def on_motion(self, evt):
        if self._check_still_parented() and self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            if self._use_blit:
                # Restore the saved background, then redraw only the artist.
                self.canvas.restore_region(self.background)
                self.ref_artist.draw(
                    self.ref_artist.get_figure(root=True)._get_renderer())
                self.canvas.blit()
            else:
                self.canvas.draw()

    def on_pick(self, evt):
        if self._check_still_parented():
            if evt.artist == self.ref_artist:
                # Remember where the drag started, in display pixels.
                self.mouse_x = evt.mouseevent.x
                self.mouse_y = evt.mouseevent.y
                self.save_offset()
                self.got_artist = True
            if self.got_artist and self._use_blit:
                # Snapshot the canvas without the artist for fast blitting.
                self.ref_artist.set_animated(True)
                self.canvas.draw()
                fig = self.ref_artist.get_figure(root=False)
                self.background = self.canvas.copy_from_bbox(fig.bbox)
                self.ref_artist.draw(fig._get_renderer())
                self.canvas.blit()

    def on_release(self, event):
        if self._check_still_parented() and self.got_artist:
            self.finalize_offset()
            self.got_artist = False
            if self._use_blit:
                self.canvas.restore_region(self.background)
                # NOTE(review): uses ``.figure`` here while on_motion uses
                # ``get_figure(root=True)`` — confirm both resolve to the
                # same figure for sub-figure artists.
                self.ref_artist.draw(self.ref_artist.figure._get_renderer())
                self.canvas.blit()
                self.ref_artist.set_animated(False)

    def _check_still_parented(self):
        # An artist detached from its figure can no longer be dragged;
        # disconnect all callbacks in that case.
        if self.ref_artist.get_figure(root=False) is None:
            self.disconnect()
            return False
        else:
            return True

    def disconnect(self):
        """Disconnect the callbacks."""
        for disconnector in self._disconnectors:
            disconnector()

    def save_offset(self):
        pass

    def update_offset(self, dx, dy):
        pass

    def finalize_offset(self):
        pass
class DraggableOffsetBox(DraggableBase):
    """Helper that makes an offsetbox draggable with the mouse."""

    def __init__(self, ref_artist, offsetbox, use_blit=False):
        super().__init__(ref_artist, use_blit=use_blit)
        self.offsetbox = offsetbox

    def save_offset(self):
        # Record the box's pixel offset at pick time as the drag reference.
        box = self.offsetbox
        rend = box.get_figure(root=True)._get_renderer()
        offset = box.get_offset(box.get_bbox(rend), rend)
        self.offsetbox_x, self.offsetbox_y = offset
        box.set_offset(offset)

    def update_offset(self, dx, dy):
        # Move the box by the drag delta, relative to the saved reference.
        self.offsetbox.set_offset(
            (self.offsetbox_x + dx, self.offsetbox_y + dy))

    def get_loc_in_canvas(self):
        """Return the canvas (pixel) location of the box's bbox corner."""
        box = self.offsetbox
        rend = box.get_figure(root=True)._get_renderer()
        extent = box.get_bbox(rend)
        ox, oy = box._offset
        return ox + extent.x0, oy + extent.y0
class DraggableAnnotation(DraggableBase):
    """Helper that makes an annotation draggable with the mouse."""

    def __init__(self, annotation, use_blit=False):
        super().__init__(annotation, use_blit=use_blit)
        self.annotation = annotation

    def save_offset(self):
        # Remember the annotation text position in display coordinates.
        transform = self.annotation.get_transform()
        self.ox, self.oy = transform.transform(self.annotation.xyann)

    def update_offset(self, dx, dy):
        # Map the dragged display position back into xyann coordinates.
        annotation = self.annotation
        inverse = annotation.get_transform().inverted()
        annotation.xyann = inverse.transform((self.ox + dx, self.oy + dy))
# venv\Lib\site-packages\matplotlib\patches.py
r"""
Patches are `.Artist`\s with a face color and an edge color.
"""
import functools
import inspect
import math
from numbers import Number, Real
import textwrap
from types import SimpleNamespace
from collections import namedtuple
from matplotlib.transforms import Affine2D
import numpy as np
import matplotlib as mpl
from . import (_api, artist, cbook, colors, _docstring, hatch as mhatch,
lines as mlines, transforms)
from .bezier import (
NonIntersectingPathException, get_cos_sin, get_intersection,
get_parallels, inside_circle, make_wedged_bezier2,
split_bezier_intersecting_with_closedpath, split_path_inout)
from .path import Path
from ._enums import JoinStyle, CapStyle
@_docstring.interpd
@_api.define_aliases({
    "antialiased": ["aa"],
    "edgecolor": ["ec"],
    "facecolor": ["fc"],
    "linestyle": ["ls"],
    "linewidth": ["lw"],
})
class Patch(artist.Artist):
    """
    A patch is a 2D artist with a face color and an edge color.

    If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
    are *None*, they default to their rc params setting.
    """
    zorder = 1

    # Whether to draw an edge by default.  Set on a
    # subclass-by-subclass basis.
    _edge_default = False

    def __init__(self, *,
                 edgecolor=None,
                 facecolor=None,
                 color=None,
                 linewidth=None,
                 linestyle=None,
                 antialiased=None,
                 hatch=None,
                 fill=True,
                 capstyle=None,
                 joinstyle=None,
                 **kwargs):
        """
        The following kwarg properties are supported

        %(Patch:kwdoc)s
        """
        super().__init__()
        if linestyle is None:
            linestyle = "solid"
        if capstyle is None:
            capstyle = CapStyle.butt
        if joinstyle is None:
            joinstyle = JoinStyle.miter
        # Hatch color/linewidth start from rcParams; the hatch color may be
        # replaced by the edge color later (see _set_edgecolor).
        self._hatch_color = colors.to_rgba(mpl.rcParams['hatch.color'])
        self._hatch_linewidth = mpl.rcParams['hatch.linewidth']
        self._fill = bool(fill)  # needed for set_facecolor call
        if color is not None:
            if edgecolor is not None or facecolor is not None:
                _api.warn_external(
                    "Setting the 'color' property will override "
                    "the edgecolor or facecolor properties.")
            self.set_color(color)
        else:
            self.set_edgecolor(edgecolor)
            self.set_facecolor(facecolor)
        # Dash patterns are stored both unscaled and scaled-by-linewidth;
        # set_linestyle/set_linewidth keep the two in sync.
        self._linewidth = 0
        self._unscaled_dash_pattern = (0, None)  # offset, dash
        self._dash_pattern = (0, None)  # offset, dash (scaled by linewidth)
        self.set_linestyle(linestyle)
        self.set_linewidth(linewidth)
        self.set_antialiased(antialiased)
        self.set_hatch(hatch)
        self.set_capstyle(capstyle)
        self.set_joinstyle(joinstyle)
        if len(kwargs):
            self._internal_update(kwargs)

    def get_verts(self):
        """
        Return a copy of the vertices used in this patch.

        If the patch contains Bézier curves, the curves will be interpolated by
        line segments. To access the curves as curves, use `get_path`.
        """
        trans = self.get_transform()
        path = self.get_path()
        polygons = path.to_polygons(trans)
        if len(polygons):
            return polygons[0]
        return []

    def _process_radius(self, radius):
        # Resolve the pick radius: an explicit value wins; otherwise a
        # numeric picker is used; otherwise fall back to the linewidth,
        # or 0 when the edge is fully transparent.
        if radius is not None:
            return radius
        if isinstance(self._picker, Number):
            _radius = self._picker
        else:
            if self.get_edgecolor()[3] == 0:
                _radius = 0
            else:
                _radius = self.get_linewidth()
        return _radius

    def contains(self, mouseevent, radius=None):
        """
        Test whether the mouse event occurred in the patch.

        Parameters
        ----------
        mouseevent : `~matplotlib.backend_bases.MouseEvent`
            Where the user clicked.
        radius : float, optional
            Additional margin on the patch in target coordinates of
            `.Patch.get_transform`. See `.Path.contains_point` for further
            details.

            If `None`, the default value depends on the state of the object:

            - If `.Artist.get_picker` is a number, the default
              is that value.  This is so that picking works as expected.
            - Otherwise if the edge color has a non-zero alpha, the default
              is half of the linewidth.  This is so that all the colored
              pixels are "in" the patch.
            - Finally, if the edge has 0 alpha, the default is 0.  This is
              so that patches without a stroked edge do not have points
              outside of the filled region report as "in" due to an
              invisible edge.

        Returns
        -------
        (bool, empty dict)
        """
        if self._different_canvas(mouseevent):
            return False, {}
        radius = self._process_radius(radius)
        codes = self.get_path().codes
        if codes is not None:
            vertices = self.get_path().vertices
            # if the current path is concatenated by multiple sub paths.
            # get the indexes of the starting code(MOVETO) of all sub paths
            idxs, = np.where(codes == Path.MOVETO)
            # Don't split before the first MOVETO.
            idxs = idxs[1:]
            subpaths = map(
                Path, np.split(vertices, idxs), np.split(codes, idxs))
        else:
            subpaths = [self.get_path()]
        # A hit in any sub-path counts as a hit on the patch.
        inside = any(
            subpath.contains_point(
                (mouseevent.x, mouseevent.y), self.get_transform(), radius)
            for subpath in subpaths)
        return inside, {}

    def contains_point(self, point, radius=None):
        """
        Return whether the given point is inside the patch.

        Parameters
        ----------
        point : (float, float)
            The point (x, y) to check, in target coordinates of
            ``.Patch.get_transform()``. These are display coordinates for patches
            that are added to a figure or Axes.
        radius : float, optional
            Additional margin on the patch in target coordinates of
            `.Patch.get_transform`. See `.Path.contains_point` for further
            details.

            If `None`, the default value depends on the state of the object:

            - If `.Artist.get_picker` is a number, the default
              is that value.  This is so that picking works as expected.
            - Otherwise if the edge color has a non-zero alpha, the default
              is half of the linewidth.  This is so that all the colored
              pixels are "in" the patch.
            - Finally, if the edge has 0 alpha, the default is 0.  This is
              so that patches without a stroked edge do not have points
              outside of the filled region report as "in" due to an
              invisible edge.

        Returns
        -------
        bool

        Notes
        -----
        The proper use of this method depends on the transform of the patch.
        Isolated patches do not have a transform. In this case, the patch
        creation coordinates and the point coordinates match. The following
        example checks that the center of a circle is within the circle

        >>> center = 0, 0
        >>> c = Circle(center, radius=1)
        >>> c.contains_point(center)
        True

        The convention of checking against the transformed patch stems from
        the fact that this method is predominantly used to check if display
        coordinates (e.g. from mouse events) are within the patch. If you want
        to do the above check with data coordinates, you have to properly
        transform them first:

        >>> center = 0, 0
        >>> c = Circle(center, radius=3)
        >>> plt.gca().add_patch(c)
        >>> transformed_interior_point = c.get_data_transform().transform((0, 2))
        >>> c.contains_point(transformed_interior_point)
        True
        """
        radius = self._process_radius(radius)
        return self.get_path().contains_point(point,
                                              self.get_transform(),
                                              radius)

    def contains_points(self, points, radius=None):
        """
        Return whether the given points are inside the patch.

        Parameters
        ----------
        points : (N, 2) array
            The points to check, in target coordinates of
            ``self.get_transform()``. These are display coordinates for patches
            that are added to a figure or Axes. Columns contain x and y values.
        radius : float, optional
            Additional margin on the patch in target coordinates of
            `.Patch.get_transform`. See `.Path.contains_point` for further
            details.

            If `None`, the default value depends on the state of the object:

            - If `.Artist.get_picker` is a number, the default
              is that value.  This is so that picking works as expected.
            - Otherwise if the edge color has a non-zero alpha, the default
              is half of the linewidth.  This is so that all the colored
              pixels are "in" the patch.
            - Finally, if the edge has 0 alpha, the default is 0.  This is
              so that patches without a stroked edge do not have points
              outside of the filled region report as "in" due to an
              invisible edge.

        Returns
        -------
        length-N bool array

        Notes
        -----
        The proper use of this method depends on the transform of the patch.
        See the notes on `.Patch.contains_point`.
        """
        radius = self._process_radius(radius)
        return self.get_path().contains_points(points,
                                               self.get_transform(),
                                               radius)

    def update_from(self, other):
        # docstring inherited.
        super().update_from(other)
        # For some properties we don't need or don't want to go through the
        # getters/setters, so we just copy them directly.
        self._edgecolor = other._edgecolor
        self._facecolor = other._facecolor
        self._original_edgecolor = other._original_edgecolor
        self._original_facecolor = other._original_facecolor
        self._fill = other._fill
        self._hatch = other._hatch
        self._hatch_color = other._hatch_color
        self._unscaled_dash_pattern = other._unscaled_dash_pattern
        self.set_linewidth(other._linewidth)  # also sets scaled dashes
        self.set_transform(other.get_data_transform())
        # If the transform of other needs further initialization, then it will
        # be the case for this artist too.
        self._transformSet = other.is_transform_set()

    def get_extents(self):
        """
        Return the `Patch`'s axis-aligned extents as a `~.transforms.Bbox`.
        """
        return self.get_path().get_extents(self.get_transform())

    def get_transform(self):
        """Return the `~.transforms.Transform` applied to the `Patch`."""
        # Patch-to-data transform composed with the artist (data-to-display)
        # transform.
        return self.get_patch_transform() + artist.Artist.get_transform(self)

    def get_data_transform(self):
        """
        Return the `~.transforms.Transform` mapping data coordinates to
        physical coordinates.
        """
        return artist.Artist.get_transform(self)

    def get_patch_transform(self):
        """
        Return the `~.transforms.Transform` instance mapping patch coordinates
        to data coordinates.

        For example, one may define a patch of a circle which represents a
        radius of 5 by providing coordinates for a unit circle, and a
        transform which scales the coordinates (the patch coordinate) by 5.
        """
        return transforms.IdentityTransform()

    def get_antialiased(self):
        """Return whether antialiasing is used for drawing."""
        return self._antialiased

    def get_edgecolor(self):
        """Return the edge color."""
        return self._edgecolor

    def get_facecolor(self):
        """Return the face color."""
        return self._facecolor

    def get_linewidth(self):
        """Return the line width in points."""
        return self._linewidth

    def get_linestyle(self):
        """Return the linestyle."""
        return self._linestyle

    def set_antialiased(self, aa):
        """
        Set whether to use antialiased rendering.

        Parameters
        ----------
        aa : bool or None
        """
        if aa is None:
            aa = mpl.rcParams['patch.antialiased']
        self._antialiased = aa
        self.stale = True

    def _set_edgecolor(self, color):
        set_hatch_color = True
        if color is None:
            if (mpl.rcParams['patch.force_edgecolor'] or
                    not self._fill or self._edge_default):
                color = mpl.rcParams['patch.edgecolor']
            else:
                color = 'none'
                set_hatch_color = False
        self._edgecolor = colors.to_rgba(color, self._alpha)
        # A visible edge also recolors the hatch to match.
        if set_hatch_color:
            self._hatch_color = self._edgecolor
        self.stale = True

    def set_edgecolor(self, color):
        """
        Set the patch edge color.

        Parameters
        ----------
        color : :mpltype:`color` or None
        """
        # Keep the original (pre-rcParams-resolution) value so that alpha
        # and fill changes can re-resolve it later.
        self._original_edgecolor = color
        self._set_edgecolor(color)

    def _set_facecolor(self, color):
        if color is None:
            color = mpl.rcParams['patch.facecolor']
        # An unfilled patch gets a fully transparent face.
        alpha = self._alpha if self._fill else 0
        self._facecolor = colors.to_rgba(color, alpha)
        self.stale = True

    def set_facecolor(self, color):
        """
        Set the patch face color.

        Parameters
        ----------
        color : :mpltype:`color` or None
        """
        self._original_facecolor = color
        self._set_facecolor(color)

    def set_color(self, c):
        """
        Set both the edgecolor and the facecolor.

        Parameters
        ----------
        c : :mpltype:`color`

        See Also
        --------
        Patch.set_facecolor, Patch.set_edgecolor
            For setting the edge or face color individually.
        """
        self.set_facecolor(c)
        self.set_edgecolor(c)

    def set_alpha(self, alpha):
        # docstring inherited
        super().set_alpha(alpha)
        # Re-resolve the stored original colors with the new alpha.
        self._set_facecolor(self._original_facecolor)
        self._set_edgecolor(self._original_edgecolor)
        # stale is already True

    def set_linewidth(self, w):
        """
        Set the patch linewidth in points.

        Parameters
        ----------
        w : float or None
        """
        if w is None:
            w = mpl.rcParams['patch.linewidth']
        self._linewidth = float(w)
        # Dash lengths scale with the linewidth.
        self._dash_pattern = mlines._scale_dashes(
            *self._unscaled_dash_pattern, w)
        self.stale = True

    def set_linestyle(self, ls):
        """
        Set the patch linestyle.

        ==========================================  =================
        linestyle                                   description
        ==========================================  =================
        ``'-'`` or ``'solid'``                      solid line
        ``'--'`` or ``'dashed'``                    dashed line
        ``'-.'`` or ``'dashdot'``                   dash-dotted line
        ``':'`` or ``'dotted'``                     dotted line
        ``'none'``, ``'None'``, ``' '``, or ``''``  draw nothing
        ==========================================  =================

        Alternatively a dash tuple of the following form can be provided::

            (offset, onoffseq)

        where ``onoffseq`` is an even length tuple of on and off ink in points.

        Parameters
        ----------
        ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
            The line style.
        """
        if ls is None:
            ls = "solid"
        if ls in [' ', '', 'none']:
            ls = 'None'
        self._linestyle = ls
        self._unscaled_dash_pattern = mlines._get_dash_pattern(ls)
        self._dash_pattern = mlines._scale_dashes(
            *self._unscaled_dash_pattern, self._linewidth)
        self.stale = True

    def set_fill(self, b):
        """
        Set whether to fill the patch.

        Parameters
        ----------
        b : bool
        """
        self._fill = bool(b)
        # Fill state affects how the original colors resolve; redo both.
        self._set_facecolor(self._original_facecolor)
        self._set_edgecolor(self._original_edgecolor)
        self.stale = True

    def get_fill(self):
        """Return whether the patch is filled."""
        return self._fill

    # Make fill a property so as to preserve the long-standing
    # but somewhat inconsistent behavior in which fill was an
    # attribute.
    fill = property(get_fill, set_fill)

    @_docstring.interpd
    def set_capstyle(self, s):
        """
        Set the `.CapStyle`.

        The default capstyle is 'round' for `.FancyArrowPatch` and 'butt' for
        all other patches.

        Parameters
        ----------
        s : `.CapStyle` or %(CapStyle)s
        """
        cs = CapStyle(s)
        self._capstyle = cs
        self.stale = True

    def get_capstyle(self):
        """Return the capstyle."""
        return self._capstyle.name

    @_docstring.interpd
    def set_joinstyle(self, s):
        """
        Set the `.JoinStyle`.

        The default joinstyle is 'round' for `.FancyArrowPatch` and 'miter' for
        all other patches.

        Parameters
        ----------
        s : `.JoinStyle` or %(JoinStyle)s
        """
        js = JoinStyle(s)
        self._joinstyle = js
        self.stale = True

    def get_joinstyle(self):
        """Return the joinstyle."""
        return self._joinstyle.name

    def set_hatch(self, hatch):
        r"""
        Set the hatching pattern.

        *hatch* can be one of::

          /   - diagonal hatching
          \   - back diagonal
          |   - vertical
          -   - horizontal
          +   - crossed
          x   - crossed diagonal
          o   - small circle
          O   - large circle
          .   - dots
          *   - stars

        Letters can be combined, in which case all the specified
        hatchings are done.  If same letter repeats, it increases the
        density of hatching of that pattern.

        Parameters
        ----------
        hatch : {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}
        """
        # Use validate_hatch(list) after deprecation.
        mhatch._validate_hatch_pattern(hatch)
        self._hatch = hatch
        self.stale = True

    def get_hatch(self):
        """Return the hatching pattern."""
        return self._hatch

    def set_hatch_linewidth(self, lw):
        """Set the hatch linewidth."""
        self._hatch_linewidth = lw

    def get_hatch_linewidth(self):
        """Return the hatch linewidth."""
        return self._hatch_linewidth

    def _draw_paths_with_artist_properties(
            self, renderer, draw_path_args_list):
        """
        ``draw()`` helper factored out for sharing with `FancyArrowPatch`.

        Configure *renderer* and the associated graphics context *gc*
        from the artist properties, then repeatedly call
        ``renderer.draw_path(gc, *draw_path_args)`` for each tuple
        *draw_path_args* in *draw_path_args_list*.
        """
        renderer.open_group('patch', self.get_gid())
        gc = renderer.new_gc()
        gc.set_foreground(self._edgecolor, isRGBA=True)
        # Suppress the stroke entirely for invisible or 'None' edges.
        lw = self._linewidth
        if self._edgecolor[3] == 0 or self._linestyle == 'None':
            lw = 0
        gc.set_linewidth(lw)
        gc.set_dashes(*self._dash_pattern)
        gc.set_capstyle(self._capstyle)
        gc.set_joinstyle(self._joinstyle)
        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_url(self._url)
        gc.set_snap(self.get_snap())
        gc.set_alpha(self._alpha)
        if self._hatch:
            gc.set_hatch(self._hatch)
            gc.set_hatch_color(self._hatch_color)
            gc.set_hatch_linewidth(self._hatch_linewidth)
        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())
        # Path effects wrap the renderer rather than the gc.
        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)
        for draw_path_args in draw_path_args_list:
            renderer.draw_path(gc, *draw_path_args)
        gc.restore()
        renderer.close_group('patch')
        self.stale = False

    @artist.allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            return
        path = self.get_path()
        transform = self.get_transform()
        # Split the transform so the backend can apply the affine part
        # itself (cheaper than transforming every vertex here).
        tpath = transform.transform_path_non_affine(path)
        affine = transform.get_affine()
        self._draw_paths_with_artist_properties(
            renderer,
            [(tpath, affine,
              # Work around a bug in the PDF and SVG renderers, which
              # do not draw the hatches if the facecolor is fully
              # transparent, but do if it is None.
              self._facecolor if self._facecolor[3] else None)])

    def get_path(self):
        """Return the path of this patch."""
        raise NotImplementedError('Derived must override')

    def get_window_extent(self, renderer=None):
        # Extents of the (transformed) path in display space.
        return self.get_path().get_extents(self.get_transform())

    def _convert_xy_units(self, xy):
        """Convert x and y units for a tuple (x, y)."""
        x = self.convert_xunits(xy[0])
        y = self.convert_yunits(xy[1])
        return x, y
class Shadow(Patch):
    """A shadow of another `Patch`, drawn offset and darkened behind it."""

    def __str__(self):
        return f"Shadow({self.patch})"

    @_docstring.interpd
    def __init__(self, patch, ox, oy, *, shade=0.7, **kwargs):
        """
        Create a shadow of the given *patch*.

        By default, the shadow will have the same face color as the *patch*,
        but darkened. The darkness can be controlled by *shade*.

        Parameters
        ----------
        patch : `~matplotlib.patches.Patch`
            The patch to create the shadow for.
        ox, oy : float
            The shift of the shadow in data coordinates, scaled by a factor
            of dpi/72.
        shade : float, default: 0.7
            How the darkness of the shadow relates to the original color. If 1, the
            shadow is black, if 0, the shadow has the same color as the *patch*.

            .. versionadded:: 3.8

        **kwargs
            Properties of the shadow patch. Supported keys are:

            %(Patch:kwdoc)s
        """
        super().__init__()
        self.patch = patch
        self._ox, self._oy = ox, oy
        # The offset is an extra affine transform, refreshed at draw time
        # since it depends on the renderer dpi (see _update_transform).
        self._shadow_transform = transforms.Affine2D()
        # Copy all properties from the source patch, then darken below.
        self.update_from(self.patch)
        if not 0 <= shade <= 1:
            raise ValueError("shade must be between 0 and 1.")
        # Interpolate the face color towards black by *shade*.
        color = (1 - shade) * np.asarray(colors.to_rgb(self.patch.get_facecolor()))
        self.update({'facecolor': color, 'edgecolor': color, 'alpha': 0.5,
                     # Place shadow patch directly behind the inherited patch.
                     'zorder': np.nextafter(self.patch.zorder, -np.inf),
                     **kwargs})

    def _update_transform(self, renderer):
        # Scale the stored offset by dpi/72 (points_to_pixels) for this
        # renderer before translating.
        ox = renderer.points_to_pixels(self._ox)
        oy = renderer.points_to_pixels(self._oy)
        self._shadow_transform.clear().translate(ox, oy)

    def get_path(self):
        # The shadow shares the source patch's path.
        return self.patch.get_path()

    def get_patch_transform(self):
        # Source patch transform composed with the shadow offset.
        return self.patch.get_patch_transform() + self._shadow_transform

    def draw(self, renderer):
        self._update_transform(renderer)
        super().draw(renderer)
class Rectangle(Patch):
    """
    A rectangle defined via an anchor point *xy* and its *width* and *height*.

    The rectangle extends from ``xy[0]`` to ``xy[0] + width`` in x-direction
    and from ``xy[1]`` to ``xy[1] + height`` in y-direction. ::

      :                +------------------+
      :                |                  |
      :              height               |
      :                |                  |
      :               (xy)---- width -----+

    One may picture *xy* as the bottom left corner, but which corner *xy* is
    actually depends on the direction of the axis and the sign of *width*
    and *height*; e.g. *xy* would be the bottom right corner if the x-axis
    was inverted or if *width* was negative.
    """

    def __str__(self):
        pars = self._x0, self._y0, self._width, self._height, self.angle
        fmt = "Rectangle(xy=(%g, %g), width=%g, height=%g, angle=%g)"
        return fmt % pars

    @_docstring.interpd
    def __init__(self, xy, width, height, *,
                 angle=0.0, rotation_point='xy', **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            The anchor point.
        width : float
            Rectangle width.
        height : float
            Rectangle height.
        angle : float, default: 0
            Rotation in degrees anti-clockwise about the rotation point.
        rotation_point : {'xy', 'center', (number, number)}, default: 'xy'
            If ``'xy'``, rotate around the anchor point. If ``'center'`` rotate
            around the center. If 2-tuple of number, rotate around this
            coordinate.

        Other Parameters
        ----------------
        **kwargs : `~matplotlib.patches.Patch` properties
            %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self._x0 = xy[0]
        self._y0 = xy[1]
        self._width = width
        self._height = height
        self.angle = float(angle)
        self.rotation_point = rotation_point
        # Required for RectangleSelector with axes aspect ratio != 1
        # The patch is defined in data coordinates and when changing the
        # selector with square modifier and not in data coordinates, we need
        # to correct for the aspect ratio difference between the data and
        # display coordinate systems. Its value is typically provide by
        # Axes._get_aspect_ratio()
        self._aspect_ratio_correction = 1.0
        self._convert_units()  # Validate the inputs.

    def get_path(self):
        """Return the vertices of the rectangle."""
        # Always the unit rectangle; size/position live in the transform.
        return Path.unit_rectangle()

    def _convert_units(self):
        """Convert bounds of the rectangle."""
        x0 = self.convert_xunits(self._x0)
        y0 = self.convert_yunits(self._y0)
        x1 = self.convert_xunits(self._x0 + self._width)
        y1 = self.convert_yunits(self._y0 + self._height)
        return x0, y0, x1, y1

    def get_patch_transform(self):
        # Note: This cannot be called until after this has been added to
        # an Axes, otherwise unit conversion will fail. This makes it very
        # important to call the accessor method and not directly access the
        # transformation member variable.
        bbox = self.get_bbox()
        if self.rotation_point == 'center':
            width, height = bbox.x1 - bbox.x0, bbox.y1 - bbox.y0
            rotation_point = bbox.x0 + width / 2., bbox.y0 + height / 2.
        elif self.rotation_point == 'xy':
            rotation_point = bbox.x0, bbox.y0
        else:
            rotation_point = self.rotation_point
        # Scale unit square to bbox, then rotate about the rotation point;
        # the aspect-correction scale pair makes the rotation visually
        # correct when the axes aspect ratio is not 1.
        return transforms.BboxTransformTo(bbox) \
            + transforms.Affine2D() \
            .translate(-rotation_point[0], -rotation_point[1]) \
            .scale(1, self._aspect_ratio_correction) \
            .rotate_deg(self.angle) \
            .scale(1, 1 / self._aspect_ratio_correction) \
            .translate(*rotation_point)

    @property
    def rotation_point(self):
        """The rotation point of the patch."""
        return self._rotation_point

    @rotation_point.setter
    def rotation_point(self, value):
        # Accept 'center', 'xy', or an explicit (x, y) pair of numbers.
        if value in ['center', 'xy'] or (
                isinstance(value, tuple) and len(value) == 2 and
                isinstance(value[0], Real) and isinstance(value[1], Real)
                ):
            self._rotation_point = value
        else:
            raise ValueError("`rotation_point` must be one of "
                             "{'xy', 'center', (number, number)}.")

    def get_x(self):
        """Return the left coordinate of the rectangle."""
        return self._x0

    def get_y(self):
        """Return the bottom coordinate of the rectangle."""
        return self._y0

    def get_xy(self):
        """Return the left and bottom coords of the rectangle as a tuple."""
        return self._x0, self._y0

    def get_corners(self):
        """
        Return the corners of the rectangle, moving anti-clockwise from
        (x0, y0).
        """
        return self.get_patch_transform().transform(
            [(0, 0), (1, 0), (1, 1), (0, 1)])

    def get_center(self):
        """Return the centre of the rectangle."""
        return self.get_patch_transform().transform((0.5, 0.5))

    def get_width(self):
        """Return the width of the rectangle."""
        return self._width

    def get_height(self):
        """Return the height of the rectangle."""
        return self._height

    def get_angle(self):
        """Get the rotation angle in degrees."""
        return self.angle

    def set_x(self, x):
        """Set the left coordinate of the rectangle."""
        self._x0 = x
        self.stale = True

    def set_y(self, y):
        """Set the bottom coordinate of the rectangle."""
        self._y0 = y
        self.stale = True

    def set_angle(self, angle):
        """
        Set the rotation angle in degrees.

        The rotation is performed anti-clockwise around *xy*.
        """
        self.angle = angle
        self.stale = True

    def set_xy(self, xy):
        """
        Set the left and bottom coordinates of the rectangle.

        Parameters
        ----------
        xy : (float, float)
        """
        self._x0, self._y0 = xy
        self.stale = True

    def set_width(self, w):
        """Set the width of the rectangle."""
        self._width = w
        self.stale = True

    def set_height(self, h):
        """Set the height of the rectangle."""
        self._height = h
        self.stale = True

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle as *left*, *bottom*, *width*, *height*.

        The values may be passed as separate parameters or as a tuple::

            set_bounds(left, bottom, width, height)
            set_bounds((left, bottom, width, height))

        .. ACCEPTS: (left, bottom, width, height)
        """
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x0 = l
        self._y0 = b
        self._width = w
        self._height = h
        self.stale = True

    def get_bbox(self):
        """Return the `.Bbox`."""
        return transforms.Bbox.from_extents(*self._convert_units())

    xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
    """A regular polygon patch."""

    def __str__(self):
        return "RegularPolygon((%g, %g), %d, radius=%g, orientation=%g)" % (
            self.xy[0], self.xy[1], self.numvertices, self.radius,
            self.orientation)

    @_docstring.interpd
    def __init__(self, xy, numVertices, *,
                 radius=5, orientation=0, **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            The center position.
        numVertices : int
            The number of vertices.
        radius : float
            The distance from the center to each of the vertices.
        orientation : float
            The polygon rotation angle (in radians).
        **kwargs
            `Patch` properties:

            %(Patch:kwdoc)s
        """
        self.xy = xy
        self.numvertices = numVertices
        self.orientation = orientation
        self.radius = radius
        # Cache the unit polygon; radius/orientation/position are applied
        # through the patch transform in get_patch_transform.
        self._path = Path.unit_regular_polygon(numVertices)
        self._patch_transform = transforms.Affine2D()
        super().__init__(**kwargs)

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        # Each Affine2D mutator returns self, so building the transform
        # step by step is equivalent to chaining the calls.
        transform = self._patch_transform.clear()
        transform.scale(self.radius)
        transform.rotate(self.orientation)
        transform.translate(*self.xy)
        return transform
class PathPatch(Patch):
    """A general polycurve path patch."""

    # PathPatch draws its edge by default.
    _edge_default = True

    def __str__(self):
        vertices = self._path.vertices
        first_x, first_y = tuple(vertices[0])
        return "PathPatch%d((%g, %g) ...)" % (len(vertices), first_x, first_y)

    @_docstring.interpd
    def __init__(self, path, **kwargs):
        """
        *path* is a `.Path` object.

        Valid keyword arguments are:

        %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self._path = path

    def get_path(self):
        return self._path

    def set_path(self, path):
        self._path = path
class StepPatch(PathPatch):
    """
    A path patch describing a stepwise constant function.

    By default, the path is not closed and starts and stops at
    baseline value.
    """

    _edge_default = False

    @_docstring.interpd
    def __init__(self, values, edges, *,
                 orientation='vertical', baseline=0, **kwargs):
        """
        Parameters
        ----------
        values : array-like
            The step heights.
        edges : array-like
            The edge positions, with ``len(edges) == len(vals) + 1``,
            between which the curve takes on vals values.
        orientation : {'vertical', 'horizontal'}, default: 'vertical'
            The direction of the steps. Vertical means that *values* are
            along the y-axis, and edges are along the x-axis.
        baseline : float, array-like or None, default: 0
            The bottom value of the bounding edges or when
            ``fill=True``, position of lower edge. If *fill* is
            True or an array is passed to *baseline*, a closed
            path is drawn.
        **kwargs
            `Patch` properties:

            %(Patch:kwdoc)s
        """
        self.orientation = orientation
        self._edges = np.asarray(edges)
        self._values = np.asarray(values)
        self._baseline = np.asarray(baseline) if baseline is not None else None
        # Build self._path before the PathPatch initializer consumes it.
        self._update_path()
        super().__init__(self._path, **kwargs)

    def _update_path(self):
        # Rebuild the stairs path from edges/values/baseline.
        if np.isnan(np.sum(self._edges)):
            raise ValueError('Nan values in "edges" are disallowed')
        if self._edges.size - 1 != self._values.size:
            raise ValueError('Size mismatch between "values" and "edges". '
                             "Expected `len(values) + 1 == len(edges)`, but "
                             f"`len(values) = {self._values.size}` and "
                             f"`len(edges) = {self._edges.size}`.")
        # Initializing with empty arrays allows supporting empty stairs.
        verts, codes = [np.empty((0, 2))], [np.empty(0, dtype=Path.code_type)]
        # NaN values (in values or baseline) split the stairs into separate
        # sub-paths; each contiguous non-NaN region becomes one MOVETO run.
        _nan_mask = np.isnan(self._values)
        if self._baseline is not None:
            _nan_mask |= np.isnan(self._baseline)
        for idx0, idx1 in cbook.contiguous_regions(~_nan_mask):
            # Duplicate each edge/value to form the step corners.
            x = np.repeat(self._edges[idx0:idx1+1], 2)
            y = np.repeat(self._values[idx0:idx1], 2)
            if self._baseline is None:
                # Open path: start and end at the first/last step value.
                y = np.concatenate([y[:1], y, y[-1:]])
            elif self._baseline.ndim == 0:  # single baseline value
                y = np.concatenate([[self._baseline], y, [self._baseline]])
            elif self._baseline.ndim == 1:  # baseline array
                # Closed path: walk forward along the values, then back
                # along the (reversed) baseline.
                base = np.repeat(self._baseline[idx0:idx1], 2)[::-1]
                x = np.concatenate([x, x[::-1]])
                y = np.concatenate([base[-1:], y, base[:1],
                                    base[:1], base, base[-1:]])
            else:  # no baseline
                raise ValueError('Invalid `baseline` specified')
            # 'horizontal' swaps the roles of the two axes.
            if self.orientation == 'vertical':
                xy = np.column_stack([x, y])
            else:
                xy = np.column_stack([y, x])
            verts.append(xy)
            codes.append([Path.MOVETO] + [Path.LINETO]*(len(xy)-1))
        self._path = Path(np.concatenate(verts), np.concatenate(codes))

    def get_data(self):
        """Get `.StepPatch` values, edges and baseline as namedtuple."""
        StairData = namedtuple('StairData', 'values edges baseline')
        return StairData(self._values, self._edges, self._baseline)

    def set_data(self, values=None, edges=None, baseline=None):
        """
        Set `.StepPatch` values, edges and baseline.

        Parameters
        ----------
        values : 1D array-like or None
            Will not update values, if passing None
        edges : 1D array-like, optional
        baseline : float, 1D array-like or None
        """
        if values is None and edges is None and baseline is None:
            raise ValueError("Must set *values*, *edges* or *baseline*.")
        if values is not None:
            self._values = np.asarray(values)
        if edges is not None:
            self._edges = np.asarray(edges)
        if baseline is not None:
            self._baseline = np.asarray(baseline)
        self._update_path()
        self.stale = True
class Polygon(Patch):
    """A general polygon patch."""

    def __str__(self):
        # Report the vertex count and the first vertex for quick identification.
        if len(self._path.vertices):
            s = "Polygon%d((%g, %g) ...)"
            return s % (len(self._path.vertices), *self._path.vertices[0])
        else:
            return "Polygon0()"

    @_docstring.interpd
    def __init__(self, xy, *, closed=True, **kwargs):
        """
        Parameters
        ----------
        xy : (N, 2) array
            The polygon vertices.
        closed : bool, default: True
            Whether the polygon is closed (i.e., has identical start and end
            points).
        **kwargs
            %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self._closed = closed
        # set_xy builds the initial path and handles CLOSEPOLY bookkeeping.
        self.set_xy(xy)

    def get_path(self):
        """Get the `.Path` of the polygon."""
        return self._path

    def get_closed(self):
        """Return whether the polygon is closed."""
        return self._closed

    def set_closed(self, closed):
        """
        Set whether the polygon is closed.

        Parameters
        ----------
        closed : bool
            True if the polygon is closed.
        """
        # No-op if the closed state is unchanged.
        if self._closed == bool(closed):
            return
        self._closed = bool(closed)
        # Re-run set_xy so the closing vertex is added or removed as needed.
        self.set_xy(self.get_xy())
        self.stale = True

    def get_xy(self):
        """
        Get the vertices of the path.

        Returns
        -------
        (N, 2) array
            The coordinates of the vertices.
        """
        return self._path.vertices

    def set_xy(self, xy):
        """
        Set the vertices of the polygon.

        Parameters
        ----------
        xy : (N, 2) array-like
            The coordinates of the vertices.

        Notes
        -----
        Unlike `.Path`, we do not ignore the last input vertex. If the
        polygon is meant to be closed, and the last point of the polygon is not
        equal to the first, we assume that the user has not explicitly passed a
        ``CLOSEPOLY`` vertex, and add it ourselves.
        """
        xy = np.asarray(xy)
        nverts, _ = xy.shape
        if self._closed:
            # if the first and last vertex are the "same", then we assume that
            # the user explicitly passed the CLOSEPOLY vertex. Otherwise, we
            # have to append one since the last vertex will be "ignored" by
            # Path
            if nverts == 1 or nverts > 1 and (xy[0] != xy[-1]).any():
                xy = np.concatenate([xy, [xy[0]]])
        else:
            # if we aren't closed, and the last vertex matches the first, then
            # we assume we have an unnecessary CLOSEPOLY vertex and remove it
            if nverts > 2 and (xy[0] == xy[-1]).all():
                xy = xy[:-1]
        self._path = Path(xy, closed=self._closed)
        self.stale = True

    xy = property(get_xy, set_xy,
                  doc='The vertices of the path as a (N, 2) array.')
class Wedge(Patch):
    """Wedge shaped patch."""

    def __str__(self):
        pars = (self.center[0], self.center[1], self.r,
                self.theta1, self.theta2, self.width)
        fmt = "Wedge(center=(%g, %g), r=%g, theta1=%g, theta2=%g, width=%s)"
        return fmt % pars

    @_docstring.interpd
    def __init__(self, center, r, theta1, theta2, *, width=None, **kwargs):
        """
        A wedge centered at *x*, *y* center with radius *r* that
        sweeps *theta1* to *theta2* (in degrees). If *width* is given,
        then a partial wedge is drawn from inner radius *r* - *width*
        to outer radius *r*.

        Valid keyword arguments are:

        %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self.center = center
        self.r, self.width = r, width
        self.theta1, self.theta2 = theta1, theta2
        self._patch_transform = transforms.IdentityTransform()
        # Build the initial path eagerly; later mutations invalidate it and
        # get_path() rebuilds it lazily.
        self._recompute_path()

    def _recompute_path(self):
        # Inner and outer rings are connected unless the annulus is complete
        if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
            theta1, theta2 = 0, 360
            connector = Path.MOVETO
        else:
            theta1, theta2 = self.theta1, self.theta2
            connector = Path.LINETO
        # Form the outer ring
        arc = Path.arc(theta1, theta2)
        if self.width is not None:
            # Partial annulus needs to draw the outer ring
            # followed by a reversed and scaled inner ring
            v1 = arc.vertices
            v2 = arc.vertices[::-1] * (self.r - self.width) / self.r
            v = np.concatenate([v1, v2, [(0, 0)]])
            c = [*arc.codes, connector, *arc.codes[1:], Path.CLOSEPOLY]
        else:
            # Wedge doesn't need an inner ring
            v = np.concatenate([arc.vertices, [(0, 0), (0, 0)]])
            c = [*arc.codes, connector, Path.CLOSEPOLY]
        # Shift and scale the wedge to the final location.
        self._path = Path(v * self.r + self.center, c)

    def set_center(self, center):
        """Set the center of the wedge and invalidate the cached path."""
        self._path = None
        self.center = center
        self.stale = True

    def set_radius(self, radius):
        """Set the radius of the wedge and invalidate the cached path."""
        self._path = None
        self.r = radius
        self.stale = True

    def set_theta1(self, theta1):
        """Set the starting angle (degrees) and invalidate the cached path."""
        self._path = None
        self.theta1 = theta1
        self.stale = True

    def set_theta2(self, theta2):
        """Set the ending angle (degrees) and invalidate the cached path."""
        self._path = None
        self.theta2 = theta2
        self.stale = True

    def set_width(self, width):
        """Set the annular width of the wedge and invalidate the cached path."""
        self._path = None
        self.width = width
        self.stale = True

    def get_path(self):
        """Return the wedge's `.Path`, rebuilding it if it was invalidated."""
        if self._path is None:
            self._recompute_path()
        return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
    """An arrow patch."""

    def __str__(self):
        return "Arrow()"

    # Canonical unit arrow: tail at (0, 0), tip at (1, 0), tail width 0.2,
    # head width 0.6.  set_data() maps this onto the requested geometry.
    _path = Path._create_closed([
        [0.0, 0.1], [0.0, -0.1], [0.8, -0.1], [0.8, -0.3], [1.0, 0.0],
        [0.8, 0.3], [0.8, 0.1]])

    @_docstring.interpd
    def __init__(self, x, y, dx, dy, *, width=1.0, **kwargs):
        """
        Draw an arrow from (*x*, *y*) to (*x* + *dx*, *y* + *dy*), with the
        width of the arrow scaled by *width*.

        Parameters
        ----------
        x : float
            x coordinate of the arrow tail.
        y : float
            y coordinate of the arrow tail.
        dx : float
            Arrow length in the x direction.
        dy : float
            Arrow length in the y direction.
        width : float, default: 1
            Scale factor for the width of the arrow. With a default value of 1,
            the tail width is 0.2 and head width is 0.6.
        **kwargs
            Keyword arguments control the `Patch` properties:

            %(Patch:kwdoc)s

        See Also
        --------
        FancyArrow
            Patch that allows independent control of the head and tail
            properties.
        """
        super().__init__(**kwargs)
        self.set_data(x, y, dx, dy, width)

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        return self._patch_transform

    def set_data(self, x=None, y=None, dx=None, dy=None, width=None):
        """
        Set `.Arrow` x, y, dx, dy and width.

        Values left as None will not be updated.

        Parameters
        ----------
        x, y : float or None, default: None
            The x and y coordinates of the arrow base.
        dx, dy : float or None, default: None
            The length of the arrow along x and y direction.
        width : float or None, default: None
            Width of full arrow tail.
        """
        # Store only the values that were actually supplied.
        updates = {'_x': x, '_y': y, '_dx': dx, '_dy': dy, '_width': width}
        for attr, value in updates.items():
            if value is not None:
                setattr(self, attr, value)
        # Map the canonical unit arrow onto the requested geometry:
        # stretch to (length, width), rotate towards (dx, dy), move to (x, y).
        length = np.hypot(self._dx, self._dy)
        rotation = np.arctan2(self._dy, self._dx)
        transform = transforms.Affine2D()
        transform.scale(length, self._width)
        transform.rotate(rotation)
        transform.translate(self._x, self._y)
        self._patch_transform = transform.frozen()
class FancyArrow(Polygon):
    """
    Like Arrow, but lets you set head width and head height independently.
    """

    _edge_default = True

    def __str__(self):
        return "FancyArrow()"

    @_docstring.interpd
    def __init__(self, x, y, dx, dy, *,
                 width=0.001, length_includes_head=False, head_width=None,
                 head_length=None, shape='full', overhang=0,
                 head_starts_at_zero=False, **kwargs):
        """
        Parameters
        ----------
        x, y : float
            The x and y coordinates of the arrow base.
        dx, dy : float
            The length of the arrow along x and y direction.
        width : float, default: 0.001
            Width of full arrow tail.
        length_includes_head : bool, default: False
            True if head is to be counted in calculating the length.
        head_width : float or None, default: 3*width
            Total width of the full arrow head.
        head_length : float or None, default: 1.5*head_width
            Length of arrow head.
        shape : {'full', 'left', 'right'}, default: 'full'
            Draw the left-half, right-half, or full arrow.
        overhang : float, default: 0
            Fraction that the arrow is swept back (0 overhang means
            triangular shape). Can be negative or greater than one.
        head_starts_at_zero : bool, default: False
            If True, the head starts being drawn at coordinate 0
            instead of ending at coordinate 0.
        **kwargs
            `.Patch` properties:

            %(Patch:kwdoc)s
        """
        self._x = x
        self._y = y
        self._dx = dx
        self._dy = dy
        self._width = width
        self._length_includes_head = length_includes_head
        self._head_width = head_width
        self._head_length = head_length
        self._shape = shape
        self._overhang = overhang
        self._head_starts_at_zero = head_starts_at_zero
        # Compute self.verts before Polygon.__init__, which consumes them.
        self._make_verts()
        super().__init__(self.verts, closed=True, **kwargs)

    def set_data(self, *, x=None, y=None, dx=None, dy=None, width=None,
                 head_width=None, head_length=None):
        """
        Set `.FancyArrow` x, y, dx, dy, width, head_width, and head_length.

        Values left as None will not be updated.

        Parameters
        ----------
        x, y : float or None, default: None
            The x and y coordinates of the arrow base.
        dx, dy : float or None, default: None
            The length of the arrow along x and y direction.
        width : float or None, default: None
            Width of full arrow tail.
        head_width : float or None, default: None
            Total width of the full arrow head.
        head_length : float or None, default: None
            Length of arrow head.
        """
        if x is not None:
            self._x = x
        if y is not None:
            self._y = y
        if dx is not None:
            self._dx = dx
        if dy is not None:
            self._dy = dy
        if width is not None:
            self._width = width
        if head_width is not None:
            self._head_width = head_width
        if head_length is not None:
            self._head_length = head_length
        # Rebuild the vertices and push them into the underlying Polygon.
        self._make_verts()
        self.set_xy(self.verts)

    def _make_verts(self):
        # Resolve the documented defaults: head_width=3*width,
        # head_length=1.5*head_width.
        if self._head_width is None:
            head_width = 3 * self._width
        else:
            head_width = self._head_width
        if self._head_length is None:
            head_length = 1.5 * head_width
        else:
            head_length = self._head_length

        distance = np.hypot(self._dx, self._dy)

        if self._length_includes_head:
            length = distance
        else:
            length = distance + head_length
        if np.size(length) == 0:
            self.verts = np.empty([0, 2])  # display nothing if empty
        else:
            # start by drawing horizontal arrow, point at (0, 0)
            hw, hl = head_width, head_length
            hs, lw = self._overhang, self._width
            left_half_arrow = np.array([
                [0.0, 0.0],                 # tip
                [-hl, -hw / 2],             # leftmost
                [-hl * (1 - hs), -lw / 2],  # meets stem
                [-length, -lw / 2],         # bottom left
                [-length, 0],
            ])
            # if we're not including the head, shift up by head length
            if not self._length_includes_head:
                left_half_arrow += [head_length, 0]
            # if the head starts at 0, shift up by another head length
            if self._head_starts_at_zero:
                left_half_arrow += [head_length / 2, 0]
            # figure out the shape, and complete accordingly
            if self._shape == 'left':
                coords = left_half_arrow
            else:
                # Mirror across the stem axis to get the right half.
                right_half_arrow = left_half_arrow * [1, -1]
                if self._shape == 'right':
                    coords = right_half_arrow
                elif self._shape == 'full':
                    # The half-arrows contain the midpoint of the stem,
                    # which we can omit from the full arrow. Including it
                    # twice caused a problem with xpdf.
                    coords = np.concatenate([left_half_arrow[:-1],
                                             right_half_arrow[-2::-1]])
                else:
                    raise ValueError(f"Got unknown shape: {self._shape!r}")
            if distance != 0:
                cx = self._dx / distance
                sx = self._dy / distance
            else:
                # Account for division by zero
                cx, sx = 0, 1
            # Rotation matrix (applied from the right) that turns the
            # canonical horizontal arrow towards (dx, dy), then translate the
            # tip to the arrow's end point.
            M = [[cx, sx], [-sx, cx]]
            self.verts = np.dot(coords, M) + [
                self._x + self._dx,
                self._y + self._dy,
            ]
# Expose FancyArrow's parameter documentation (the docstring minus its first
# two lines) for interpolation as %(FancyArrow)s in other docstrings.
_docstring.interpd.register(
    FancyArrow="\n".join(
        (inspect.getdoc(FancyArrow.__init__) or "").splitlines()[2:]))
class CirclePolygon(RegularPolygon):
    """A polygon-approximation of a circle patch."""

    def __str__(self):
        fmt = "CirclePolygon((%g, %g), radius=%g, resolution=%d)"
        return fmt % (self.xy[0], self.xy[1], self.radius, self.numvertices)

    @_docstring.interpd
    def __init__(self, xy, radius=5, *,
                 resolution=20,  # the number of vertices
                 **kwargs):
        """
        Approximate a circle at *xy* = (*x*, *y*) with the given *radius* by a
        regular polygon with *resolution* sides.

        For a smoother circle drawn with splines, see `Circle`.

        Valid keyword arguments are:

        %(Patch:kwdoc)s
        """
        super().__init__(xy, resolution, radius=radius, orientation=0,
                         **kwargs)
class Ellipse(Patch):
    """A scale-free ellipse."""

    def __str__(self):
        pars = (self._center[0], self._center[1],
                self.width, self.height, self.angle)
        fmt = "Ellipse(xy=(%s, %s), width=%s, height=%s, angle=%s)"
        return fmt % pars

    @_docstring.interpd
    def __init__(self, xy, width, height, *, angle=0, **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            xy coordinates of ellipse centre.
        width : float
            Total length (diameter) of horizontal axis.
        height : float
            Total length (diameter) of vertical axis.
        angle : float, default: 0
            Rotation in degrees anti-clockwise.

        Notes
        -----
        Valid keyword arguments are:

        %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self._center = xy
        self._width, self._height = width, height
        self._angle = angle
        # The path is always the unit circle; position, size and rotation are
        # applied via the patch transform (see _recompute_transform).
        self._path = Path.unit_circle()
        # Required for EllipseSelector with axes aspect ratio != 1
        # The patch is defined in data coordinates and when changing the
        # selector with square modifier and not in data coordinates, we need
        # to correct for the aspect ratio difference between the data and
        # display coordinate systems.
        self._aspect_ratio_correction = 1.0
        # Note: This cannot be calculated until this is added to an Axes
        self._patch_transform = transforms.IdentityTransform()

    def _recompute_transform(self):
        """
        Notes
        -----
        This cannot be called until after this has been added to an Axes,
        otherwise unit conversion will fail. This makes it very important to
        call the accessor method and not directly access the transformation
        member variable.
        """
        center = (self.convert_xunits(self._center[0]),
                  self.convert_yunits(self._center[1]))
        width = self.convert_xunits(self._width)
        height = self.convert_yunits(self._height)
        # Scale the unit circle to the half-axes, rotate, then translate;
        # the aspect-ratio correction is applied before the rotation and
        # undone afterwards.
        self._patch_transform = transforms.Affine2D() \
            .scale(width * 0.5, height * 0.5 * self._aspect_ratio_correction) \
            .rotate_deg(self.angle) \
            .scale(1, 1 / self._aspect_ratio_correction) \
            .translate(*center)

    def get_path(self):
        """Return the path of the ellipse."""
        return self._path

    def get_patch_transform(self):
        self._recompute_transform()
        return self._patch_transform

    def set_center(self, xy):
        """
        Set the center of the ellipse.

        Parameters
        ----------
        xy : (float, float)
        """
        self._center = xy
        self.stale = True

    def get_center(self):
        """Return the center of the ellipse."""
        return self._center

    center = property(get_center, set_center)

    def set_width(self, width):
        """
        Set the width of the ellipse.

        Parameters
        ----------
        width : float
        """
        self._width = width
        self.stale = True

    def get_width(self):
        """
        Return the width of the ellipse.
        """
        return self._width

    width = property(get_width, set_width)

    def set_height(self, height):
        """
        Set the height of the ellipse.

        Parameters
        ----------
        height : float
        """
        self._height = height
        self.stale = True

    def get_height(self):
        """Return the height of the ellipse."""
        return self._height

    height = property(get_height, set_height)

    def set_angle(self, angle):
        """
        Set the angle of the ellipse.

        Parameters
        ----------
        angle : float
        """
        self._angle = angle
        self.stale = True

    def get_angle(self):
        """Return the angle of the ellipse."""
        return self._angle

    angle = property(get_angle, set_angle)

    def get_corners(self):
        """
        Return the corners of the ellipse bounding box.

        The bounding box orientation is moving anti-clockwise from the
        lower left corner defined before rotation.
        """
        return self.get_patch_transform().transform(
            [(-1, -1), (1, -1), (1, 1), (-1, 1)])

    def get_vertices(self):
        """
        Return the vertices coordinates of the ellipse.

        The vertices are the two endpoints of the ellipse's major axis.

        .. versionadded:: 3.8
        """
        # (0, +-1) in unit-circle space maps onto the vertical axis, which is
        # the major axis when height > width.
        if self.width < self.height:
            ret = self.get_patch_transform().transform([(0, 1), (0, -1)])
        else:
            ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])
        return [tuple(x) for x in ret]

    def get_co_vertices(self):
        """
        Return the co-vertices coordinates of the ellipse.

        The co-vertices are the two endpoints of the ellipse's minor axis.

        .. versionadded:: 3.8
        """
        if self.width < self.height:
            ret = self.get_patch_transform().transform([(1, 0), (-1, 0)])
        else:
            ret = self.get_patch_transform().transform([(0, 1), (0, -1)])
        return [tuple(x) for x in ret]
class Annulus(Patch):
    """
    An elliptical annulus.
    """

    @_docstring.interpd
    def __init__(self, xy, r, width, angle=0.0, **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            xy coordinates of annulus centre.
        r : float or (float, float)
            The radius, or semi-axes:

            - If float: radius of the outer circle.
            - If two floats: semi-major and -minor axes of outer ellipse.
        width : float
            Width (thickness) of the annular ring. The width is measured inward
            from the outer ellipse so that for the inner ellipse the semi-axes
            are given by ``r - width``. *width* must be less than or equal to
            the semi-minor axis.
        angle : float, default: 0
            Rotation angle in degrees (anti-clockwise from the positive
            x-axis). Ignored for circular annuli (i.e., if *r* is a scalar).
        **kwargs
            Keyword arguments control the `Patch` properties:

            %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)

        # set_radii must run before width, since set_width validates against
        # the semi-minor axis.
        self.set_radii(r)
        self.center = xy
        self.width = width
        self.angle = angle
        # The path is built lazily; None marks it as needing recomputation.
        self._path = None

    def __str__(self):
        if self.a == self.b:
            r = self.a
        else:
            r = (self.a, self.b)
        return "Annulus(xy=(%s, %s), r=%s, width=%s, angle=%s)" % \
                (*self.center, r, self.width, self.angle)

    def set_center(self, xy):
        """
        Set the center of the annulus.

        Parameters
        ----------
        xy : (float, float)
        """
        self._center = xy
        self._path = None
        self.stale = True

    def get_center(self):
        """Return the center of the annulus."""
        return self._center

    center = property(get_center, set_center)

    def set_width(self, width):
        """
        Set the width (thickness) of the annulus ring.

        The width is measured inwards from the outer ellipse.

        Parameters
        ----------
        width : float
        """
        if width > min(self.a, self.b):
            raise ValueError(
                'Width of annulus must be less than or equal to semi-minor axis')
        self._width = width
        self._path = None
        self.stale = True

    def get_width(self):
        """Return the width (thickness) of the annulus ring."""
        return self._width

    width = property(get_width, set_width)

    def set_angle(self, angle):
        """
        Set the tilt angle of the annulus.

        Parameters
        ----------
        angle : float
        """
        self._angle = angle
        self._path = None
        self.stale = True

    def get_angle(self):
        """Return the angle of the annulus."""
        return self._angle

    angle = property(get_angle, set_angle)

    def set_semimajor(self, a):
        """
        Set the semi-major axis *a* of the annulus.

        Parameters
        ----------
        a : float
        """
        self.a = float(a)
        self._path = None
        self.stale = True

    def set_semiminor(self, b):
        """
        Set the semi-minor axis *b* of the annulus.

        Parameters
        ----------
        b : float
        """
        self.b = float(b)
        self._path = None
        self.stale = True

    def set_radii(self, r):
        """
        Set the semi-major (*a*) and semi-minor radii (*b*) of the annulus.

        Parameters
        ----------
        r : float or (float, float)
            The radius, or semi-axes:

            - If float: radius of the outer circle.
            - If two floats: semi-major and -minor axes of outer ellipse.
        """
        if np.shape(r) == (2,):
            self.a, self.b = r
        elif np.shape(r) == ():
            self.a = self.b = float(r)
        else:
            raise ValueError("Parameter 'r' must be one or two floats.")
        self._path = None
        self.stale = True

    def get_radii(self):
        """Return the semi-major and semi-minor radii of the annulus."""
        return self.a, self.b

    radii = property(get_radii, set_radii)

    def _transform_verts(self, verts, a, b):
        # Scale unit-circle vertices to semi-axes (a, b), rotate by the tilt
        # angle, and translate to the annulus center (with unit conversion).
        return transforms.Affine2D() \
            .scale(*self._convert_xy_units((a, b))) \
            .rotate_deg(self.angle) \
            .translate(*self._convert_xy_units(self.center)) \
            .transform(verts)

    def _recompute_path(self):
        # circular arc
        arc = Path.arc(0, 360)

        # annulus needs to draw an outer ring
        # followed by a reversed and scaled inner ring
        a, b, w = self.a, self.b, self.width
        v1 = self._transform_verts(arc.vertices, a, b)
        v2 = self._transform_verts(arc.vertices[::-1], a - w, b - w)
        v = np.vstack([v1, v2, v1[0, :], (0, 0)])
        c = np.hstack([arc.codes, Path.MOVETO,
                       arc.codes[1:], Path.MOVETO,
                       Path.CLOSEPOLY])
        self._path = Path(v, c)

    def get_path(self):
        """Return the annulus `.Path`, rebuilding it if it was invalidated."""
        if self._path is None:
            self._recompute_path()
        return self._path
class Circle(Ellipse):
    """A circle patch."""

    def __str__(self):
        return "Circle(xy=(%g, %g), radius=%g)" % (
            self.center[0], self.center[1], self.radius)

    @_docstring.interpd
    def __init__(self, xy, radius=5, **kwargs):
        """
        Create a true circle at center *xy* = (*x*, *y*) with given *radius*.

        Unlike `CirclePolygon` which is a polygonal approximation, this uses
        Bezier splines and is much closer to a scale-free circle.

        Valid keyword arguments are:

        %(Patch:kwdoc)s
        """
        # A circle is an ellipse whose width and height are both the diameter.
        diameter = radius * 2
        super().__init__(xy, diameter, diameter, **kwargs)
        self.radius = radius

    def set_radius(self, radius):
        """
        Set the radius of the circle.

        Parameters
        ----------
        radius : float
        """
        # Keep the underlying ellipse axes in sync with the radius.
        self.height = self.width = 2 * radius
        self.stale = True

    def get_radius(self):
        """Return the radius of the circle."""
        return self.width / 2.

    radius = property(get_radius, set_radius)
class Arc(Ellipse):
    """
    An elliptical arc, i.e. a segment of an ellipse.

    Due to internal optimizations, the arc cannot be filled.
    """

    def __str__(self):
        pars = (self.center[0], self.center[1], self.width,
                self.height, self.angle, self.theta1, self.theta2)
        fmt = ("Arc(xy=(%g, %g), width=%g, "
               "height=%g, angle=%g, theta1=%g, theta2=%g)")
        return fmt % pars

    @_docstring.interpd
    def __init__(self, xy, width, height, *,
                 angle=0.0, theta1=0.0, theta2=360.0, **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            The center of the ellipse.
        width : float
            The length of the horizontal axis.
        height : float
            The length of the vertical axis.
        angle : float
            Rotation of the ellipse in degrees (counterclockwise).
        theta1, theta2 : float, default: 0, 360
            Starting and ending angles of the arc in degrees. These values
            are relative to *angle*, e.g. if *angle* = 45 and *theta1* = 90
            the absolute starting angle is 135.
            Default *theta1* = 0, *theta2* = 360, i.e. a complete ellipse.
            The arc is drawn in the counterclockwise direction.
            Angles greater than or equal to 360, or smaller than 0, are
            represented by an equivalent angle in the range [0, 360), by
            taking the input value mod 360.

        Other Parameters
        ----------------
        **kwargs : `~matplotlib.patches.Patch` properties
            Most `.Patch` properties are supported as keyword arguments,
            except *fill* and *facecolor* because filling is not supported.

        %(Patch:kwdoc)s
        """
        # Filling is unsupported; reject it before Ellipse.__init__ runs.
        fill = kwargs.setdefault('fill', False)
        if fill:
            raise ValueError("Arc objects cannot be filled")

        super().__init__(xy, width, height, angle=angle, **kwargs)

        self.theta1 = theta1
        self.theta2 = theta2
        # Cache the stretch-corrected angles/sizes so draw()/_update_path()
        # can detect when the path needs recomputation.
        (self._theta1, self._theta2, self._stretched_width,
         self._stretched_height) = self._theta_stretch()
        self._path = Path.arc(self._theta1, self._theta2)

    @artist.allow_rasterization
    def draw(self, renderer):
        """
        Draw the arc to the given *renderer*.

        Notes
        -----
        Ellipses are normally drawn using an approximation that uses
        eight cubic Bezier splines.  The error of this approximation
        is 1.89818e-6, according to this unverified source:

          Lancaster, Don.  *Approximating a Circle or an Ellipse Using
          Four Bezier Cubic Splines.*

          https://www.tinaja.com/glib/ellipse4.pdf

        There is a use case where very large ellipses must be drawn
        with very high accuracy, and it is too expensive to render the
        entire ellipse with enough segments (either splines or line
        segments).  Therefore, in the case where either radius of the
        ellipse is large enough that the error of the spline
        approximation will be visible (greater than one pixel offset
        from the ideal), a different technique is used.

        In that case, only the visible parts of the ellipse are drawn,
        with each visible arc using a fixed number of spline segments
        (8).  The algorithm proceeds as follows:

        1. The points where the ellipse intersects the axes (or figure)
           bounding box are located.  (This is done by performing an inverse
           transformation on the bbox such that it is relative to the unit
           circle -- this makes the intersection calculation much easier than
           doing rotated ellipse intersection directly.)

           This uses the "line intersecting a circle" algorithm from:

               Vince, John.  *Geometry for Computer Graphics: Formulae,
               Examples & Proofs.*  London: Springer-Verlag, 2005.

        2. The angles of each of the intersection points are calculated.

        3. Proceeding counterclockwise starting in the positive
           x-direction, each of the visible arc-segments between the
           pairs of vertices are drawn using the Bezier arc
           approximation technique implemented in `.Path.arc`.
        """
        if not self.get_visible():
            return

        self._recompute_transform()
        self._update_path()
        # Get width and height in pixels we need to use
        # `self.get_data_transform` rather than `self.get_transform`
        # because we want the transform from dataspace to the
        # screen space to estimate how big the arc will be in physical
        # units when rendered (the transform that we get via
        # `self.get_transform()` goes from an idealized unit-radius
        # space to screen space).
        data_to_screen_trans = self.get_data_transform()
        pwidth, pheight = (
            data_to_screen_trans.transform((self._stretched_width,
                                            self._stretched_height)) -
            data_to_screen_trans.transform((0, 0)))
        inv_error = (1.0 / 1.89818e-6) * 0.5

        # Small enough that the spline approximation error is sub-pixel:
        # fall back to the ordinary whole-path draw.
        if pwidth < inv_error and pheight < inv_error:
            return Patch.draw(self, renderer)

        def line_circle_intersect(x0, y0, x1, y1):
            # Intersections of the infinite line through (x0, y0)/(x1, y1)
            # with the unit circle; empty array if the line misses it.
            dx = x1 - x0
            dy = y1 - y0
            dr2 = dx * dx + dy * dy
            D = x0 * y1 - x1 * y0
            D2 = D * D
            discrim = dr2 - D2
            if discrim >= 0.0:
                sign_dy = np.copysign(1, dy)  # +/-1, never 0.
                sqrt_discrim = np.sqrt(discrim)
                return np.array(
                    [[(D * dy + sign_dy * dx * sqrt_discrim) / dr2,
                      (-D * dx + abs(dy) * sqrt_discrim) / dr2],
                     [(D * dy - sign_dy * dx * sqrt_discrim) / dr2,
                      (-D * dx - abs(dy) * sqrt_discrim) / dr2]])
            else:
                return np.empty((0, 2))

        def segment_circle_intersect(x0, y0, x1, y1):
            # Keep only the line/circle intersections lying on the segment.
            epsilon = 1e-9
            if x1 < x0:
                x0e, x1e = x1, x0
            else:
                x0e, x1e = x0, x1
            if y1 < y0:
                y0e, y1e = y1, y0
            else:
                y0e, y1e = y0, y1
            xys = line_circle_intersect(x0, y0, x1, y1)
            xs, ys = xys.T
            return xys[
                (x0e - epsilon < xs) & (xs < x1e + epsilon)
                & (y0e - epsilon < ys) & (ys < y1e + epsilon)
            ]

        # Transform the Axes (or figure) box_path so that it is relative to
        # the unit circle in the same way that it is relative to the desired
        # ellipse.
        box_path_transform = (
            transforms.BboxTransformTo((self.axes or self.get_figure(root=False)).bbox)
            - self.get_transform())
        box_path = Path.unit_rectangle().transformed(box_path_transform)

        thetas = set()
        # For each of the point pairs, there is a line segment
        for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
            xy = segment_circle_intersect(*p0, *p1)
            x, y = xy.T
            # arctan2 return [-pi, pi), the rest of our angles are in
            # [0, 360], adjust as needed.
            theta = (np.rad2deg(np.arctan2(y, x)) + 360) % 360
            thetas.update(
                theta[(self._theta1 < theta) & (theta < self._theta2)])
        thetas = sorted(thetas) + [self._theta2]
        last_theta = self._theta1
        theta1_rad = np.deg2rad(self._theta1)
        inside = box_path.contains_point(
            (np.cos(theta1_rad), np.sin(theta1_rad))
        )

        # save original path
        path_original = self._path
        # Draw only the sub-arcs whose start lies inside the bounding box;
        # visibility alternates at each intersection angle.
        for theta in thetas:
            if inside:
                self._path = Path.arc(last_theta, theta, 8)
                Patch.draw(self, renderer)
                inside = False
            else:
                inside = True
            last_theta = theta

        # restore original path
        self._path = path_original

    def _update_path(self):
        # Compute new values and update and set new _path if any value changed
        stretched = self._theta_stretch()
        if any(a != b for a, b in zip(
                stretched, (self._theta1, self._theta2, self._stretched_width,
                            self._stretched_height))):
            (self._theta1, self._theta2, self._stretched_width,
             self._stretched_height) = stretched
            self._path = Path.arc(self._theta1, self._theta2)

    def _theta_stretch(self):
        # If the width and height of ellipse are not equal, take into account
        # stretching when calculating angles to draw between
        def theta_stretch(theta, scale):
            theta = np.deg2rad(theta)
            x = np.cos(theta)
            y = np.sin(theta)
            stheta = np.rad2deg(np.arctan2(scale * y, x))
            # arctan2 has the range [-pi, pi], we expect [0, 2*pi]
            return (stheta + 360) % 360

        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        if (
            # if we need to stretch the angles because we are distorted
            width != height
            # and we are not doing a full circle.
            #
            # 0 and 360 do not exactly round-trip through the angle
            # stretching (due to both float precision limitations and
            # the difference between the range of arctan2 [-pi, pi] and
            # this method [0, 360]) so avoid doing it if we don't have to.
            and not (self.theta1 != self.theta2 and
                     self.theta1 % 360 == self.theta2 % 360)
        ):
            theta1 = theta_stretch(self.theta1, width / height)
            theta2 = theta_stretch(self.theta2, width / height)
            return theta1, theta2, width, height
        return self.theta1, self.theta2, width, height
def bbox_artist(artist, renderer, props=None, fill=True):
    """
    Debug helper that draws a rectangle around the bounding box returned by
    an artist's `.Artist.get_window_extent`, to test whether the artist is
    returning the correct bbox.

    *props* is a dict of rectangle props with the additional property
    'pad' that sets the padding around the bbox in points.
    """
    # Work on a copy so popping 'pad' does not mutate the caller's dict.
    props = dict(props) if props is not None else {}
    pad_points = props.pop('pad', 4)
    pad = renderer.points_to_pixels(pad_points)
    bbox = artist.get_window_extent(renderer)
    rect = Rectangle(
        xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2),
        width=bbox.width + pad, height=bbox.height + pad,
        fill=fill, transform=transforms.IdentityTransform(), clip_on=False)
    rect.update(props)
    rect.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
    """
    Debug helper that draws an unfilled rectangle outlining *bbox*, to test
    whether an artist's `.Artist.get_window_extent` returns the correct bbox.
    """
    rect = Rectangle(xy=bbox.p0, width=bbox.width, height=bbox.height,
                     edgecolor=color, fill=False, clip_on=False)
    if trans is not None:
        rect.set_transform(trans)
    rect.draw(renderer)
class _Style:
    """
    A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclass of it, and it
    provides some helper functions.
    """

    def __init_subclass__(cls):
        # Automatically perform docstring interpolation on the subclasses:
        # This allows listing the supported styles via
        # - %(BoxStyle:table)s
        # - %(ConnectionStyle:table)s
        # - %(ArrowStyle:table)s
        # and additionally adding .. ACCEPTS: blocks via
        # - %(BoxStyle:table_and_accepts)s
        # - %(ConnectionStyle:table_and_accepts)s
        # - %(ArrowStyle:table_and_accepts)s
        _docstring.interpd.register(**{
            f"{cls.__name__}:table": cls.pprint_styles(),
            f"{cls.__name__}:table_and_accepts": (
                cls.pprint_styles()
                + "\n\n    .. ACCEPTS: ["
                + "|".join(map(" '{}' ".format, cls._style_list))
                + "]")
        })

    def __new__(cls, stylename, **kwargs):
        """Return the instance of the subclass with the given style name."""
        # The "class" should have the _style_list attribute, which is a mapping
        # of style names to style classes.
        # Style strings look like "round,pad=0.2": the first comma-separated
        # token is the style name, the rest are numeric keyword arguments.
        _list = stylename.replace(" ", "").split(",")
        _name = _list[0].lower()
        try:
            _cls = cls._style_list[_name]
        except KeyError as err:
            raise ValueError(f"Unknown style: {stylename!r}") from err
        try:
            _args_pair = [cs.split("=") for cs in _list[1:]]
            _args = {k: float(v) for k, v in _args_pair}
        except ValueError as err:
            raise ValueError(
                f"Incorrect style argument: {stylename!r}") from err
        # Explicit keyword arguments override those parsed from the string.
        return _cls(**{**_args, **kwargs})

    @classmethod
    def get_styles(cls):
        """Return a dictionary of available styles."""
        return cls._style_list

    @classmethod
    def pprint_styles(cls):
        """Return the available styles as pretty-printed string."""
        table = [('Class', 'Name', 'Parameters'),
                 *[(cls.__name__,
                    # Add backquotes, as - and | have special meaning in reST.
                    f'``{name}``',
                    # [1:-1] drops the surrounding parentheses.
                    str(inspect.signature(cls))[1:-1] or 'None')
                   for name, cls in cls._style_list.items()]]
        # Convert to rst table.
        col_len = [max(len(cell) for cell in column) for column in zip(*table)]
        table_formatstr = '  '.join('=' * cl for cl in col_len)
        rst_table = '\n'.join([
            '',
            table_formatstr,
            '  '.join(cell.ljust(cl) for cell, cl in zip(table[0], col_len)),
            table_formatstr,
            *['  '.join(cell.ljust(cl) for cell, cl in zip(row, col_len))
              for row in table[1:]],
            table_formatstr,
        ])
        return textwrap.indent(rst_table, prefix=' ' * 4)

    @classmethod
    @_api.deprecated(
        '3.10.0',
        message="This method is never used internally.",
        alternative="No replacement. Please open an issue if you use this."
    )
    def register(cls, name, style):
        """Register a new style."""
        if not issubclass(style, cls._Base):
            raise ValueError(f"{style} must be a subclass of {cls._Base}")
        cls._style_list[name] = style
def _register_style(style_list, cls=None, *, name=None):
"""Class decorator that stashes a class in a (style) dictionary."""
if cls is None:
return functools.partial(_register_style, style_list, name=name)
style_list[name or cls.__name__.lower()] = cls
return cls
@_docstring.interpd
class BoxStyle(_Style):
    """
    `BoxStyle` is a container class which defines several
    boxstyle classes, which are used for `FancyBboxPatch`.

    A style object can be created as::

        BoxStyle.Round(pad=0.2)

    or::

        BoxStyle("Round", pad=0.2)

    or::

        BoxStyle("Round, pad=0.2")

    The following boxstyle classes are defined.

    %(BoxStyle:table)s

    An instance of a boxstyle class is a callable object, with the signature ::

       __call__(self, x0, y0, width, height, mutation_size) -> Path

    *x0*, *y0*, *width* and *height* specify the location and size of the box
    to be drawn; *mutation_size* scales the outline properties such as padding.
    """

    # Registry of name -> style class, filled by the ``_register_style``
    # decorators below.
    _style_list = {}

    @_register_style(_style_list)
    class Square:
        """A square box."""

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            """
            self.pad = pad

        def __call__(self, x0, y0, width, height, mutation_size):
            # Padding scales with *mutation_size* (typically the font size).
            pad = mutation_size * self.pad
            # width and height with padding added.
            width, height = width + 2 * pad, height + 2 * pad
            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad
            x1, y1 = x0 + width, y0 + height
            return Path._create_closed(
                [(x0, y0), (x1, y0), (x1, y1), (x0, y1)])

    @_register_style(_style_list)
    class Circle:
        """A circular box."""

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            """
            self.pad = pad

        def __call__(self, x0, y0, width, height, mutation_size):
            pad = mutation_size * self.pad
            width, height = width + 2 * pad, height + 2 * pad
            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad
            # Circle centered on the padded box, with a radius large enough
            # to cover its longer side.
            return Path.circle((x0 + width / 2, y0 + height / 2),
                               max(width, height) / 2)

    @_register_style(_style_list)
    class Ellipse:
        """
        An elliptical box.

        .. versionadded:: 3.7
        """

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            """
            self.pad = pad

        def __call__(self, x0, y0, width, height, mutation_size):
            pad = mutation_size * self.pad
            width, height = width + 2 * pad, height + 2 * pad
            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad
            # Semi-axes width/sqrt(2) and height/sqrt(2) place the corners of
            # the padded box exactly on the ellipse:
            # (w/2 / (w/sqrt(2)))**2 + (h/2 / (h/sqrt(2)))**2 == 1.
            a = width / math.sqrt(2)
            b = height / math.sqrt(2)
            trans = Affine2D().scale(a, b).translate(x0 + width / 2,
                                                     y0 + height / 2)
            return trans.transform_path(Path.unit_circle())

    @_register_style(_style_list)
    class LArrow:
        """A box in the shape of a left-pointing arrow."""

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            """
            self.pad = pad

        def __call__(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # width and height with padding added.
            width, height = width + 2 * pad, height + 2 * pad
            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad,
            x1, y1 = x0 + width, y0 + height

            # dx: depth of the arrow head (half the padded height).
            dx = (y1 - y0) / 2
            dxx = dx / 2
            x0 = x0 + pad / 1.4  # adjust by ~sqrt(2)

            return Path._create_closed(
                [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
                 (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
                 (x0 + dxx, y0 - dxx),  # arrow
                 (x0 + dxx, y0)])

    @_register_style(_style_list)
    class RArrow(LArrow):
        """A box in the shape of a right-pointing arrow."""

        def __call__(self, x0, y0, width, height, mutation_size):
            p = BoxStyle.LArrow.__call__(
                self, x0, y0, width, height, mutation_size)
            # Mirror the left-pointing arrow about the vertical centerline
            # of the original (unpadded) box.
            p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
            return p

    @_register_style(_style_list)
    class DArrow:
        """A box in the shape of a two-way arrow."""
        # Modified from LArrow to add a right arrow to the bbox.

        def __init__(self, pad=0.3):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            """
            self.pad = pad

        def __call__(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad
            # width and height with padding added.
            # The width is padded by the arrows, so we don't need to pad it.
            height = height + 2 * pad
            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad
            x1, y1 = x0 + width, y0 + height

            # dx: depth of each arrow head (half the padded height).
            dx = (y1 - y0) / 2
            dxx = dx / 2
            x0 = x0 + pad / 1.4  # adjust by ~sqrt(2)

            return Path._create_closed([
                (x0 + dxx, y0), (x1, y0),  # bot-segment
                (x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
                (x1, y1 + dxx),  # right-arrow
                (x1, y1), (x0 + dxx, y1),  # top-segment
                (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
                (x0 + dxx, y0 - dxx),  # left-arrow
                (x0 + dxx, y0)])

    @_register_style(_style_list)
    class Round:
        """A box with round corners."""

        def __init__(self, pad=0.3, rounding_size=None):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            rounding_size : float, default: *pad*
                Radius of the corners.
            """
            self.pad = pad
            self.rounding_size = rounding_size

        def __call__(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad

            # size of the rounding corner; a falsy rounding_size (None or 0)
            # falls back to the padding.
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad

            width, height = width + 2 * pad, height + 2 * pad

            x0, y0 = x0 - pad, y0 - pad,
            x1, y1 = x0 + width, y0 + height

            # Round corners are implemented as quadratic Bezier, e.g.,
            # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
            cp = [(x0 + dr, y0),
                  (x1 - dr, y0),
                  (x1, y0), (x1, y0 + dr),
                  (x1, y1 - dr),
                  (x1, y1), (x1 - dr, y1),
                  (x0 + dr, y1),
                  (x0, y1), (x0, y1 - dr),
                  (x0, y0 + dr),
                  (x0, y0), (x0 + dr, y0),
                  (x0 + dr, y0)]
            com = [Path.MOVETO,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.CLOSEPOLY]
            return Path(cp, com)

    @_register_style(_style_list)
    class Round4:
        """A box with rounded edges."""

        def __init__(self, pad=0.3, rounding_size=None):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            rounding_size : float, default: *pad*/2
                Rounding of edges.
            """
            self.pad = pad
            self.rounding_size = rounding_size

        def __call__(self, x0, y0, width, height, mutation_size):
            # padding
            pad = mutation_size * self.pad

            # Rounding size; defaults to half of the padding.
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad / 2.

            # Shrink the straight part of each side by the rounding amount.
            width = width + 2 * pad - 2 * dr
            height = height + 2 * pad - 2 * dr

            x0, y0 = x0 - pad + dr, y0 - pad + dr,
            x1, y1 = x0 + width, y0 + height

            # Each side is a single cubic Bezier bulging outwards by *dr*.
            cp = [(x0, y0),
                  (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
                  (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
                  (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
                  (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
                  (x0, y0)]
            com = [Path.MOVETO,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CLOSEPOLY]
            return Path(cp, com)

    @_register_style(_style_list)
    class Sawtooth:
        """A box with a sawtooth outline."""

        def __init__(self, pad=0.3, tooth_size=None):
            """
            Parameters
            ----------
            pad : float, default: 0.3
                The amount of padding around the original box.
            tooth_size : float, default: *pad*/2
                Size of the sawtooth.
            """
            self.pad = pad
            self.tooth_size = tooth_size

        def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
            """Return the tooth vertices of the sawtooth outline, closed."""
            # padding
            pad = mutation_size * self.pad

            # size of sawtooth
            if self.tooth_size is None:
                tooth_size = self.pad * .5 * mutation_size
            else:
                tooth_size = self.tooth_size * mutation_size

            hsz = tooth_size / 2
            width = width + 2 * pad - tooth_size
            height = height + 2 * pad - tooth_size

            # the sizes of the vertical and horizontal sawtooth are
            # separately adjusted to fit the given box size.
            dsx_n = round((width - tooth_size) / (tooth_size * 2)) * 2
            dsy_n = round((height - tooth_size) / (tooth_size * 2)) * 2

            x0, y0 = x0 - pad + hsz, y0 - pad + hsz
            x1, y1 = x0 + width, y0 + height

            # Alternate between the baseline and the +/- hsz offsets to form
            # the teeth along each side, in counterclockwise order.
            xs = [
                x0, *np.linspace(x0 + hsz, x1 - hsz, 2 * dsx_n + 1),  # bottom
                *([x1, x1 + hsz, x1, x1 - hsz] * dsy_n)[:2*dsy_n+2],  # right
                x1, *np.linspace(x1 - hsz, x0 + hsz, 2 * dsx_n + 1),  # top
                *([x0, x0 - hsz, x0, x0 + hsz] * dsy_n)[:2*dsy_n+2],  # left
            ]
            ys = [
                *([y0, y0 - hsz, y0, y0 + hsz] * dsx_n)[:2*dsx_n+2],  # bottom
                y0, *np.linspace(y0 + hsz, y1 - hsz, 2 * dsy_n + 1),  # right
                *([y1, y1 + hsz, y1, y1 - hsz] * dsx_n)[:2*dsx_n+2],  # top
                y1, *np.linspace(y1 - hsz, y0 + hsz, 2 * dsy_n + 1),  # left
            ]

            # Repeat the first vertex to close the polygon.
            return [*zip(xs, ys), (xs[0], ys[0])]

        def __call__(self, x0, y0, width, height, mutation_size):
            saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
                                                       height, mutation_size)
            return Path(saw_vertices, closed=True)

    @_register_style(_style_list)
    class Roundtooth(Sawtooth):
        """A box with a rounded sawtooth outline."""

        def __call__(self, x0, y0, width, height, mutation_size):
            saw_vertices = self._get_sawtooth_vertices(x0, y0,
                                                       width, height,
                                                       mutation_size)
            # Add a trailing vertex to allow us to close the polygon correctly
            saw_vertices = np.concatenate([saw_vertices, [saw_vertices[0]]])
            # Every pair of tooth vertices becomes one quadratic Bezier arc.
            codes = ([Path.MOVETO] +
                     [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
                     [Path.CLOSEPOLY])
            return Path(saw_vertices, codes)
@_docstring.interpd
class ConnectionStyle(_Style):
    """
    `ConnectionStyle` is a container class which defines
    several connectionstyle classes, which is used to create a path
    between two points.  These are mainly used with `FancyArrowPatch`.

    A connectionstyle object can be either created as::

        ConnectionStyle.Arc3(rad=0.2)

    or::

        ConnectionStyle("Arc3", rad=0.2)

    or::

        ConnectionStyle("Arc3, rad=0.2")

    The following classes are defined

    %(ConnectionStyle:table)s

    An instance of any connection style class is a callable object,
    whose call signature is::

        __call__(self, posA, posB,
                 patchA=None, patchB=None,
                 shrinkA=2., shrinkB=2.)

    and it returns a `.Path` instance.  *posA* and *posB* are
    tuples of (x, y) coordinates of the two points to be
    connected.  If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
    patch.  The path is further shrunk by *shrinkA* (or *shrinkB*)
    which is given in points.
    """

    # Registry of name -> style class, filled by the ``_register_style``
    # decorators below.
    _style_list = {}

    class _Base:
        """
        A base class for connectionstyle classes.  The subclass needs
        to implement a *connect* method whose call signature is::

            connect(posA, posB)

        where posA and posB are tuples of x, y coordinates to be
        connected.  The method needs to return a path connecting two
        points.  This base class defines a __call__ method, and a few
        helper methods.
        """

        def _in_patch(self, patch):
            """
            Return a predicate function testing whether a point *xy* is
            contained in *patch*.
            """
            return lambda xy: patch.contains(
                SimpleNamespace(x=xy[0], y=xy[1]))[0]

        def _clip(self, path, in_start, in_stop):
            """
            Clip *path* at its start by the region where *in_start* returns
            True, and at its stop by the region where *in_stop* returns True.

            The original path is assumed to start in the *in_start* region and
            to stop in the *in_stop* region.
            """
            if in_start:
                try:
                    _, path = split_path_inout(path, in_start)
                except ValueError:
                    # Path never leaves the region: leave it unclipped.
                    pass
            if in_stop:
                try:
                    path, _ = split_path_inout(path, in_stop)
                except ValueError:
                    pass
            return path

        def __call__(self, posA, posB,
                     shrinkA=2., shrinkB=2., patchA=None, patchB=None):
            """
            Call the *connect* method to create a path between *posA* and
            *posB*; then clip and shrink the path.
            """
            path = self.connect(posA, posB)
            # First clip to the patch boundaries, then shrink the remaining
            # path by circles of radius shrinkA/shrinkB around its endpoints.
            path = self._clip(
                path,
                self._in_patch(patchA) if patchA else None,
                self._in_patch(patchB) if patchB else None,
            )
            path = self._clip(
                path,
                inside_circle(*path.vertices[0], shrinkA) if shrinkA else None,
                inside_circle(*path.vertices[-1], shrinkB) if shrinkB else None
            )
            return path

    @_register_style(_style_list)
    class Arc3(_Base):
        """
        Creates a simple quadratic Bézier curve between two
        points.  The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end points(C2) and the distance of the C1 to the line
        connecting C0-C2 is *rad* times the distance of C0-C2.
        """

        def __init__(self, rad=0.):
            """
            Parameters
            ----------
            rad : float
                Curvature of the curve.
            """
            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB
            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
            dx, dy = x2 - x1, y2 - y1

            # Offset the midpoint perpendicularly to A-B by rad * |AB|.
            f = self.rad

            cx, cy = x12 + f * dy, y12 - f * dx

            vertices = [(x1, y1),
                        (cx, cy),
                        (x2, y2)]
            codes = [Path.MOVETO,
                     Path.CURVE3,
                     Path.CURVE3]

            return Path(vertices, codes)

    @_register_style(_style_list)
    class Angle3(_Base):
        """
        Creates a simple quadratic Bézier curve between two points.  The middle
        control point is placed at the intersecting point of two lines which
        cross the start and end point, and have a slope of *angleA* and
        *angleB*, respectively.
        """

        def __init__(self, angleA=90, angleB=0):
            """
            Parameters
            ----------
            angleA : float
                Starting angle of the path.
            angleB : float
                Ending angle of the path.
            """
            self.angleA = angleA
            self.angleB = angleB

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA = math.cos(math.radians(self.angleA))
            sinA = math.sin(math.radians(self.angleA))
            cosB = math.cos(math.radians(self.angleB))
            sinB = math.sin(math.radians(self.angleB))

            # Control point: intersection of the two rays leaving A and B.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1), (cx, cy), (x2, y2)]
            codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]

            return Path(vertices, codes)

    @_register_style(_style_list)
    class Angle(_Base):
        """
        Creates a piecewise continuous quadratic Bézier path between two
        points.  The path has a one passing-through point placed at the
        intersecting point of two lines which cross the start and end point,
        and have a slope of *angleA* and *angleB*, respectively.
        The connecting edges are rounded with *rad*.
        """

        def __init__(self, angleA=90, angleB=0, rad=0.):
            """
            Parameters
            ----------
            angleA : float
                Starting angle of the path.
            angleB : float
                Ending angle of the path.
            rad : float
                Rounding radius of the edge.
            """
            self.angleA = angleA
            self.angleB = angleB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA = math.cos(math.radians(self.angleA))
            sinA = math.sin(math.radians(self.angleA))
            cosB = math.cos(math.radians(self.angleB))
            sinB = math.sin(math.radians(self.angleB))

            # Corner point: intersection of the two rays leaving A and B.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1)]
            codes = [Path.MOVETO]

            if self.rad == 0.:
                # Sharp corner: straight segments through the intersection.
                vertices.append((cx, cy))
                codes.append(Path.LINETO)
            else:
                # Rounded corner: stop *rad* short of the corner on each side
                # and join with a quadratic Bezier through the corner.
                # NOTE(review): assumes neither endpoint coincides with the
                # corner point (d1, d2 nonzero) — confirm against callers.
                dx1, dy1 = x1 - cx, y1 - cy
                d1 = np.hypot(dx1, dy1)
                f1 = self.rad / d1
                dx2, dy2 = x2 - cx, y2 - cy
                d2 = np.hypot(dx2, dy2)
                f2 = self.rad / d2
                vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
                                 (cx, cy),
                                 (cx + dx2 * f2, cy + dy2 * f2)])
                codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    @_register_style(_style_list)
    class Arc(_Base):
        """
        Creates a piecewise continuous quadratic Bézier path between two
        points.  The path can have two passing-through points, a
        point placed at the distance of *armA* and angle of *angleA* from
        point A, another point with respect to point B.  The edges are
        rounded with *rad*.
        """

        def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
            """
            Parameters
            ----------
            angleA : float
                Starting angle of the path.
            angleB : float
                Ending angle of the path.
            armA : float or None
                Length of the starting arm.
            armB : float or None
                Length of the ending arm.
            rad : float
                Rounding radius of the edges.
            """
            self.angleA = angleA
            self.angleB = angleB
            self.armA = armA
            self.armB = armB

            self.rad = rad

        def connect(self, posA, posB):
            # Each rounded corner is emitted as three vertices (approach
            # point, corner, exit point) with codes [LINETO, CURVE3, CURVE3];
            # *rounded* accumulates the vertices of the corner being built.
            x1, y1 = posA
            x2, y2 = posB

            vertices = [(x1, y1)]
            rounded = []
            codes = [Path.MOVETO]

            if self.armA:
                cosA = math.cos(math.radians(self.angleA))
                sinA = math.sin(math.radians(self.angleA))
                # Approach point *rad* short of the arm-A end, then the
                # arm-A end itself (the corner).
                d = self.armA - self.rad
                rounded.append((x1 + d * cosA, y1 + d * sinA))
                d = self.armA
                rounded.append((x1 + d * cosA, y1 + d * sinA))

            if self.armB:
                cosB = math.cos(math.radians(self.angleB))
                sinB = math.sin(math.radians(self.angleB))
                x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB

                if rounded:
                    # Finish the arm-A corner: exit point *rad* along the
                    # segment toward the arm-B end, then flush it.
                    xp, yp = rounded[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5

                    rounded.append((xp + self.rad * dx / dd,
                                    yp + self.rad * dy / dd))
                    vertices.extend(rounded)
                    codes.extend([Path.LINETO,
                                  Path.CURVE3,
                                  Path.CURVE3])

                    # Start the arm-B corner: approach point *rad* short of
                    # the arm-B end, then the arm-B end itself.  (Without
                    # rebuilding *rounded* here, the stale arm-A vertices
                    # would be re-emitted below, leaving more vertices than
                    # codes.)
                    d = dd - self.rad
                    rounded = [(xp + d * dx / dd, yp + d * dy / dd),
                               (x_armB, y_armB)]
                else:
                    # No arm A: the corner is at the arm-B end, approached
                    # directly from posA.
                    xp, yp = vertices[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5
                    d = dd - self.rad
                    rounded = [(xp + d * dx / dd, yp + d * dy / dd),
                               (x_armB, y_armB)]

            if rounded:
                # Finish the pending corner with an exit point *rad* along
                # the segment toward posB.
                xp, yp = rounded[-1]
                dx, dy = x2 - xp, y2 - yp
                dd = (dx * dx + dy * dy) ** .5

                rounded.append((xp + self.rad * dx / dd,
                                yp + self.rad * dy / dd))
                vertices.extend(rounded)
                codes.extend([Path.LINETO,
                              Path.CURVE3,
                              Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    @_register_style(_style_list)
    class Bar(_Base):
        """
        A line with *angle* between A and B with *armA* and *armB*.  One of the
        arms is extended so that they are connected in a right angle.  The
        length of *armA* is determined by (*armA* + *fraction* x AB distance).
        Same for *armB*.
        """

        def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
            """
            Parameters
            ----------
            armA : float
                Minimum length of armA.
            armB : float
                Minimum length of armB.
            fraction : float
                A fraction of the distance between two points that will be
                added to armA and armB.
            angle : float or None
                Angle of the connecting line (if None, parallel to A and B).
            """
            self.armA = armA
            self.armB = armB
            self.fraction = fraction
            self.angle = angle

        def connect(self, posA, posB):
            x1, y1 = posA
            # Keep the original posB; x2, y2 may be moved below.
            x20, y20 = x2, y2 = posB

            theta1 = math.atan2(y2 - y1, x2 - x1)
            dx, dy = x2 - x1, y2 - y1
            dd = (dx * dx + dy * dy) ** .5
            ddx, ddy = dx / dd, dy / dd

            armA, armB = self.armA, self.armB

            if self.angle is not None:
                # Project B onto the ray from A at *angle*, and lengthen
                # armB so the bar still reaches the original B.
                theta0 = np.deg2rad(self.angle)
                dtheta = theta1 - theta0
                dl = dd * math.sin(dtheta)
                dL = dd * math.cos(dtheta)
                x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
                armB = armB - dl

                # update
                dx, dy = x2 - x1, y2 - y1
                dd2 = (dx * dx + dy * dy) ** .5
                ddx, ddy = dx / dd2, dy / dd2

            arm = max(armA, armB)
            f = self.fraction * dd + arm

            # The bar runs parallel to A-B, offset perpendicularly by *f*.
            cx1, cy1 = x1 + f * ddy, y1 - f * ddx
            cx2, cy2 = x2 + f * ddy, y2 - f * ddx

            vertices = [(x1, y1),
                        (cx1, cy1),
                        (cx2, cy2),
                        (x20, y20)]
            codes = [Path.MOVETO,
                     Path.LINETO,
                     Path.LINETO,
                     Path.LINETO]

            return Path(vertices, codes)
def _point_along_a_line(x0, y0, x1, y1, d):
"""
Return the point on the line connecting (*x0*, *y0*) -- (*x1*, *y1*) whose
distance from (*x0*, *y0*) is *d*.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
@_docstring.interpd
class ArrowStyle(_Style):
"""
`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with `FancyArrowPatch`.
An arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(ArrowStyle:table)s
For an overview of the visual appearance, see
:doc:`/gallery/text_labels_and_annotations/fancyarrow_demo`.
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a `.Path` instance and a boolean
value. *path* is a `.Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in `BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
Notes
-----
*angleA* and *angleB* specify the orientation of the bracket, as either a
clockwise or counterclockwise angle depending on the arrow type. 0 degrees
means perpendicular to the line connecting the arrow's head and tail.
.. plot:: gallery/text_labels_and_annotations/angles_on_bracket_arrows.py
"""
_style_list = {}
class _Base:
    """
    Arrow Transmuter Base class

    ArrowTransmuterBase and its derivatives are used to make a fancy
    arrow around a given path.  The __call__ method returns a path
    (which will be used to create a PathPatch instance) and a boolean
    value indicating the path is open therefore is not fillable.  This
    class is not an artist and actual drawing of the fancy arrow is
    done by the FancyArrowPatch class.
    """

    # The derived classes are required to be able to be initialized
    # w/o arguments, i.e., all its argument (except self) must have
    # the default values.

    @staticmethod
    def ensure_quadratic_bezier(path):
        """
        Some ArrowStyle classes only works with a simple quadratic
        Bézier curve (created with `.ConnectionStyle.Arc3` or
        `.ConnectionStyle.Angle3`).  This static method checks if the
        provided path is a simple quadratic Bézier curve and returns its
        control points if true.
        """
        segments = list(path.iter_segments())
        if (len(segments) != 2 or segments[0][1] != Path.MOVETO or
                segments[1][1] != Path.CURVE3):
            raise ValueError(
                "'path' is not a valid quadratic Bezier curve")
        # Flattened control points: [x0, y0, x1, y1, x2, y2].
        return [*segments[0][0], *segments[1][0]]

    def transmute(self, path, mutation_size, linewidth):
        """
        The transmute method is the very core of the ArrowStyle class and
        must be overridden in the subclasses.  It receives the *path*
        object along which the arrow will be drawn, and the
        *mutation_size*, with which the arrow head etc. will be scaled.
        The *linewidth* may be used to adjust the path so that it does not
        pass beyond the given points.  It returns a tuple of a `.Path`
        instance and a boolean.  The boolean value indicate whether the
        path can be filled or not.  The return value can also be a list of
        paths and list of booleans of the same length.
        """
        raise NotImplementedError('Derived must override')

    def __call__(self, path, mutation_size, linewidth,
                 aspect_ratio=1.):
        """
        The __call__ method is a thin wrapper around the transmute method
        and takes care of the aspect ratio.
        """

        if aspect_ratio is not None:
            # Squeeze the given height by the aspect_ratio
            vertices = path.vertices / [1, aspect_ratio]
            path_shrunk = Path(vertices, path.codes)
            # call transmute method with squeezed height.
            path_mutated, fillable = self.transmute(path_shrunk,
                                                    mutation_size,
                                                    linewidth)
            if np.iterable(fillable):
                # Restore the height
                path_list = [Path(p.vertices * [1, aspect_ratio], p.codes)
                             for p in path_mutated]
                return path_list, fillable
            else:
                # NOTE(review): the single-path result is returned without
                # restoring the squeezed height — confirm this is intended.
                return path_mutated, fillable
        else:
            return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
    """
    A simple arrow which will work with any path instance.  The
    returned path is the concatenation of the original path, and at
    most two paths representing the arrow head or bracket at the start
    point and at the end point.  The arrow heads can be either open
    or closed.
    """

    # Subclasses override *arrow* to pick the begin/end decorations,
    # e.g. "<-", "-|>", "]-[".
    arrow = "-"
    fillbegin = fillend = False  # Whether arrows are filled.

    def __init__(self, head_length=.4, head_width=.2, widthA=1., widthB=1.,
                 lengthA=0.2, lengthB=0.2, angleA=0, angleB=0, scaleA=None,
                 scaleB=None):
        """
        Parameters
        ----------
        head_length : float, default: 0.4
            Length of the arrow head, relative to *mutation_size*.
        head_width : float, default: 0.2
            Width of the arrow head, relative to *mutation_size*.
        widthA, widthB : float, default: 1.0
            Width of the bracket.
        lengthA, lengthB : float, default: 0.2
            Length of the bracket.
        angleA, angleB : float, default: 0
            Orientation of the bracket, as a counterclockwise angle.
            0 degrees means perpendicular to the line.
        scaleA, scaleB : float, default: *mutation_size*
            The scale of the brackets.
        """
        self.head_length, self.head_width = head_length, head_width
        self.widthA, self.widthB = widthA, widthB
        self.lengthA, self.lengthB = lengthA, lengthB
        self.angleA, self.angleB = angleA, angleB
        self.scaleA, self.scaleB = scaleA, scaleB

        self._beginarrow_head = False
        self._beginarrow_bracket = False
        self._endarrow_head = False
        self._endarrow_bracket = False

        if "-" not in self.arrow:
            raise ValueError("arrow must have the '-' between "
                             "the two heads")
        # Split the arrow spec into begin/end parts, e.g. "<|-[" -> "<|", "[".
        beginarrow, endarrow = self.arrow.split("-", 1)

        if beginarrow == "<":
            self._beginarrow_head = True
            self._beginarrow_bracket = False
        elif beginarrow == "<|":
            self._beginarrow_head = True
            self._beginarrow_bracket = False
            self.fillbegin = True
        elif beginarrow in ("]", "|"):
            self._beginarrow_head = False
            self._beginarrow_bracket = True

        if endarrow == ">":
            self._endarrow_head = True
            self._endarrow_bracket = False
        elif endarrow == "|>":
            self._endarrow_head = True
            self._endarrow_bracket = False
            self.fillend = True
        elif endarrow in ("[", "|"):
            self._endarrow_head = False
            self._endarrow_bracket = True

        super().__init__()

    def _get_arrow_wedge(self, x0, y0, x1, y1,
                         head_dist, cos_t, sin_t, linewidth):
        """
        Return the paths for arrow heads.  Since arrow lines are
        drawn with capstyle=projected, The arrow goes beyond the
        desired point.  This method also returns the amount of the path
        to be shrunken so that it does not overshoot.
        """

        # arrow from x0, y0 to x1, y1
        dx, dy = x0 - x1, y0 - y1

        cp_distance = np.hypot(dx, dy)

        # pad_projected : amount of pad to account the
        # overshooting of the projection of the wedge
        pad_projected = (.5 * linewidth / sin_t)

        # Account for division by zero
        if cp_distance == 0:
            cp_distance = 1

        # apply pad for projected edge
        ddx = pad_projected * dx / cp_distance
        ddy = pad_projected * dy / cp_distance

        # offset for arrow wedge
        dx = dx / cp_distance * head_dist
        dy = dy / cp_distance * head_dist

        # Rotate (dx, dy) by +/- the head half-angle to get the two
        # wedge edges.
        dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
        dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy

        vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
                          (x1 + ddx, y1 + ddy),
                          (x1 + ddx + dx2, y1 + ddy + dy2)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO]

        return vertices_arrow, codes_arrow, ddx, ddy

    def _get_bracket(self, x0, y0,
                     x1, y1, width, length, angle):
        """Return vertices and codes for one square bracket at (x0, y0)."""

        cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)

        # arrow from x0, y0 to x1, y1
        from matplotlib.bezier import get_normal_points
        x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)

        dx, dy = length * cos_t, length * sin_t

        vertices_arrow = [(x1 + dx, y1 + dy),
                          (x1, y1),
                          (x2, y2),
                          (x2 + dx, y2 + dy)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO,
                       Path.LINETO]

        if angle:
            # Rotate the bracket around its anchor point.
            trans = transforms.Affine2D().rotate_deg_around(x0, y0, angle)
            vertices_arrow = trans.transform(vertices_arrow)

        return vertices_arrow, codes_arrow

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        if self._beginarrow_head or self._endarrow_head:
            head_length = self.head_length * mutation_size
            head_width = self.head_width * mutation_size
            head_dist = np.hypot(head_length, head_width)
            cos_t, sin_t = head_length / head_dist, head_width / head_dist

        scaleA = mutation_size if self.scaleA is None else self.scaleA
        scaleB = mutation_size if self.scaleB is None else self.scaleB

        # begin arrow
        x0, y0 = path.vertices[0]
        x1, y1 = path.vertices[1]

        # If there is no room for an arrow and a line, then skip the arrow
        has_begin_arrow = self._beginarrow_head and (x0, y0) != (x1, y1)
        verticesA, codesA, ddxA, ddyA = (
            self._get_arrow_wedge(x1, y1, x0, y0,
                                  head_dist, cos_t, sin_t, linewidth)
            if has_begin_arrow
            else ([], [], 0, 0)
        )

        # end arrow
        x2, y2 = path.vertices[-2]
        x3, y3 = path.vertices[-1]

        # If there is no room for an arrow and a line, then skip the arrow
        has_end_arrow = self._endarrow_head and (x2, y2) != (x3, y3)
        verticesB, codesB, ddxB, ddyB = (
            self._get_arrow_wedge(x2, y2, x3, y3,
                                  head_dist, cos_t, sin_t, linewidth)
            if has_end_arrow
            else ([], [], 0, 0)
        )

        # This simple code will not work if ddx, ddy is greater than the
        # separation between vertices.
        paths = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
                                      path.vertices[1:-1],
                                      [(x3 + ddxB, y3 + ddyB)]]),
                      path.codes)]
        fills = [False]

        if has_begin_arrow:
            if self.fillbegin:
                paths.append(
                    Path([*verticesA, (0, 0)], [*codesA, Path.CLOSEPOLY]))
                fills.append(True)
            else:
                paths.append(Path(verticesA, codesA))
                fills.append(False)
        elif self._beginarrow_bracket:
            x0, y0 = path.vertices[0]
            x1, y1 = path.vertices[1]
            verticesA, codesA = self._get_bracket(x0, y0, x1, y1,
                                                  self.widthA * scaleA,
                                                  self.lengthA * scaleA,
                                                  self.angleA)
            paths.append(Path(verticesA, codesA))
            fills.append(False)

        if has_end_arrow:
            if self.fillend:
                fills.append(True)
                paths.append(
                    Path([*verticesB, (0, 0)], [*codesB, Path.CLOSEPOLY]))
            else:
                fills.append(False)
                paths.append(Path(verticesB, codesB))
        elif self._endarrow_bracket:
            x0, y0 = path.vertices[-1]
            x1, y1 = path.vertices[-2]
            verticesB, codesB = self._get_bracket(x0, y0, x1, y1,
                                                  self.widthB * scaleB,
                                                  self.lengthB * scaleB,
                                                  self.angleB)
            paths.append(Path(verticesB, codesB))
            fills.append(False)

        return paths, fills
@_register_style(_style_list, name="-")
class Curve(_Curve):
"""A simple curve without any arrow head."""
def __init__(self): # hide head_length, head_width
# These attributes (whose values come from backcompat) only matter
# if someone modifies beginarrow/etc. on an ArrowStyle instance.
super().__init__(head_length=.2, head_width=.1)
@_register_style(_style_list, name="<-")
class CurveA(_Curve):
"""An arrow with a head at its start point."""
arrow = "<-"
@_register_style(_style_list, name="->")
class CurveB(_Curve):
"""An arrow with a head at its end point."""
arrow = "->"
@_register_style(_style_list, name="<->")
class CurveAB(_Curve):
"""An arrow with heads both at the start and the end point."""
arrow = "<->"
@_register_style(_style_list, name="<|-")
class CurveFilledA(_Curve):
"""An arrow with filled triangle head at the start."""
arrow = "<|-"
@_register_style(_style_list, name="-|>")
class CurveFilledB(_Curve):
"""An arrow with filled triangle head at the end."""
arrow = "-|>"
@_register_style(_style_list, name="<|-|>")
class CurveFilledAB(_Curve):
"""An arrow with filled triangle heads at both ends."""
arrow = "<|-|>"
@_register_style(_style_list, name="]-")
class BracketA(_Curve):
"""An arrow with an outward square bracket at its start."""
arrow = "]-"
def __init__(self, widthA=1., lengthA=0.2, angleA=0):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
lengthA : float, default: 0.2
Length of the bracket.
angleA : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)
@_register_style(_style_list, name="-[")
class BracketB(_Curve):
"""An arrow with an outward square bracket at its end."""
arrow = "-["
def __init__(self, widthB=1., lengthB=0.2, angleB=0):
"""
Parameters
----------
widthB : float, default: 1.0
Width of the bracket.
lengthB : float, default: 0.2
Length of the bracket.
angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="]-[")
class BracketAB(_Curve):
"""An arrow with outward square brackets at both ends."""
arrow = "]-["
def __init__(self,
widthA=1., lengthA=0.2, angleA=0,
widthB=1., lengthB=0.2, angleB=0):
"""
Parameters
----------
widthA, widthB : float, default: 1.0
Width of the bracket.
lengthA, lengthB : float, default: 0.2
Length of the bracket.
angleA, angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA,
widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="|-|")
class BarAB(_Curve):
"""An arrow with vertical bars ``|`` at both ends."""
arrow = "|-|"
def __init__(self, widthA=1., angleA=0, widthB=1., angleB=0):
"""
Parameters
----------
widthA, widthB : float, default: 1.0
Width of the bracket.
angleA, angleB : float, default: 0 degrees
Orientation of the bracket, as a counterclockwise angle.
0 degrees means perpendicular to the line.
"""
super().__init__(widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
@_register_style(_style_list, name=']->')
class BracketCurve(_Curve):
    """
    An arrow with an outward square bracket at its start and a head at
    the end.
    """
    arrow = "]->"

    def __init__(self, widthA=1., lengthA=0.2, angleA=None):
        """
        Parameters
        ----------
        widthA : float, default: 1.0
            Width of the bracket.
        lengthA : float, default: 0.2
            Length of the bracket.
        angleA : float, default: None (no rotation, same as 0 degrees)
            Orientation of the bracket, as a counterclockwise angle.
            0 degrees means perpendicular to the line.
        """
        super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA)
@_register_style(_style_list, name='<-[')
class CurveBracket(_Curve):
    """
    An arrow with an outward square bracket at its end and a head at
    the start.
    """
    arrow = "<-["

    def __init__(self, widthB=1., lengthB=0.2, angleB=None):
        """
        Parameters
        ----------
        widthB : float, default: 1.0
            Width of the bracket.
        lengthB : float, default: 0.2
            Length of the bracket.
        angleB : float, default: None (no rotation, same as 0 degrees)
            Orientation of the bracket, as a counterclockwise angle.
            0 degrees means perpendicular to the line.
        """
        super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list)
class Simple(_Base):
    """A simple arrow. Only works with a quadratic Bézier curve."""

    def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
        """
        Parameters
        ----------
        head_length : float, default: 0.5
            Length of the arrow head.
        head_width : float, default: 0.5
            Width of the arrow head.
        tail_width : float, default: 0.2
            Width of the arrow tail.
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super().__init__()

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # Divide the path into a head and a tail: the head is the portion
        # of the curve lying within *head_length* of the end point (x2, y2).
        head_length = self.head_length * mutation_size
        in_f = inside_circle(x2, y2, head_length)
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        try:
            # arrow_out = tail segment, arrow_in = head segment.
            arrow_out, arrow_in = \
                split_bezier_intersecting_with_closedpath(arrow_path, in_f)
        except NonIntersectingPathException:
            # The whole curve lies inside the head circle: synthesize a
            # straight head segment of head_length and drop the tail.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
            arrow_out = None

        # Head: widen the head segment into a wedge (width tapering to 0).
        head_width = self.head_width * mutation_size
        head_left, head_right = make_wedged_bezier2(arrow_in,
                                                    head_width / 2., wm=.5)

        # Tail: offset the tail segment into two parallel curves (only
        # when a tail segment survived the split above).
        if arrow_out is not None:
            tail_width = self.tail_width * mutation_size
            tail_left, tail_right = get_parallels(arrow_out,
                                                  tail_width / 2.)
            patch_path = [(Path.MOVETO, tail_right[0]),
                          (Path.CURVE3, tail_right[1]),
                          (Path.CURVE3, tail_right[2]),
                          (Path.LINETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.LINETO, tail_left[2]),
                          (Path.CURVE3, tail_left[1]),
                          (Path.CURVE3, tail_left[0]),
                          (Path.LINETO, tail_right[0]),
                          (Path.CLOSEPOLY, tail_right[0]),
                          ]
        else:
            # Head-only outline when the tail degenerated away.
            patch_path = [(Path.MOVETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.CLOSEPOLY, head_left[0]),
                          ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True  # True: the resulting path is fillable.
@_register_style(_style_list)
class Fancy(_Base):
    """A fancy arrow. Only works with a quadratic Bézier curve."""

    def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
        """
        Parameters
        ----------
        head_length : float, default: 0.4
            Length of the arrow head.
        head_width : float, default: 0.4
            Width of the arrow head.
        tail_width : float, default: 0.4
            Width of the arrow tail.
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super().__init__()

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # divide the path into a head and a tail
        head_length = self.head_length * mutation_size
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        # path for head: split off the portion of the curve lying within
        # *head_length* of the end point (x2, y2).
        in_f = inside_circle(x2, y2, head_length)
        try:
            path_out, path_in = split_bezier_intersecting_with_closedpath(
                arrow_path, in_f)
        except NonIntersectingPathException:
            # The whole curve lies inside the head circle: synthesize a
            # straight segment of head_length and use it for everything.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
            path_head = arrow_path
        else:
            path_head = path_in

        # path for tail: cut slightly inside the head (factor .8) so the
        # tail overlaps the head base rather than meeting it exactly.
        in_f = inside_circle(x2, y2, head_length * .8)
        path_out, path_in = split_bezier_intersecting_with_closedpath(
            arrow_path, in_f)
        path_tail = path_out

        # head
        head_width = self.head_width * mutation_size
        head_l, head_r = make_wedged_bezier2(path_head,
                                             head_width / 2.,
                                             wm=.6)

        # tail
        tail_width = self.tail_width * mutation_size
        tail_left, tail_right = make_wedged_bezier2(path_tail,
                                                    tail_width * .5,
                                                    w1=1., wm=0.6, w2=0.3)

        # Trim the tail start: the outline begins slightly away from
        # (x0, y0), at the point where the curve exits a small circle
        # around the start.
        in_f = inside_circle(x0, y0, tail_width * .3)
        path_in, path_out = split_bezier_intersecting_with_closedpath(
            arrow_path, in_f)
        tail_start = path_in[-1]

        head_right, head_left = head_r, head_l
        patch_path = [(Path.MOVETO, tail_start),
                      (Path.LINETO, tail_right[0]),
                      (Path.CURVE3, tail_right[1]),
                      (Path.CURVE3, tail_right[2]),
                      (Path.LINETO, head_right[0]),
                      (Path.CURVE3, head_right[1]),
                      (Path.CURVE3, head_right[2]),
                      (Path.CURVE3, head_left[1]),
                      (Path.CURVE3, head_left[0]),
                      (Path.LINETO, tail_left[2]),
                      (Path.CURVE3, tail_left[1]),
                      (Path.CURVE3, tail_left[0]),
                      (Path.LINETO, tail_start),
                      (Path.CLOSEPOLY, tail_start),
                      ]
        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True  # True: the resulting path is fillable.
@_register_style(_style_list)
class Wedge(_Base):
    """
    Wedge(?) shape. Only works with a quadratic Bézier curve. The
    start point has a width of the *tail_width* and the end point has a
    width of 0. At the middle, the width is *shrink_factor*x*tail_width*.
    """

    def __init__(self, tail_width=.3, shrink_factor=0.5):
        """
        Parameters
        ----------
        tail_width : float, default: 0.3
            Width of the tail.
        shrink_factor : float, default: 0.5
            Fraction of the arrow width at the middle point.
        """
        self.tail_width = tail_width
        self.shrink_factor = shrink_factor
        super().__init__()

    def transmute(self, path, mutation_size, linewidth):
        # docstring inherited
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
        quad = [(x0, y0), (x1, y1), (x2, y2)]
        # Widen the curve into two offset Béziers: full tail_width at the
        # start, shrink_factor * tail_width at the middle, zero at the tip.
        upper, lower = make_wedged_bezier2(
            quad, self.tail_width * mutation_size / 2.,
            wm=self.shrink_factor)
        # Outline: up one side, straight across the (zero-width) tip, and
        # back down the other side.
        codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3,
                 Path.LINETO, Path.CURVE3, Path.CURVE3,
                 Path.CLOSEPOLY]
        verts = [upper[0], upper[1], upper[2],
                 lower[2], lower[1], lower[0],
                 lower[0]]
        return Path(verts, codes), True
class FancyBboxPatch(Patch):
    """
    A fancy box around a rectangle with lower left at *xy* = (*x*, *y*)
    with specified width and height.

    `.FancyBboxPatch` is similar to `.Rectangle`, but it draws a fancy box
    around the rectangle. The transformation of the rectangle box to the
    fancy box is delegated to the style classes defined in `.BoxStyle`.
    """

    # Unlike most patches, fancy boxes draw an edge by default.
    _edge_default = True

    def __str__(self):
        s = self.__class__.__name__ + "((%g, %g), width=%g, height=%g)"
        return s % (self._x, self._y, self._width, self._height)

    @_docstring.interpd
    def __init__(self, xy, width, height, boxstyle="round", *,
                 mutation_scale=1, mutation_aspect=1, **kwargs):
        """
        Parameters
        ----------
        xy : (float, float)
            The lower left corner of the box.
        width : float
            The width of the box.
        height : float
            The height of the box.
        boxstyle : str or `~matplotlib.patches.BoxStyle`
            The style of the fancy box. This can either be a `.BoxStyle`
            instance or a string of the style name and optionally comma
            separated attributes (e.g. "Round, pad=0.2"). This string is
            passed to `.BoxStyle` to construct a `.BoxStyle` object. See
            there for a full documentation.

            The following box styles are available:

            %(BoxStyle:table)s

        mutation_scale : float, default: 1
            Scaling factor applied to the attributes of the box style
            (e.g. pad or rounding_size).
        mutation_aspect : float, default: 1
            The height of the rectangle will be squeezed by this value before
            the mutation and the mutated box will be stretched by the inverse
            of it. For example, this allows different horizontal and vertical
            padding.

        Other Parameters
        ----------------
        **kwargs : `~matplotlib.patches.Patch` properties

        %(Patch:kwdoc)s
        """
        super().__init__(**kwargs)
        self._x, self._y = xy
        self._width = width
        self._height = height
        self.set_boxstyle(boxstyle)
        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect
        self.stale = True

    @_docstring.interpd
    def set_boxstyle(self, boxstyle=None, **kwargs):
        """
        Set the box style, possibly with further attributes.

        Attributes from the previous box style are not reused.

        Without argument (or with ``boxstyle=None``), the available box styles
        are returned as a human-readable string.

        Parameters
        ----------
        boxstyle : str or `~matplotlib.patches.BoxStyle`
            The style of the box: either a `.BoxStyle` instance, or a string,
            which is the style name and optionally comma separated attributes
            (e.g. "Round,pad=0.2"). Such a string is used to construct a
            `.BoxStyle` object, as documented in that class.

            The following box styles are available:

            %(BoxStyle:table_and_accepts)s

        **kwargs
            Additional attributes for the box style. See the table above for
            supported parameters.

        Examples
        --------
        ::

            set_boxstyle("Round,pad=0.2")
            set_boxstyle("round", pad=0.2)
        """
        if boxstyle is None:
            return BoxStyle.pprint_styles()
        self._bbox_transmuter = (
            BoxStyle(boxstyle, **kwargs)
            if isinstance(boxstyle, str) else boxstyle)
        self.stale = True

    def get_boxstyle(self):
        """Return the boxstyle object."""
        return self._bbox_transmuter

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        Parameters
        ----------
        scale : float
        """
        self._mutation_scale = scale
        self.stale = True

    def get_mutation_scale(self):
        """Return the mutation scale."""
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        Parameters
        ----------
        aspect : float
        """
        self._mutation_aspect = aspect
        self.stale = True

    def get_mutation_aspect(self):
        """Return the aspect ratio of the bbox mutation."""
        return (self._mutation_aspect if self._mutation_aspect is not None
                else 1)  # backcompat.

    def get_path(self):
        """Return the mutated path of the rectangle."""
        boxstyle = self.get_boxstyle()
        m_aspect = self.get_mutation_aspect()
        # Call boxstyle with y, height squeezed by aspect_ratio.
        path = boxstyle(self._x, self._y / m_aspect,
                        self._width, self._height / m_aspect,
                        self.get_mutation_scale())
        return Path(path.vertices * [1, m_aspect], path.codes)  # Unsqueeze y.

    # Following methods are borrowed from the Rectangle class.

    def get_x(self):
        """Return the left coord of the rectangle."""
        return self._x

    def get_y(self):
        """Return the bottom coord of the rectangle."""
        return self._y

    def get_width(self):
        """Return the width of the rectangle."""
        return self._width

    def get_height(self):
        """Return the height of the rectangle."""
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle.

        Parameters
        ----------
        x : float
        """
        self._x = x
        self.stale = True

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle.

        Parameters
        ----------
        y : float
        """
        self._y = y
        self.stale = True

    def set_width(self, w):
        """
        Set the rectangle width.

        Parameters
        ----------
        w : float
        """
        self._width = w
        self.stale = True

    def set_height(self, h):
        """
        Set the rectangle height.

        Parameters
        ----------
        h : float
        """
        self._height = h
        self.stale = True

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle.

        Call signatures::

            set_bounds(left, bottom, width, height)
            set_bounds((left, bottom, width, height))

        Parameters
        ----------
        left, bottom : float
            The coordinates of the bottom left corner of the rectangle.
        width, height : float
            The width/height of the rectangle.
        """
        # Accept either one 4-tuple or four scalars.
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h
        self.stale = True

    def get_bbox(self):
        """Return the `.Bbox` of the *unmutated* rectangle."""
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)
class FancyArrowPatch(Patch):
    """
    A fancy arrow patch.

    It draws an arrow using the `ArrowStyle`. It is primarily used by the
    `~.axes.Axes.annotate` method. For most purposes, use the annotate method for
    drawing arrows.

    The head and tail positions are fixed at the specified start and end points
    of the arrow, but the size and shape (in display coordinates) of the arrow
    does not change when the axis is moved or zoomed.
    """

    # Fancy arrows draw an edge by default.
    _edge_default = True

    def __str__(self):
        if self._posA_posB is not None:
            (x1, y1), (x2, y2) = self._posA_posB
            return f"{type(self).__name__}(({x1:g}, {y1:g})->({x2:g}, {y2:g}))"
        else:
            return f"{type(self).__name__}({self._path_original})"

    @_docstring.interpd
    def __init__(self, posA=None, posB=None, *,
                 path=None, arrowstyle="simple", connectionstyle="arc3",
                 patchA=None, patchB=None, shrinkA=2, shrinkB=2,
                 mutation_scale=1, mutation_aspect=1, **kwargs):
        """
        **Defining the arrow position and path**

        There are two ways to define the arrow position and path:

        - **Start, end and connection**:
          The typical approach is to define the start and end points of the
          arrow using *posA* and *posB*. The curve between these two can
          further be configured using *connectionstyle*.

          If given, the arrow curve is clipped by *patchA* and *patchB*,
          allowing it to start/end at the border of these patches.
          Additionally, the arrow curve can be shortened by *shrinkA* and *shrinkB*
          to create a margin between start/end (after possible clipping) and the
          drawn arrow.

        - **path**: Alternatively if *path* is provided, an arrow is drawn along
          this Path. In this case, *connectionstyle*, *patchA*, *patchB*,
          *shrinkA*, and *shrinkB* are ignored.

        **Styling**

        The *arrowstyle* defines the styling of the arrow head, tail and shaft.
        The resulting arrows can be styled further by setting the `.Patch`
        properties such as *linewidth*, *color*, *facecolor*, *edgecolor*
        etc. via keyword arguments.

        Parameters
        ----------
        posA, posB : (float, float), optional
            (x, y) coordinates of start and end point of the arrow.
            The actually drawn start and end positions may be modified
            through *patchA*, *patchB*, *shrinkA*, and *shrinkB*.

            *posA*, *posB* are exclusive of *path*.
        path : `~matplotlib.path.Path`, optional
            If provided, an arrow is drawn along this path and *patchA*,
            *patchB*, *shrinkA*, and *shrinkB* are ignored.

            *path* is exclusive of *posA*, *posB*.
        arrowstyle : str or `.ArrowStyle`, default: 'simple'
            The styling of arrow head, tail and shaft. This can be

            - `.ArrowStyle` or one of its subclasses
            - The shorthand string name (e.g. "->") as given in the table below,
              optionally containing a comma-separated list of style parameters,
              e.g. "->, head_length=10, head_width=5".

            The style parameters are scaled by *mutation_scale*.

            The following arrow styles are available. See also
            :doc:`/gallery/text_labels_and_annotations/fancyarrow_demo`.

            %(ArrowStyle:table)s

            Only the styles ``<|-``, ``-|>``, ``<|-|>`` ``simple``, ``fancy``
            and ``wedge`` contain closed paths and can be filled.
        connectionstyle : str or `.ConnectionStyle` or None, optional, \
default: 'arc3'
            `.ConnectionStyle` with which *posA* and *posB* are connected.
            This can be

            - `.ConnectionStyle` or one of its subclasses
            - The shorthand string name as given in the table below, e.g. "arc3".

            %(ConnectionStyle:table)s

            Ignored if *path* is provided.
        patchA, patchB : `~matplotlib.patches.Patch`, default: None
            Optional Patches at *posA* and *posB*, respectively. If given,
            the arrow path is clipped by these patches such that head and tail
            are at the border of the patches.

            Ignored if *path* is provided.
        shrinkA, shrinkB : float, default: 2
            Shorten the arrow path at *posA* and *posB* by this amount in points.
            This allows to add a margin between the intended start/end points and
            the arrow.

            Ignored if *path* is provided.
        mutation_scale : float, default: 1
            Value with which attributes of *arrowstyle* (e.g., *head_length*)
            will be scaled.
        mutation_aspect : None or float, default: None
            The height of the rectangle will be squeezed by this value before
            the mutation and the mutated box will be stretched by the inverse
            of it.

        Other Parameters
        ----------------
        **kwargs : `~matplotlib.patches.Patch` properties, optional
            Here is a list of available `.Patch` properties:

        %(Patch:kwdoc)s

            In contrast to other patches, the default ``capstyle`` and
            ``joinstyle`` for `FancyArrowPatch` are set to ``"round"``.
        """
        # Traditionally, the cap- and joinstyle for FancyArrowPatch are round
        kwargs.setdefault("joinstyle", JoinStyle.round)
        kwargs.setdefault("capstyle", CapStyle.round)

        super().__init__(**kwargs)

        # Exactly one of (posA, posB) or path must be given; a list (not a
        # tuple) so set_positions can mutate it in place.
        if posA is not None and posB is not None and path is None:
            self._posA_posB = [posA, posB]

            if connectionstyle is None:
                connectionstyle = "arc3"
            self.set_connectionstyle(connectionstyle)

        elif posA is None and posB is None and path is not None:
            self._posA_posB = None
        else:
            raise ValueError("Either posA and posB, or path need to provided")

        self.patchA = patchA
        self.patchB = patchB
        self.shrinkA = shrinkA
        self.shrinkB = shrinkB

        self._path_original = path
        self.set_arrowstyle(arrowstyle)
        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect

        # dpi correction factor; updated from the renderer at draw time.
        self._dpi_cor = 1.0

    def set_positions(self, posA, posB):
        """
        Set the start and end positions of the connecting path.

        Parameters
        ----------
        posA, posB : None, tuple
            (x, y) coordinates of arrow tail and arrow head respectively. If
            `None` use current value.
        """
        if posA is not None:
            self._posA_posB[0] = posA
        if posB is not None:
            self._posA_posB[1] = posB
        self.stale = True

    def set_patchA(self, patchA):
        """
        Set the tail patch.

        Parameters
        ----------
        patchA : `.patches.Patch`
        """
        self.patchA = patchA
        self.stale = True

    def set_patchB(self, patchB):
        """
        Set the head patch.

        Parameters
        ----------
        patchB : `.patches.Patch`
        """
        self.patchB = patchB
        self.stale = True

    @_docstring.interpd
    def set_connectionstyle(self, connectionstyle=None, **kwargs):
        """
        Set the connection style, possibly with further attributes.

        Attributes from the previous connection style are not reused.

        Without argument (or with ``connectionstyle=None``), the available box
        styles are returned as a human-readable string.

        Parameters
        ----------
        connectionstyle : str or `~matplotlib.patches.ConnectionStyle`
            The style of the connection: either a `.ConnectionStyle` instance,
            or a string, which is the style name and optionally comma separated
            attributes (e.g. "Arc,armA=30,rad=10"). Such a string is used to
            construct a `.ConnectionStyle` object, as documented in that class.

            The following connection styles are available:

            %(ConnectionStyle:table_and_accepts)s

        **kwargs
            Additional attributes for the connection style. See the table above
            for supported parameters.

        Examples
        --------
        ::

            set_connectionstyle("Arc,armA=30,rad=10")
            set_connectionstyle("arc", armA=30, rad=10)
        """
        if connectionstyle is None:
            return ConnectionStyle.pprint_styles()
        self._connector = (
            ConnectionStyle(connectionstyle, **kwargs)
            if isinstance(connectionstyle, str) else connectionstyle)
        self.stale = True

    def get_connectionstyle(self):
        """Return the `ConnectionStyle` used."""
        return self._connector

    @_docstring.interpd
    def set_arrowstyle(self, arrowstyle=None, **kwargs):
        """
        Set the arrow style, possibly with further attributes.

        Attributes from the previous arrow style are not reused.

        Without argument (or with ``arrowstyle=None``), the available box
        styles are returned as a human-readable string.

        Parameters
        ----------
        arrowstyle : str or `~matplotlib.patches.ArrowStyle`
            The style of the arrow: either a `.ArrowStyle` instance, or a
            string, which is the style name and optionally comma separated
            attributes (e.g. "Fancy,head_length=0.2"). Such a string is used to
            construct a `.ArrowStyle` object, as documented in that class.

            The following arrow styles are available:

            %(ArrowStyle:table_and_accepts)s

        **kwargs
            Additional attributes for the arrow style. See the table above for
            supported parameters.

        Examples
        --------
        ::

            set_arrowstyle("Fancy,head_length=0.2")
            set_arrowstyle("fancy", head_length=0.2)
        """
        if arrowstyle is None:
            return ArrowStyle.pprint_styles()
        self._arrow_transmuter = (
            ArrowStyle(arrowstyle, **kwargs)
            if isinstance(arrowstyle, str) else arrowstyle)
        self.stale = True

    def get_arrowstyle(self):
        """Return the arrowstyle object."""
        return self._arrow_transmuter

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        Parameters
        ----------
        scale : float
        """
        self._mutation_scale = scale
        self.stale = True

    def get_mutation_scale(self):
        """
        Return the mutation scale.

        Returns
        -------
        scalar
        """
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        Parameters
        ----------
        aspect : float
        """
        self._mutation_aspect = aspect
        self.stale = True

    def get_mutation_aspect(self):
        """Return the aspect ratio of the bbox mutation."""
        return (self._mutation_aspect if self._mutation_aspect is not None
                else 1)  # backcompat.

    def get_path(self):
        """Return the path of the arrow in the data coordinates."""
        # The path is generated in display coordinates, then converted back to
        # data coordinates.
        _path, fillable = self._get_path_in_displaycoord()
        if np.iterable(fillable):
            # Multiple sub-paths (e.g. head + shaft): merge into one.
            _path = Path.make_compound_path(*_path)
        return self.get_transform().inverted().transform_path(_path)

    def _get_path_in_displaycoord(self):
        """Return the mutated path of the arrow in display coordinates."""
        dpi_cor = self._dpi_cor

        if self._posA_posB is not None:
            posA = self._convert_xy_units(self._posA_posB[0])
            posB = self._convert_xy_units(self._posA_posB[1])
            (posA, posB) = self.get_transform().transform((posA, posB))
            _path = self.get_connectionstyle()(posA, posB,
                                               patchA=self.patchA,
                                               patchB=self.patchB,
                                               shrinkA=self.shrinkA * dpi_cor,
                                               shrinkB=self.shrinkB * dpi_cor
                                               )
        else:
            # Explicit-path mode: connectionstyle/patches/shrink are ignored.
            _path = self.get_transform().transform_path(self._path_original)

        # Apply the arrow style (head/tail mutation) in display space.
        _path, fillable = self.get_arrowstyle()(
            _path,
            self.get_mutation_scale() * dpi_cor,
            self.get_linewidth() * dpi_cor,
            self.get_mutation_aspect())

        return _path, fillable

    def draw(self, renderer):
        if not self.get_visible():
            return

        # FIXME: dpi_cor is for the dpi-dependency of the linewidth. There
        # could be room for improvement. Maybe _get_path_in_displaycoord could
        # take a renderer argument, but get_path should be adapted too.
        self._dpi_cor = renderer.points_to_pixels(1.)
        path, fillable = self._get_path_in_displaycoord()

        if not np.iterable(fillable):
            path = [path]
            fillable = [fillable]

        affine = transforms.IdentityTransform()

        # Only fill sub-paths that are fillable and whose facecolor has
        # nonzero alpha.
        self._draw_paths_with_artist_properties(
            renderer,
            [(p, affine, self._facecolor if f and self._facecolor[3] else None)
             for p, f in zip(path, fillable)])
class ConnectionPatch(FancyArrowPatch):
    """A patch that connects two points (possibly in different Axes)."""

    def __str__(self):
        return "ConnectionPatch((%g, %g), (%g, %g))" % \
               (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])

    @_docstring.interpd
    def __init__(self, xyA, xyB, coordsA, coordsB=None, *,
                 axesA=None, axesB=None,
                 arrowstyle="-",
                 connectionstyle="arc3",
                 patchA=None,
                 patchB=None,
                 shrinkA=0.,
                 shrinkB=0.,
                 mutation_scale=10.,
                 mutation_aspect=None,
                 clip_on=False,
                 **kwargs):
        """
        Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*.

        Valid keys are

        ===============  ======================================================
        Key              Description
        ===============  ======================================================
        arrowstyle       the arrow style
        connectionstyle  the connection style
        relpos           default is (0.5, 0.5)
        patchA           default is bounding box of the text
        patchB           default is None
        shrinkA          default is 2 points
        shrinkB          default is 2 points
        mutation_scale   default is text size (in points)
        mutation_aspect  default is 1.
        ?                any key for `matplotlib.patches.PathPatch`
        ===============  ======================================================

        *coordsA* and *coordsB* are strings that indicate the
        coordinates of *xyA* and *xyB*.

        ====================  ==================================================
        Property              Description
        ====================  ==================================================
        'figure points'       points from the lower left corner of the figure
        'figure pixels'       pixels from the lower left corner of the figure
        'figure fraction'     0, 0 is lower left of figure and 1, 1 is upper
                              right
        'subfigure points'    points from the lower left corner of the subfigure
        'subfigure pixels'    pixels from the lower left corner of the subfigure
        'subfigure fraction'  fraction of the subfigure, 0, 0 is lower left.
        'axes points'         points from lower left corner of the Axes
        'axes pixels'         pixels from lower left corner of the Axes
        'axes fraction'       0, 0 is lower left of Axes and 1, 1 is upper right
        'data'                use the coordinate system of the object being
                              annotated (default)
        'offset points'       offset (in points) from the *xy* value
        'polar'               you can specify *theta*, *r* for the annotation,
                              even in cartesian plots. Note that if you are
                              using a polar Axes, you do not need to specify
                              polar for the coordinate system since that is the
                              native "data" coordinate system.
        ====================  ==================================================

        Alternatively they can be set to any valid
        `~matplotlib.transforms.Transform`.

        Note that 'subfigure pixels' and 'figure pixels' are the same
        for the parent figure, so users who want code that is usable in
        a subfigure can use 'subfigure pixels'.

        .. note::

           Using `ConnectionPatch` across two `~.axes.Axes` instances
           is not directly compatible with :ref:`constrained layout
           <constrainedlayout_guide>`. Add the artist
           directly to the `.Figure` instead of adding it to a specific Axes,
           or exclude it from the layout using ``con.set_in_layout(False)``.

           .. code-block:: default

              fig, ax = plt.subplots(1, 2, constrained_layout=True)
              con = ConnectionPatch(..., axesA=ax[0], axesB=ax[1])
              fig.add_artist(con)

        """
        if coordsB is None:
            coordsB = coordsA
        # we'll draw ourself after the artist we annotate by default
        self.xy1 = xyA
        self.xy2 = xyB
        self.coords1 = coordsA
        self.coords2 = coordsB

        self.axesA = axesA
        self.axesB = axesB

        super().__init__(posA=(0, 0), posB=(1, 1),
                         arrowstyle=arrowstyle,
                         connectionstyle=connectionstyle,
                         patchA=patchA, patchB=patchB,
                         shrinkA=shrinkA, shrinkB=shrinkB,
                         mutation_scale=mutation_scale,
                         mutation_aspect=mutation_aspect,
                         clip_on=clip_on,
                         **kwargs)
        # if True, draw annotation only if self.xy is inside the Axes
        self._annotation_clip = None

    def _get_xy(self, xy, s, axes=None):
        """Calculate the pixel position of given point."""
        s0 = s  # For the error message, if needed.
        if axes is None:
            axes = self.axes
        # preserve mixed type input (such as str, int)
        x = np.array(xy[0])
        y = np.array(xy[1])

        fig = self.get_figure(root=False)
        # "points"-based systems reduce to the matching "pixels" system
        # after a dpi/72 conversion.
        if s in ["figure points", "axes points"]:
            x = x * fig.dpi / 72
            y = y * fig.dpi / 72
            s = s.replace("points", "pixels")
        elif s == "figure fraction":
            s = fig.transFigure
        elif s == "subfigure fraction":
            s = fig.transSubfigure
        elif s == "axes fraction":
            s = axes.transAxes

        if s == 'data':
            trans = axes.transData
            x = cbook._to_unmasked_float_array(axes.xaxis.convert_units(x))
            y = cbook._to_unmasked_float_array(axes.yaxis.convert_units(y))
            return trans.transform((x, y))
        elif s == 'offset points':
            if self.xycoords == 'offset points':  # prevent recursion
                return self._get_xy(self.xy, 'data')
            return (
                self._get_xy(self.xy, self.xycoords)  # converted data point
                + xy * self.get_figure(root=True).dpi / 72)  # converted offset
        elif s == 'polar':
            theta, r = x, y
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            trans = axes.transData
            return trans.transform((x, y))
        elif s == 'figure pixels':
            # pixels from the lower left corner of the figure
            bb = self.get_figure(root=False).figbbox
            # Negative values index from the opposite (upper right) corner.
            x = bb.x0 + x if x >= 0 else bb.x1 + x
            y = bb.y0 + y if y >= 0 else bb.y1 + y
            return x, y
        elif s == 'subfigure pixels':
            # pixels from the lower left corner of the subfigure
            bb = self.get_figure(root=False).bbox
            x = bb.x0 + x if x >= 0 else bb.x1 + x
            y = bb.y0 + y if y >= 0 else bb.y1 + y
            return x, y
        elif s == 'axes pixels':
            # pixels from the lower left corner of the Axes
            bb = axes.bbox
            x = bb.x0 + x if x >= 0 else bb.x1 + x
            y = bb.y0 + y if y >= 0 else bb.y1 + y
            return x, y
        elif isinstance(s, transforms.Transform):
            return s.transform(xy)
        else:
            raise ValueError(f"{s0} is not a valid coordinate transformation")

    def set_annotation_clip(self, b):
        """
        Set the annotation's clipping behavior.

        Parameters
        ----------
        b : bool or None
            - True: The annotation will be clipped when ``self.xy`` is
              outside the Axes.
            - False: The annotation will always be drawn.
            - None: The annotation will be clipped when ``self.xy`` is
              outside the Axes and ``self.xycoords == "data"``.
        """
        self._annotation_clip = b
        self.stale = True

    def get_annotation_clip(self):
        """
        Return the clipping behavior.

        See `.set_annotation_clip` for the meaning of the return value.
        """
        return self._annotation_clip

    def _get_path_in_displaycoord(self):
        """Return the mutated path of the arrow in display coordinates."""
        dpi_cor = self._dpi_cor
        # Unlike FancyArrowPatch, the endpoints are resolved through the
        # (possibly distinct) coordinate systems/Axes of each end.
        posA = self._get_xy(self.xy1, self.coords1, self.axesA)
        posB = self._get_xy(self.xy2, self.coords2, self.axesB)
        path = self.get_connectionstyle()(
            posA, posB,
            patchA=self.patchA, patchB=self.patchB,
            shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor,
        )
        path, fillable = self.get_arrowstyle()(
            path,
            self.get_mutation_scale() * dpi_cor,
            self.get_linewidth() * dpi_cor,
            self.get_mutation_aspect()
        )
        return path, fillable

    def _check_xy(self, renderer):
        """Check whether the annotation needs to be drawn."""
        # NOTE(review): *renderer* is unused here; kept for signature
        # compatibility with callers.
        b = self.get_annotation_clip()

        if b or (b is None and self.coords1 == "data"):
            xy_pixel = self._get_xy(self.xy1, self.coords1, self.axesA)
            if self.axesA is None:
                axes = self.axes
            else:
                axes = self.axesA
            if not axes.contains_point(xy_pixel):
                return False

        if b or (b is None and self.coords2 == "data"):
            xy_pixel = self._get_xy(self.xy2, self.coords2, self.axesB)
            if self.axesB is None:
                axes = self.axes
            else:
                axes = self.axesB
            if not axes.contains_point(xy_pixel):
                return False

        return True

    def draw(self, renderer):
        if not self.get_visible() or not self._check_xy(renderer):
            return
        super().draw(renderer)
# --- File: venv\Lib\site-packages\matplotlib\path.py ---
r"""
A module for dealing with the polylines used throughout Matplotlib.
The primary class for polyline handling in Matplotlib is `Path`. Almost all
vector drawing makes use of `Path`\s somewhere in the drawing pipeline.
Whilst a `Path` instance itself cannot be drawn, some `.Artist` subclasses,
such as `.PathPatch` and `.PathCollection`, can be used for convenient `Path`
visualisation.
"""
import copy
from functools import lru_cache
from weakref import WeakValueDictionary
import numpy as np
import matplotlib as mpl
from . import _api, _path
from .cbook import _to_unmasked_float_array, simple_linear_interpolation
from .bezier import BezierSegment
class Path:
"""
A series of possibly disconnected, possibly closed, line and curve
segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an (N, 2) float array of vertices
- *codes*: an N-length `numpy.uint8` array of path codes, or None
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices and three `CURVE4` codes.
The code types are:
- `STOP` : 1 vertex (ignored)
A marker for the end of the entire path (currently not required and
ignored)
- `MOVETO` : 1 vertex
Pick up the pen and move to the given vertex.
- `LINETO` : 1 vertex
Draw a line from the current position to the given vertex.
- `CURVE3` : 1 control point, 1 endpoint
Draw a quadratic Bézier curve from the current position, with the given
control point, to the given end point.
- `CURVE4` : 2 control points, 1 endpoint
Draw a cubic Bézier curve from the current position, with the given
control points, to the given end point.
- `CLOSEPOLY` : 1 vertex (ignored)
Draw a line segment to the start point of the current polyline.
If *codes* is None, it is interpreted as a `MOVETO` followed by a series
of `LINETO`.
Users of Path objects should not access the vertices and codes arrays
directly. Instead, they should use `iter_segments` or `cleaned` to get the
vertex/code pairs. This helps, in particular, to consistently handle the
case of *codes* being None.
Some behavior of Path objects can be controlled by rcParams. See the
rcParams whose keys start with 'path.'.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
code_type = np.uint8
# Path codes
STOP = code_type(0) # 1 vertex
MOVETO = code_type(1) # 1 vertex
LINETO = code_type(2) # 1 vertex
CURVE3 = code_type(3) # 2 vertices
CURVE4 = code_type(4) # 3 vertices
CLOSEPOLY = code_type(79) # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
    def __init__(self, vertices, codes=None, _interpolation_steps=1,
                 closed=False, readonly=False):
        """
        Create a new path with the given vertices and codes.

        Parameters
        ----------
        vertices : (N, 2) array-like
            The path vertices, as an array, masked array or sequence of pairs.
            Masked values, if any, will be converted to NaNs, which are then
            handled correctly by the Agg PathIterator and other consumers of
            path data, such as :meth:`iter_segments`.
        codes : array-like or None, optional
            N-length array of integers representing the codes of the path.
            If not None, codes must be the same length as vertices.
            If None, *vertices* will be treated as a series of line segments.
        _interpolation_steps : int, optional
            Used as a hint to certain projections, such as Polar, that this
            path should be linearly interpolated immediately before drawing.
            This attribute is primarily an implementation detail and is not
            intended for public use.
        closed : bool, optional
            If *codes* is None and closed is True, vertices will be treated as
            line segments of a closed polygon.  Note that the last vertex will
            then be ignored (as the corresponding code will be set to
            `CLOSEPOLY`).
        readonly : bool, optional
            Makes the path behave in an immutable way and sets the vertices
            and codes as read-only arrays.

        Raises
        ------
        ValueError
            If *codes* is not 1D with the same length as *vertices*, or if
            the first code is not `MOVETO`.
        """
        vertices = _to_unmasked_float_array(vertices)
        _api.check_shape((None, 2), vertices=vertices)

        if codes is not None and len(vertices):
            # Validate the codes array: 1D, same length as vertices, and
            # starting with a MOVETO.
            codes = np.asarray(codes, self.code_type)
            if codes.ndim != 1 or len(codes) != len(vertices):
                raise ValueError("'codes' must be a 1D list or array with the "
                                 "same length of 'vertices'. "
                                 f"Your vertices have shape {vertices.shape} "
                                 f"but your codes have shape {codes.shape}")
            if len(codes) and codes[0] != self.MOVETO:
                raise ValueError("The first element of 'code' must be equal "
                                 f"to 'MOVETO' ({self.MOVETO}). "
                                 f"Your first code is {codes[0]}")
        elif closed and len(vertices):
            # No codes given but a closed polygon requested: synthesize
            # MOVETO, LINETO..., CLOSEPOLY codes.
            codes = np.empty(len(vertices), dtype=self.code_type)
            codes[0] = self.MOVETO
            codes[1:-1] = self.LINETO
            codes[-1] = self.CLOSEPOLY

        self._vertices = vertices
        self._codes = codes
        self._interpolation_steps = _interpolation_steps
        self._update_values()

        if readonly:
            # Freeze the underlying arrays so accidental mutation fails loudly.
            self._vertices.flags.writeable = False
            if self._codes is not None:
                self._codes.flags.writeable = False
            self._readonly = True
        else:
            self._readonly = False
@classmethod
def _fast_from_codes_and_verts(cls, verts, codes, internals_from=None):
"""
Create a Path instance without the expense of calling the constructor.
Parameters
----------
verts : array-like
codes : array
internals_from : Path or None
If not None, another `Path` from which the attributes
``should_simplify``, ``simplify_threshold``, and
``interpolation_steps`` will be copied. Note that ``readonly`` is
never copied, and always set to ``False`` by this constructor.
"""
pth = cls.__new__(cls)
pth._vertices = _to_unmasked_float_array(verts)
pth._codes = codes
pth._readonly = False
if internals_from is not None:
pth._should_simplify = internals_from._should_simplify
pth._simplify_threshold = internals_from._simplify_threshold
pth._interpolation_steps = internals_from._interpolation_steps
else:
pth._should_simplify = True
pth._simplify_threshold = mpl.rcParams['path.simplify_threshold']
pth._interpolation_steps = 1
return pth
@classmethod
def _create_closed(cls, vertices):
"""
Create a closed polygonal path going through *vertices*.
Unlike ``Path(..., closed=True)``, *vertices* should **not** end with
an entry for the CLOSEPATH; this entry is added by `._create_closed`.
"""
v = _to_unmasked_float_array(vertices)
return cls(np.concatenate([v, v[:1]]), closed=True)
def _update_values(self):
self._simplify_threshold = mpl.rcParams['path.simplify_threshold']
self._should_simplify = (
self._simplify_threshold > 0 and
mpl.rcParams['path.simplify'] and
len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO))
)
    @property
    def vertices(self):
        """The vertices of the `Path` as an (N, 2) array."""
        return self._vertices

    @vertices.setter
    def vertices(self, vertices):
        # Rejected on readonly paths; otherwise the cached simplification
        # settings must be recomputed for the new data.
        if self._readonly:
            raise AttributeError("Can't set vertices on a readonly Path")
        self._vertices = vertices
        self._update_values()
    @property
    def codes(self):
        """
        The list of codes in the `Path` as a 1D array.

        Each code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4` or
        `CLOSEPOLY`.  For codes that correspond to more than one vertex
        (`CURVE3` and `CURVE4`), that code will be repeated so that the length
        of `vertices` and `codes` is always the same.
        """
        return self._codes

    @codes.setter
    def codes(self, codes):
        # Rejected on readonly paths; otherwise the cached simplification
        # settings must be recomputed for the new data.
        if self._readonly:
            raise AttributeError("Can't set codes on a readonly Path")
        self._codes = codes
        self._update_values()
    @property
    def simplify_threshold(self):
        """
        The fraction of a pixel difference below which vertices will
        be simplified out.
        """
        return self._simplify_threshold

    @simplify_threshold.setter
    def simplify_threshold(self, threshold):
        # Note: unlike the vertices/codes setters, this does not re-derive
        # _should_simplify; the caller controls both independently.
        self._simplify_threshold = threshold
    @property
    def should_simplify(self):
        """
        `True` if the vertices array should be simplified.
        """
        return self._should_simplify

    @should_simplify.setter
    def should_simplify(self, should_simplify):
        self._should_simplify = should_simplify
    @property
    def readonly(self):
        """
        `True` if the `Path` is read-only.

        Set at construction time; there is no setter.
        """
        return self._readonly
    def copy(self):
        """
        Return a shallow copy of the `Path`, which will share the
        vertices and codes with the source `Path`.

        The instance ``__dict__`` is copied, so the copy keeps the source's
        readonly flag.
        """
        return copy.copy(self)
def __deepcopy__(self, memo):
"""
Return a deepcopy of the `Path`. The `Path` will not be
readonly, even if the source `Path` is.
"""
# Deepcopying arrays (vertices, codes) strips the writeable=False flag.
cls = type(self)
memo[id(self)] = p = cls.__new__(cls)
for k, v in self.__dict__.items():
setattr(p, k, copy.deepcopy(v, memo))
p._readonly = False
return p
    def deepcopy(self, memo=None):
        """
        Return a deep copy of the `Path`.  The `Path` will not be readonly,
        even if the source `Path` is.

        Parameters
        ----------
        memo : dict, optional
            A dictionary to use for memoizing, passed to `copy.deepcopy`.

        Returns
        -------
        Path
            A deep copy of the `Path`, but not readonly.
        """
        # Delegates to __deepcopy__ through copy.deepcopy.
        return copy.deepcopy(self, memo)
@classmethod
def make_compound_path_from_polys(cls, XY):
"""
Make a compound `Path` object to draw a number of polygons with equal
numbers of sides.
.. plot:: gallery/misc/histogram_path.py
Parameters
----------
XY : (numpolys, numsides, 2) array
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.full(nverts, cls.LINETO, dtype=cls.code_type)
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
@classmethod
def make_compound_path(cls, *args):
r"""
Concatenate a list of `Path`\s into a single `Path`, removing all `STOP`\s.
"""
if not args:
return Path(np.empty([0, 2], dtype=np.float32))
vertices = np.concatenate([path.vertices for path in args])
codes = np.empty(len(vertices), dtype=cls.code_type)
i = 0
for path in args:
size = len(path.vertices)
if path.codes is None:
if size:
codes[i] = cls.MOVETO
codes[i+1:i+size] = cls.LINETO
else:
codes[i:i+size] = path.codes
i += size
not_stop_mask = codes != cls.STOP # Remove STOPs, as internal STOPs are a bug.
return cls(vertices[not_stop_mask], codes[not_stop_mask])
    def __repr__(self):
        # repr of the underlying arrays; may be long for large paths.
        return f"Path({self.vertices!r}, {self.codes!r})"

    def __len__(self):
        # The length of a path is its number of vertices.
        return len(self.vertices)
    def iter_segments(self, transform=None, remove_nans=True, clip=None,
                      snap=False, stroke_width=1.0, simplify=None,
                      curves=True, sketch=None):
        """
        Iterate over all curve segments in the path.

        Each iteration returns a pair ``(vertices, code)``, where ``vertices``
        is a sequence of 1-3 coordinate pairs, and ``code`` is a `Path` code.

        Additionally, this method can provide a number of standard cleanups and
        conversions to the path.

        Parameters
        ----------
        transform : None or :class:`~matplotlib.transforms.Transform`
            If not None, the given affine transformation will be applied to the
            path.
        remove_nans : bool, optional
            Whether to remove all NaNs from the path and skip over them using
            MOVETO commands.
        clip : None or (float, float, float, float), optional
            If not None, must be a four-tuple (x1, y1, x2, y2)
            defining a rectangle in which to clip the path.
        snap : None or bool, optional
            If True, snap all nodes to pixels; if False, don't snap them.
            If None, snap if the path contains only segments
            parallel to the x or y axes, and no more than 1024 of them.
        stroke_width : float, optional
            The width of the stroke being drawn (used for path snapping).
        simplify : None or bool, optional
            Whether to simplify the path by removing vertices
            that do not affect its appearance.  If None, use the
            :attr:`should_simplify` attribute.  See also :rc:`path.simplify`
            and :rc:`path.simplify_threshold`.
        curves : bool, optional
            If True, curve segments will be returned as curve segments.
            If False, all curves will be converted to line segments.
        sketch : None or sequence, optional
            If not None, must be a 3-tuple of the form
            (scale, length, randomness), representing the sketch parameters.
        """
        if not len(self):
            return

        # `cleaned` performs all requested conversions and returns a path
        # whose codes array is never None.
        cleaned = self.cleaned(transform=transform,
                               remove_nans=remove_nans, clip=clip,
                               snap=snap, stroke_width=stroke_width,
                               simplify=simplify, curves=curves,
                               sketch=sketch)

        # Cache these object lookups for performance in the loop.
        NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
        STOP = self.STOP

        vertices = iter(cleaned.vertices)
        codes = iter(cleaned.codes)
        for curr_vertices, code in zip(vertices, codes):
            if code == STOP:
                break
            # Multi-vertex codes (CURVE3/CURVE4) repeat the code once per
            # vertex; consume the duplicate codes and flatten the extra
            # vertices into a single coordinate sequence before yielding.
            extra_vertices = NUM_VERTICES_FOR_CODE[code] - 1
            if extra_vertices:
                for i in range(extra_vertices):
                    next(codes)
                    curr_vertices = np.append(curr_vertices, next(vertices))
            yield curr_vertices, code
    def iter_bezier(self, **kwargs):
        """
        Iterate over each Bézier curve (lines included) in a `Path`.

        Parameters
        ----------
        **kwargs
            Forwarded to `.iter_segments`.

        Yields
        ------
        B : `~matplotlib.bezier.BezierSegment`
            The Bézier curves that make up the current path.  Note in
            particular that freestanding points are Bézier curves of order 0,
            and lines are Bézier curves of order 1 (with two control points).
        code : `~matplotlib.path.Path.code_type`
            The code describing what kind of curve is being returned.
            `MOVETO`, `LINETO`, `CURVE3`, and `CURVE4` correspond to
            Bézier curves with 1, 2, 3, and 4 control points (respectively).
            `CLOSEPOLY` is a `LINETO` with the control points correctly
            chosen based on the start/end points of the current stroke.

        Raises
        ------
        ValueError
            If the path does not start with `MOVETO`, or if an unknown code
            is encountered.
        """
        # first_vert: start of the current stroke (the CLOSEPOLY target).
        # prev_vert: endpoint of the previously yielded segment.
        first_vert = None
        prev_vert = None
        for verts, code in self.iter_segments(**kwargs):
            if first_vert is None:
                if code != Path.MOVETO:
                    raise ValueError("Malformed path, must start with MOVETO.")
            if code == Path.MOVETO:  # a point is like "CURVE1"
                first_vert = verts
                yield BezierSegment(np.array([first_vert])), code
            elif code == Path.LINETO:  # "CURVE2"
                yield BezierSegment(np.array([prev_vert, verts])), code
            elif code == Path.CURVE3:
                yield BezierSegment(np.array([prev_vert, verts[:2],
                                              verts[2:]])), code
            elif code == Path.CURVE4:
                yield BezierSegment(np.array([prev_vert, verts[:2],
                                              verts[2:4], verts[4:]])), code
            elif code == Path.CLOSEPOLY:
                yield BezierSegment(np.array([prev_vert, first_vert])), code
            elif code == Path.STOP:
                return
            else:
                raise ValueError(f"Invalid Path.code_type: {code}")
            # The last two scalars of the flat verts sequence are the endpoint
            # of the segment just yielded.
            prev_vert = verts[-2:]
def _iter_connected_components(self):
"""Return subpaths split at MOVETOs."""
if self.codes is None:
yield self
else:
idxs = np.append((self.codes == Path.MOVETO).nonzero()[0], len(self.codes))
for sl in map(slice, idxs, idxs[1:]):
yield Path._fast_from_codes_and_verts(
self.vertices[sl], self.codes[sl], self)
def cleaned(self, transform=None, remove_nans=False, clip=None,
*, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Return a new `Path` with vertices and codes cleaned according to the
parameters.
See Also
--------
Path.iter_segments : for details of the keyword arguments.
"""
vertices, codes = _path.cleanup_path(
self, transform, remove_nans, clip, snap, stroke_width, simplify,
curves, sketch)
pth = Path._fast_from_codes_and_verts(vertices, codes, self)
if not simplify:
pth._should_simplify = False
return pth
def transformed(self, transform):
"""
Return a transformed copy of the path.
See Also
--------
matplotlib.transforms.TransformedPath
A specialized path class that will cache the transformed result and
automatically update when the transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
    def contains_point(self, point, transform=None, radius=0.0):
        """
        Return whether the area enclosed by the path contains the given point.

        The path is always treated as closed; i.e. if the last code is not
        `CLOSEPOLY` an implicit segment connecting the last vertex to the first
        vertex is assumed.

        Parameters
        ----------
        point : (float, float)
            The point (x, y) to check.
        transform : `~matplotlib.transforms.Transform`, optional
            If not ``None``, *point* will be compared to ``self`` transformed
            by *transform*; i.e. for a correct check, *transform* should
            transform the path into the coordinate system of *point*.
        radius : float, default: 0
            Additional margin on the path in coordinates of *point*.
            The path is extended tangentially by *radius/2*; i.e. if you would
            draw the path with a linewidth of *radius*, all points on the line
            would still be considered to be contained in the area.  Conversely,
            negative values shrink the area: Points on the imaginary line
            will be considered outside the area.

        Returns
        -------
        bool

        Notes
        -----
        The current algorithm has some limitations:

        - The result is undefined for points exactly at the boundary
          (i.e. at the path shifted by *radius/2*).
        - The result is undefined if there is no enclosed area, i.e. all
          vertices are on a straight line.
        - If bounding lines start to cross each other due to *radius* shift,
          the result is not guaranteed to be correct.
        """
        if transform is not None:
            transform = transform.frozen()
        # `point_in_path` does not handle nonlinear transforms, so we
        # transform the path ourselves.  If *transform* is affine, letting
        # `point_in_path` handle the transform avoids allocating an extra
        # buffer.
        if transform and not transform.is_affine:
            # Rebind `self` to the pre-transformed path; the original path
            # object is not modified.
            self = transform.transform_path(self)
            transform = None
        return _path.point_in_path(point[0], point[1], radius, self, transform)
def contains_points(self, points, transform=None, radius=0.0):
"""
Return whether the area enclosed by the path contains the given points.
The path is always treated as closed; i.e. if the last code is not
`CLOSEPOLY` an implicit segment connecting the last vertex to the first
vertex is assumed.
Parameters
----------
points : (N, 2) array
The points to check. Columns contain x and y values.
transform : `~matplotlib.transforms.Transform`, optional
If not ``None``, *points* will be compared to ``self`` transformed
by *transform*; i.e. for a correct check, *transform* should
transform the path into the coordinate system of *points*.
radius : float, default: 0
Additional margin on the path in coordinates of *points*.
The path is extended tangentially by *radius/2*; i.e. if you would
draw the path with a linewidth of *radius*, all points on the line
would still be considered to be contained in the area. Conversely,
negative values shrink the area: Points on the imaginary line
will be considered outside the area.
Returns
-------
length-N bool array
Notes
-----
The current algorithm has some limitations:
- The result is undefined for points exactly at the boundary
(i.e. at the path shifted by *radius/2*).
- The result is undefined if there is no enclosed area, i.e. all
vertices are on a straight line.
- If bounding lines start to cross each other due to *radius* shift,
the result is not guaranteed to be correct.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result.astype('bool')
def contains_path(self, path, transform=None):
"""
Return whether this (closed) path completely contains the given path.
If *transform* is not ``None``, the path will be transformed before
checking for containment.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
    def get_extents(self, transform=None, **kwargs):
        """
        Get Bbox of the path.

        Parameters
        ----------
        transform : `~matplotlib.transforms.Transform`, optional
            Transform to apply to path before computing extents, if any.
        **kwargs
            Forwarded to `.iter_bezier`.

        Returns
        -------
        matplotlib.transforms.Bbox
            The extents of the path Bbox([[xmin, ymin], [xmax, ymax]])
        """
        from .transforms import Bbox
        if transform is not None:
            # Rebind `self` to the transformed path; the original is untouched.
            self = transform.transform_path(self)
        if self.codes is None:
            # Pure polyline: every vertex participates in the extents.
            xys = self.vertices
        elif len(np.intersect1d(self.codes, [Path.CURVE3, Path.CURVE4])) == 0:
            # Optimization for the straight line case.
            # Instead of iterating through each curve, consider
            # each line segment's end-points
            # (recall that STOP and CLOSEPOLY vertices are ignored)
            xys = self.vertices[np.isin(self.codes,
                                        [Path.MOVETO, Path.LINETO])]
        else:
            xys = []
            for curve, code in self.iter_bezier(**kwargs):
                # places where the derivative is zero can be extrema
                _, dzeros = curve.axis_aligned_extrema()
                # as can the ends of the curve
                xys.append(curve([0, *dzeros, 1]))
            xys = np.concatenate(xys)
        if len(xys):
            return Bbox([xys.min(axis=0), xys.max(axis=0)])
        else:
            # Empty path: a null bbox.
            return Bbox.null()
    def intersects_path(self, other, filled=True):
        """
        Return whether this path intersects another given path.

        If *filled* is True, then this also returns True if one path completely
        encloses the other (i.e., the paths are treated as filled).
        """
        # Delegated to the C extension.
        return _path.path_intersects_path(self, other, filled)
    def intersects_bbox(self, bbox, filled=True):
        """
        Return whether this path intersects a given `~.transforms.Bbox`.

        If *filled* is True, then this also returns True if the path completely
        encloses the `.Bbox` (i.e., the path is treated as filled).

        The bounding box is always considered filled.
        """
        # Delegated to the C extension.
        return _path.path_intersects_rectangle(
            self, bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)
    def interpolated(self, steps):
        """
        Return a new path with each segment divided into *steps* parts.

        Codes other than `LINETO`, `MOVETO`, and `CLOSEPOLY` are not handled
        correctly.

        Parameters
        ----------
        steps : int
            The number of segments in the new path for each in the original.

        Returns
        -------
        Path
            The interpolated path.
        """
        if steps == 1 or len(self) == 0:
            # Nothing to interpolate.
            return self
        if self.codes is not None and self.MOVETO in self.codes[1:]:
            # Compound path: interpolate each connected component on its own
            # and stitch the results back together.
            return self.make_compound_path(
                *(p.interpolated(steps) for p in self._iter_connected_components()))
        if self.codes is not None and self.CLOSEPOLY in self.codes and not np.all(
                self.vertices[self.codes == self.CLOSEPOLY] == self.vertices[0]):
            # The CLOSEPOLY vertex differs from the first vertex: snap it
            # there first so the interpolated closing segment ends in the
            # right place.
            vertices = self.vertices.copy()
            vertices[self.codes == self.CLOSEPOLY] = vertices[0]
        else:
            vertices = self.vertices
        vertices = simple_linear_interpolation(vertices, steps)
        codes = self.codes
        if codes is not None:
            # Keep the original codes at every steps-th slot; the newly
            # inserted intermediate vertices are all LINETOs.
            new_codes = np.full((len(codes) - 1) * steps + 1, Path.LINETO,
                                dtype=self.code_type)
            new_codes[0::steps] = codes
        else:
            new_codes = None
        return Path(vertices, new_codes)
    def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
        """
        Convert this path to a list of polygons or polylines.  Each
        polygon/polyline is an (N, 2) array of vertices.  In other words,
        each polygon has no `MOVETO` instructions or curves.  This
        is useful for displaying in backends that do not support
        compound paths or Bézier curves.

        If *width* and *height* are both non-zero then the lines will
        be simplified so that vertices outside of (0, 0), (width,
        height) will be clipped.

        The resulting polygons will be simplified if the
        :attr:`Path.should_simplify` attribute of the path is `True`.

        If *closed_only* is `True` (default), only closed polygons,
        with the last point being the same as the first point, will be
        returned.  Any unclosed polylines in the path will be
        explicitly closed.  If *closed_only* is `False`, any unclosed
        polygons in the path will be returned as unclosed polygons,
        and the closed polygons will be returned explicitly closed by
        setting the last point to the same as the first point.
        """
        if len(self.vertices) == 0:
            return []

        if transform is not None:
            transform = transform.frozen()

        if self.codes is None and (width == 0 or height == 0):
            # Fast path: a single polyline, no curves, no clipping.
            vertices = self.vertices
            if closed_only:
                if len(vertices) < 3:
                    # Fewer than 3 points cannot form a closed polygon.
                    return []
                elif np.any(vertices[0] != vertices[-1]):
                    # Explicitly close the polygon.
                    vertices = [*vertices, vertices[0]]

            if transform is None:
                return [vertices]
            else:
                return [transform.transform(vertices)]

        # Deal with the case where there are curves and/or multiple
        # subpaths (using extension code)
        return _path.convert_path_to_polygons(
            self, transform, width, height, closed_only)
_unit_rectangle = None
@classmethod
def unit_rectangle(cls):
"""
Return a `Path` instance of the unit rectangle from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = cls([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]],
closed=True, readonly=True)
return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()
@classmethod
def unit_regular_polygon(cls, numVertices):
"""
Return a :class:`Path` instance for a unit regular polygon with the
given *numVertices* such that the circumscribing circle has radius 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = ((2 * np.pi / numVertices) * np.arange(numVertices + 1)
# This initial rotation is to make sure the polygon always
# "points-up".
+ np.pi / 2)
verts = np.column_stack((np.cos(theta), np.sin(theta)))
path = cls(verts, closed=True, readonly=True)
if numVertices <= 16:
cls._unit_regular_polygons[numVertices] = path
return path
_unit_regular_stars = WeakValueDictionary()
@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
Return a :class:`Path` for a unit regular star with the given
numVertices and radius of 1.0, centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T
path = cls(verts, closed=True, readonly=True)
if numVertices <= 16:
cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
    @classmethod
    def unit_regular_asterisk(cls, numVertices):
        """
        Return a :class:`Path` for a unit regular asterisk with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        # An asterisk is a star whose inner circle has collapsed to radius 0.
        return cls.unit_regular_star(numVertices, 0.0)
    _unit_circle = None

    @classmethod
    def unit_circle(cls):
        """
        Return the readonly :class:`Path` of the unit circle.

        For most cases, :func:`Path.circle` will be what you want.
        """
        # Lazily build and cache the readonly singleton.
        if cls._unit_circle is None:
            cls._unit_circle = cls.circle(center=(0, 0), radius=1,
                                          readonly=True)
        return cls._unit_circle
    @classmethod
    def circle(cls, center=(0., 0.), radius=1., readonly=False):
        """
        Return a `Path` representing a circle of a given radius and center.

        Parameters
        ----------
        center : (float, float), default: (0, 0)
            The center of the circle.
        radius : float, default: 1
            The radius of the circle.
        readonly : bool
            Whether the created path should have the "readonly" argument
            set when creating the Path instance.

        Notes
        -----
        The circle is approximated using 8 cubic Bézier curves, as described in

            Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
            Bezier Cubic Splines <https://www.tinaja.com/glib/ellipse4.pdf>`_.
        """
        MAGIC = 0.2652031
        SQRTHALF = np.sqrt(0.5)
        MAGIC45 = SQRTHALF * MAGIC

        # Control points of 8 cubic Bézier arcs tracing the unit circle,
        # starting and ending at (0, -1); the final duplicated vertex carries
        # the CLOSEPOLY code.
        vertices = np.array([[0.0, -1.0],
                             [MAGIC, -1.0],
                             [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                             [SQRTHALF, -SQRTHALF],
                             [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                             [1.0, -MAGIC],
                             [1.0, 0.0],
                             [1.0, MAGIC],
                             [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                             [SQRTHALF, SQRTHALF],
                             [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                             [MAGIC, 1.0],
                             [0.0, 1.0],
                             [-MAGIC, 1.0],
                             [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
                             [-SQRTHALF, SQRTHALF],
                             [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
                             [-1.0, MAGIC],
                             [-1.0, 0.0],
                             [-1.0, -MAGIC],
                             [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
                             [-SQRTHALF, -SQRTHALF],
                             [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
                             [-MAGIC, -1.0],
                             [0.0, -1.0],
                             [0.0, -1.0]],
                            dtype=float)

        codes = [cls.CURVE4] * 26
        codes[0] = cls.MOVETO
        codes[-1] = cls.CLOSEPOLY
        return Path(vertices * radius + center, codes, readonly=readonly)
    _unit_circle_righthalf = None

    @classmethod
    def unit_circle_righthalf(cls):
        """
        Return a `Path` of the right half of a unit circle.

        See `Path.circle` for the reference on the approximation used.
        """
        if cls._unit_circle_righthalf is None:
            MAGIC = 0.2652031
            SQRTHALF = np.sqrt(0.5)
            MAGIC45 = SQRTHALF * MAGIC

            # Four cubic Bézier arcs from (0, -1) around to (0, 1); the final
            # vertex closes the shape back to the start with CLOSEPOLY.
            vertices = np.array(
                [[0.0, -1.0],
                 [MAGIC, -1.0],
                 [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                 [SQRTHALF, -SQRTHALF],
                 [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                 [1.0, -MAGIC],
                 [1.0, 0.0],
                 [1.0, MAGIC],
                 [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                 [SQRTHALF, SQRTHALF],
                 [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                 [MAGIC, 1.0],
                 [0.0, 1.0],
                 [0.0, -1.0]],
                float)

            codes = np.full(14, cls.CURVE4, dtype=cls.code_type)
            codes[0] = cls.MOVETO
            codes[-1] = cls.CLOSEPOLY

            # Cache the readonly singleton.
            cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
        return cls._unit_circle_righthalf
    @classmethod
    def arc(cls, theta1, theta2, n=None, is_wedge=False):
        """
        Return a `Path` for the unit circle arc from angles *theta1* to
        *theta2* (in degrees).

        *theta2* is unwrapped to produce the shortest arc within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
        *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.

            Masionobe, L.  2003.  `Drawing an elliptical arc using
            polylines, quadratic or cubic Bezier curves
            <http://www.spaceroots.org/documents/ellipse/index.html>`_.
        """
        halfpi = np.pi * 0.5

        eta1 = theta1
        eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
        # Ensure 2pi range is not flattened to 0 due to floating-point errors,
        # but don't try to expand existing 0 range.
        if theta2 != theta1 and eta2 <= eta1:
            eta2 += 360
        eta1, eta2 = np.deg2rad([eta1, eta2])

        # number of curve segments to make
        if n is None:
            n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
        if n < 1:
            raise ValueError("n must be >= 1 or None")

        deta = (eta2 - eta1) / n
        t = np.tan(0.5 * deta)
        alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0

        steps = np.linspace(eta1, eta2, n + 1, True)
        cos_eta = np.cos(steps)
        sin_eta = np.sin(steps)

        # Segment start points (A) and end points (B) together with their
        # unit tangents; alpha scales the tangents into control points.
        xA = cos_eta[:-1]
        yA = sin_eta[:-1]
        xA_dot = -yA
        yA_dot = xA

        xB = cos_eta[1:]
        yB = sin_eta[1:]
        xB_dot = -yB
        yB_dot = xB

        if is_wedge:
            # A wedge adds a leading MOVETO/LINETO from the center, and a
            # trailing LINETO/CLOSEPOLY back to it.
            length = n * 3 + 4
            vertices = np.zeros((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[1] = [xA[0], yA[0]]
            codes[0:2] = [cls.MOVETO, cls.LINETO]
            codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
            vertex_offset = 2
            end = length - 2
        else:
            length = n * 3 + 1
            vertices = np.empty((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[0] = [xA[0], yA[0]]
            codes[0] = cls.MOVETO
            vertex_offset = 1
            end = length

        # Fill the two control points and the endpoint of each CURVE4 triple
        # in one vectorized pass per column.
        vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
        vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
        vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
        vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
        vertices[vertex_offset+2:end:3, 0] = xB
        vertices[vertex_offset+2:end:3, 1] = yB

        return cls(vertices, codes, readonly=True)
@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
Return a `Path` for the unit circle wedge from angles *theta1* to
*theta2* (in degrees).
*theta2* is unwrapped to produce the shortest wedge within 360 degrees.
That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
to *theta2* - 360 and not a full circle plus some extra overlap.
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
See `Path.arc` for the reference on the approximation used.
"""
return cls.arc(theta1, theta2, n, True)
@staticmethod
@lru_cache(8)
def hatch(hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a `Path` that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
return (get_path(hatchpattern, density)
if hatchpattern is not None else None)
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
def get_path_collection_extents(
        master_transform, paths, transforms, offsets, offset_transform):
    r"""
    Get bounding box of a `.PathCollection`\s internal objects.

    That is, given a sequence of `Path`\s, `.Transform`\s objects, and offsets, as found
    in a `.PathCollection`, return the bounding box that encapsulates all of them.

    Parameters
    ----------
    master_transform : `~matplotlib.transforms.Transform`
        Global transformation applied to all paths.
    paths : list of `Path`
    transforms : list of `~matplotlib.transforms.Affine2DBase`
        If non-empty, this overrides *master_transform*.
    offsets : (N, 2) array-like
    offset_transform : `~matplotlib.transforms.Affine2DBase`
        Transform applied to the offsets before offsetting the path.

    Notes
    -----
    The way that *paths*, *transforms* and *offsets* are combined follows the same
    method as for collections: each is iterated over independently, so if you have 3
    paths (A, B, C), 2 transforms (α, β) and 1 offset (O), their combinations are as
    follows:

    - (A, α, O)
    - (B, β, O)
    - (C, α, O)
    """
    from .transforms import Bbox

    if not len(paths):
        raise ValueError("No paths provided")
    if not len(offsets):
        raise ValueError("No offsets provided")
    # The heavy lifting is done in the C extension; it returns the raw
    # extents and the minimum positive values for log-scale handling.
    extents, minpos = _path.get_path_collection_extents(
        master_transform, paths, np.atleast_3d(transforms),
        offsets, offset_transform)
    return Bbox.from_extents(*extents, minpos=minpos)
venv\Lib\site-packages\matplotlib\patheffects.py
"""
Defines classes for path effects. The path effects are supported in `.Text`,
`.Line2D` and `.Patch`.
.. seealso::
:ref:`patheffects_guide`
"""
from matplotlib.backend_bases import RendererBase
from matplotlib import colors as mcolors
from matplotlib import patches as mpatches
from matplotlib import transforms as mtransforms
from matplotlib.path import Path
import numpy as np
class AbstractPathEffect:
    """
    A base class for path effects.

    Subclasses should override the ``draw_path`` method to add effect
    functionality.
    """

    def __init__(self, offset=(0., 0.)):
        """
        Parameters
        ----------
        offset : (float, float), default: (0, 0)
            The (x, y) offset to apply to the path, measured in points.
        """
        self._offset = offset

    def _offset_transform(self, renderer):
        """Apply the offset to the given transform."""
        # Convert the points-based offset to pixels for this renderer.
        dx, dy = (renderer.points_to_pixels(v) for v in self._offset)
        return mtransforms.Affine2D().translate(dx, dy)

    def _update_gc(self, gc, new_gc_dict):
        """
        Update the given GraphicsContext with the given dict of properties.

        The keys in the dictionary are used to identify the appropriate
        ``set_`` method on the *gc*.

        Raises
        ------
        AttributeError
            If a key has no matching ``set_<key>`` method on *gc*.
        """
        props = dict(new_gc_dict)
        # "dashes" is special-cased: its value is itself a kwargs dict.
        dashes = props.pop("dashes", None)
        if dashes:
            gc.set_dashes(**dashes)
        for key, value in props.items():
            setter = getattr(gc, 'set_' + key, None)
            if not callable(setter):
                raise AttributeError(f'Unknown property {key}')
            setter(value)
        return gc

    def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):
        """
        Derived should override this method.  The arguments are the same
        as :meth:`matplotlib.backend_bases.RendererBase.draw_path`
        except the first argument is a renderer.
        """
        # Get the real renderer, not a PathEffectRenderer.
        if isinstance(renderer, PathEffectRenderer):
            renderer = renderer._renderer
        return renderer.draw_path(gc, tpath, affine, rgbFace)
class PathEffectRenderer(RendererBase):
    """
    Implements a Renderer which contains another renderer.

    This proxy then intercepts draw calls, calling the appropriate
    :class:`AbstractPathEffect` draw method.

    .. note::
        Not all methods have been overridden on this RendererBase subclass.
        It may be necessary to add further methods to extend the PathEffects
        capabilities further.
    """

    def __init__(self, path_effects, renderer):
        """
        Parameters
        ----------
        path_effects : iterable of :class:`AbstractPathEffect`
            The path effects which this renderer represents.
        renderer : `~matplotlib.backend_bases.RendererBase` subclass
        """
        self._path_effects = path_effects
        self._renderer = renderer

    def copy_with_path_effect(self, path_effects):
        # Build a sibling proxy around the same wrapped renderer but with a
        # different list of path effects.
        return self.__class__(path_effects, self._renderer)

    def __getattribute__(self, name):
        # Geometry and GC queries are answered directly by the wrapped
        # renderer; everything else (notably the draw_* methods) is resolved
        # on this proxy so effects can intercept it.
        if name in ['flipy', 'get_canvas_width_height', 'new_gc',
                    'points_to_pixels', '_text2path', 'height', 'width']:
            return getattr(self._renderer, name)
        else:
            return object.__getattribute__(self, name)

    def draw_path(self, gc, tpath, affine, rgbFace=None):
        # Draw the path once per effect, each against the real renderer.
        for path_effect in self._path_effects:
            path_effect.draw_path(self._renderer, gc, tpath, affine,
                                  rgbFace)

    def draw_markers(
            self, gc, marker_path, marker_trans, path, *args, **kwargs):
        # We do a little shimmy so that all markers are drawn for each path
        # effect in turn. Essentially, we induce recursion (depth 1) which is
        # terminated once we have just a single path effect to work with.
        if len(self._path_effects) == 1:
            # Call the base path effect function - this uses the unoptimised
            # approach of calling "draw_path" multiple times.
            return super().draw_markers(gc, marker_path, marker_trans, path,
                                        *args, **kwargs)

        for path_effect in self._path_effects:
            renderer = self.copy_with_path_effect([path_effect])
            # Recursively call this method, only next time we will only have
            # one path effect.
            renderer.draw_markers(gc, marker_path, marker_trans, path,
                                  *args, **kwargs)

    def draw_path_collection(self, gc, master_transform, paths, *args,
                             **kwargs):
        # We do a little shimmy so that all paths are drawn for each path
        # effect in turn. Essentially, we induce recursion (depth 1) which is
        # terminated once we have just a single path effect to work with.
        if len(self._path_effects) == 1:
            # Call the base path effect function - this uses the unoptimised
            # approach of calling "draw_path" multiple times.
            return super().draw_path_collection(gc, master_transform, paths,
                                                *args, **kwargs)

        for path_effect in self._path_effects:
            renderer = self.copy_with_path_effect([path_effect])
            # Recursively call this method, only next time we will only have
            # one path effect.
            renderer.draw_path_collection(gc, master_transform, paths,
                                          *args, **kwargs)

    def open_group(self, s, gid=None):
        # Grouping (e.g. SVG <g> elements) is delegated to the real renderer.
        return self._renderer.open_group(s, gid)

    def close_group(self, s):
        return self._renderer.close_group(s)
class Normal(AbstractPathEffect):
    """
    The "identity" PathEffect.

    ``Normal`` adds no visual effect of its own: it simply renders the
    artist exactly as it would have been drawn without any path effect.
    Its sole purpose is to draw the original artist, e.g. on top of other
    effects such as shadows.
    """
def _subclass_with_normal(effect_class):
"""
Create a PathEffect class combining *effect_class* and a normal draw.
"""
class withEffect(effect_class):
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
super().draw_path(renderer, gc, tpath, affine, rgbFace)
renderer.draw_path(gc, tpath, affine, rgbFace)
withEffect.__name__ = f"with{effect_class.__name__}"
withEffect.__qualname__ = f"with{effect_class.__name__}"
withEffect.__doc__ = f"""
A shortcut PathEffect for applying `.{effect_class.__name__}` and then
drawing the original Artist.
With this class you can use ::
artist.set_path_effects([patheffects.with{effect_class.__name__}()])
as a shortcut for ::
artist.set_path_effects([patheffects.{effect_class.__name__}(),
patheffects.Normal()])
"""
# Docstring inheritance doesn't work for locally-defined subclasses.
withEffect.draw_path.__doc__ = effect_class.draw_path.__doc__
return withEffect
class Stroke(AbstractPathEffect):
    """A line based PathEffect which re-draws a stroke."""

    def __init__(self, offset=(0, 0), **kwargs):
        """
        The path will be stroked with its gc updated with the given
        keyword arguments, i.e., the keyword arguments should be valid
        gc parameter values.
        """
        super().__init__(offset)
        self._gc = kwargs

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """Draw the path with updated gc."""
        # Work on a copy so the caller's gc is left untouched.
        stroke_gc = renderer.new_gc()
        stroke_gc.copy_properties(gc)
        stroke_gc = self._update_gc(stroke_gc, self._gc)
        shifted = affine + self._offset_transform(renderer)
        renderer.draw_path(stroke_gc, tpath, shifted, rgbFace)
        stroke_gc.restore()
withStroke = _subclass_with_normal(effect_class=Stroke)
class SimplePatchShadow(AbstractPathEffect):
    """A simple shadow via a filled patch."""

    def __init__(self, offset=(2, -2),
                 shadow_rgbFace=None, alpha=None,
                 rho=0.3, **kwargs):
        """
        Parameters
        ----------
        offset : (float, float), default: (2, -2)
            The (x, y) offset of the shadow in points.
        shadow_rgbFace : :mpltype:`color`
            The shadow color.
        alpha : float, default: 0.3
            The alpha transparency of the created shadow patch.
        rho : float, default: 0.3
            A scale factor to apply to the rgbFace color if *shadow_rgbFace*
            is not specified.
        **kwargs
            Extra keywords are stored and passed through to
            :meth:`AbstractPathEffect._update_gc`.
        """
        super().__init__(offset)
        self._shadow_rgbFace = (None if shadow_rgbFace is None
                                else mcolors.to_rgba(shadow_rgbFace))
        self._alpha = 0.3 if alpha is None else alpha
        self._rho = rho
        # Keywords forwarded to AbstractPathEffect._update_gc at draw time.
        self._gc = kwargs

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """
        Overrides the standard draw_path to add the shadow offset and
        necessary color changes for the shadow.
        """
        # Work on a copy of the gc so the caller's is never modified.
        shadow_gc = renderer.new_gc()
        shadow_gc.copy_properties(gc)
        if self._shadow_rgbFace is None:
            # No explicit shadow color: darken the artist's own face color.
            r, g, b = (rgbFace or (1., 1., 1.))[:3]
            face = (r * self._rho, g * self._rho, b * self._rho)
        else:
            face = self._shadow_rgbFace
        # The shadow is a fill-only patch: no stroke.
        shadow_gc.set_foreground("none")
        shadow_gc.set_alpha(self._alpha)
        shadow_gc.set_linewidth(0)
        shadow_gc = self._update_gc(shadow_gc, self._gc)
        renderer.draw_path(
            shadow_gc, tpath, affine + self._offset_transform(renderer),
            face)
        shadow_gc.restore()
withSimplePatchShadow = _subclass_with_normal(effect_class=SimplePatchShadow)
class SimpleLineShadow(AbstractPathEffect):
    """A simple shadow via a line."""

    def __init__(self, offset=(2, -2),
                 shadow_color='k', alpha=0.3, rho=0.3, **kwargs):
        """
        Parameters
        ----------
        offset : (float, float), default: (2, -2)
            The (x, y) offset to apply to the path, in points.
        shadow_color : :mpltype:`color`, default: 'black'
            The shadow color.
            A value of ``None`` takes the original artist's color
            with a scale factor of *rho*.
        alpha : float, default: 0.3
            The alpha transparency of the created shadow patch.
        rho : float, default: 0.3
            A scale factor to apply to the rgbFace color if *shadow_color*
            is ``None``.
        **kwargs
            Extra keywords are stored and passed through to
            :meth:`AbstractPathEffect._update_gc`.
        """
        super().__init__(offset)
        self._shadow_color = (None if shadow_color is None
                              else mcolors.to_rgba(shadow_color))
        self._alpha = alpha
        self._rho = rho
        # Keywords forwarded to AbstractPathEffect._update_gc at draw time.
        self._gc = kwargs

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """
        Overrides the standard draw_path to add the shadow offset and
        necessary color changes for the shadow.
        """
        # Work on a copy of the gc so the caller's is never modified.
        shadow_gc = renderer.new_gc()
        shadow_gc.copy_properties(gc)
        if self._shadow_color is None:
            # No explicit shadow color: darken the line's own stroke color.
            r, g, b = (shadow_gc.get_foreground() or (1., 1., 1.))[:3]
            stroke_color = (r * self._rho, g * self._rho, b * self._rho)
        else:
            stroke_color = self._shadow_color
        shadow_gc.set_foreground(stroke_color)
        shadow_gc.set_alpha(self._alpha)
        shadow_gc = self._update_gc(shadow_gc, self._gc)
        # Stroke only (no rgbFace) -- the shadow of a line is a line.
        renderer.draw_path(
            shadow_gc, tpath, affine + self._offset_transform(renderer))
        shadow_gc.restore()
class PathPatchEffect(AbstractPathEffect):
    """
    Draws a `.PathPatch` instance whose Path comes from the original
    PathEffect artist.
    """

    def __init__(self, offset=(0, 0), **kwargs):
        """
        Parameters
        ----------
        offset : (float, float), default: (0, 0)
            The (x, y) offset to apply to the path, in points.
        **kwargs
            All keyword arguments are passed through to the
            :class:`~matplotlib.patches.PathPatch` constructor. The
            properties which cannot be overridden are "path", "clip_box"
            "transform" and "clip_path".
        """
        super().__init__(offset=offset)
        # The patch is created once with an empty path; the path is swapped
        # in at draw time.
        self.patch = mpatches.PathPatch([], **kwargs)

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        patch = self.patch
        patch._path = tpath
        patch.set_transform(affine + self._offset_transform(renderer))
        patch.set_clip_box(gc.get_clip_rectangle())
        clip_path = gc.get_clip_path()
        # Honour the gc's clip path unless the patch already has its own.
        if clip_path and patch.get_clip_path() is None:
            patch.set_clip_path(*clip_path)
        patch.draw(renderer)
class TickedStroke(AbstractPathEffect):
    """
    A line-based PathEffect which draws a path with a ticked style.

    This line style is frequently used to represent constraints in
    optimization.  The ticks may be used to indicate that one side
    of the line is invalid or to represent a closed boundary of a
    domain (i.e. a wall or the edge of a pipe).

    The spacing, length, and angle of ticks can be controlled.

    This line style is sometimes referred to as a hatched line.

    See also the :doc:`/gallery/misc/tickedstroke_demo` example.
    """

    def __init__(self, offset=(0, 0),
                 spacing=10.0, angle=45.0, length=np.sqrt(2),
                 **kwargs):
        """
        Parameters
        ----------
        offset : (float, float), default: (0, 0)
            The (x, y) offset to apply to the path, in points.
        spacing : float, default: 10.0
            The spacing between ticks in points.
        angle : float, default: 45.0
            The angle between the path and the tick in degrees.  The angle
            is measured as if you were an ant walking along the curve, with
            zero degrees pointing directly ahead, 90 to your left, -90
            to your right, and 180 behind you.  To change side of the ticks,
            change sign of the angle.
        length : float, default: 1.414
            The length of the tick relative to spacing.
            Recommended length = 1.414 (sqrt(2)) when angle=45, length=1.0
            when angle=90 and length=2.0 when angle=60.
        **kwargs
            Extra keywords are stored and passed through to
            :meth:`AbstractPathEffect._update_gc`.

        Examples
        --------
        See :doc:`/gallery/misc/tickedstroke_demo`.
        """
        super().__init__(offset)

        self._spacing = spacing
        self._angle = angle
        self._length = length
        # Keywords forwarded to AbstractPathEffect._update_gc at draw time.
        self._gc = kwargs

    def draw_path(self, renderer, gc, tpath, affine, rgbFace):
        """Draw the path with updated gc."""
        # Do not modify the input! Use copy instead.
        gc0 = renderer.new_gc()
        gc0.copy_properties(gc)

        gc0 = self._update_gc(gc0, self._gc)
        trans = affine + self._offset_transform(renderer)

        # 2x2 rotation matrix for turning curve tangents into tick
        # directions (negated so positive angles point to the ant's left).
        theta = -np.radians(self._angle)
        trans_matrix = np.array([[np.cos(theta), -np.sin(theta)],
                                 [np.sin(theta), np.cos(theta)]])

        # Convert spacing parameter to pixels.
        spacing_px = renderer.points_to_pixels(self._spacing)

        # Transform before evaluation because to_polygons works at resolution
        # of one -- assuming it is working in pixel space.
        transpath = affine.transform_path(tpath)

        # Evaluate path to straight line segments that can be used to
        # construct line ticks.
        polys = transpath.to_polygons(closed_only=False)

        for p in polys:
            x = p[:, 0]
            y = p[:, 1]

            # Can not interpolate points or draw line if only one point in
            # polyline.
            if x.size < 2:
                continue

            # Find distance between points on the line
            ds = np.hypot(x[1:] - x[:-1], y[1:] - y[:-1])

            # Build parametric coordinate along curve
            s = np.concatenate(([0.0], np.cumsum(ds)))
            s_total = s[-1]

            num = int(np.ceil(s_total / spacing_px)) - 1
            # Pick parameter values for ticks.
            s_tick = np.linspace(spacing_px/2, s_total - spacing_px/2, num)

            # Find points along the parameterized curve
            x_tick = np.interp(s_tick, s, x)
            y_tick = np.interp(s_tick, s, y)

            # Find unit vectors in local direction of curve (forward
            # finite difference with a small parametric step).
            delta_s = self._spacing * .001
            u = (np.interp(s_tick + delta_s, s, x) - x_tick) / delta_s
            v = (np.interp(s_tick + delta_s, s, y) - y_tick) / delta_s

            # Normalize slope into unit slope vector.
            n = np.hypot(u, v)
            mask = n == 0
            n[mask] = 1.0

            uv = np.array([u / n, v / n]).T
            uv[mask] = np.array([0, 0]).T

            # Rotate and scale unit vector into tick vector
            dxy = np.dot(uv, trans_matrix) * self._length * spacing_px

            # Build tick endpoints
            x_end = x_tick + dxy[:, 0]
            y_end = y_tick + dxy[:, 1]

            # Interleave ticks to form Path vertices
            xyt = np.empty((2 * num, 2), dtype=x_tick.dtype)
            xyt[0::2, 0] = x_tick
            xyt[1::2, 0] = x_end
            xyt[0::2, 1] = y_tick
            xyt[1::2, 1] = y_end

            # Build up vector of Path codes
            codes = np.tile([Path.MOVETO, Path.LINETO], num)

            # Construct and draw resulting path
            h = Path(xyt, codes)
            # Transform back to data space during render
            renderer.draw_path(gc0, h, affine.inverted() + trans, rgbFace)

        gc0.restore()
withTickedStroke = _subclass_with_normal(effect_class=TickedStroke)
venv\Lib\site-packages\matplotlib\pylab.py
"""
`pylab` is a historic interface and its use is strongly discouraged. The equivalent
replacement is `matplotlib.pyplot`. See :ref:`api_interfaces` for a full overview
of Matplotlib interfaces.
`pylab` was designed to support a MATLAB-like way of working with all plotting related
functions directly available in the global namespace. This was achieved through a
wildcard import (``from pylab import *``).
.. warning::
The use of `pylab` is discouraged for the following reasons:
``from pylab import *`` imports all the functions from `matplotlib.pyplot`, `numpy`,
`numpy.fft`, `numpy.linalg`, and `numpy.random`, and some additional functions into
the global namespace.
Such a pattern is considered bad practice in modern python, as it clutters the global
namespace. Even more severely, in the case of `pylab`, this will overwrite some
builtin functions (e.g. the builtin `sum` will be replaced by `numpy.sum`), which
can lead to unexpected behavior.
"""
from matplotlib.cbook import flatten, silent_list
import matplotlib as mpl
from matplotlib.dates import (
date2num, num2date, datestr2num, drange, DateFormatter, DateLocator,
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator, DayLocator,
HourLocator, MinuteLocator, SecondLocator, rrule, MO, TU, WE, TH, FR,
SA, SU, YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY,
relativedelta)
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
## We are still importing too many things from mlab; more cleanup is needed.
from matplotlib.mlab import (
detrend, detrend_linear, detrend_mean, detrend_none, window_hanning,
window_none)
from matplotlib import cbook, mlab, pyplot as plt
from matplotlib.pyplot import *
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
import numpy as np
import numpy.ma as ma
# don't let numpy's datetime hide stdlib
import datetime
# This is needed, or bytes will be numpy.random.bytes from
# "from numpy.random import *" above
bytes = __import__("builtins").bytes
# We also don't want the numpy version of these functions
# (restore the builtins shadowed by "from numpy import *" above).
abs = __import__("builtins").abs
bool = __import__("builtins").bool
max = __import__("builtins").max
min = __import__("builtins").min
pow = __import__("builtins").pow
round = __import__("builtins").round
venv\Lib\site-packages\matplotlib\pyplot.py
# Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
`matplotlib.pyplot` is a state-based interface to matplotlib. It provides
an implicit, MATLAB-like, way of plotting. It also opens figures on your
screen, and acts as the figure GUI manager.
pyplot is mainly intended for interactive plots and simple cases of
programmatic plot generation::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
The explicit object-oriented API is recommended for complex plots, though
pyplot is still usually used to create the figure and often the Axes in the
figure. See `.pyplot.figure`, `.pyplot.subplots`, and
`.pyplot.subplot_mosaic` to create figures, and
:doc:`Axes API ` for the plotting methods on an Axes::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
fig, ax = plt.subplots()
ax.plot(x, y)
plt.show()
See :ref:`api_interfaces` for an explanation of the tradeoffs between the
implicit and explicit interfaces.
"""
# fmt: off
from __future__ import annotations
from contextlib import AbstractContextManager, ExitStack
from enum import Enum
import functools
import importlib
import inspect
import logging
import sys
import threading
import time
from typing import TYPE_CHECKING, cast, overload
from cycler import cycler # noqa: F401
import matplotlib
import matplotlib.colorbar
import matplotlib.image
from matplotlib import _api
# Re-exported (import x as x) for typing.
from matplotlib import get_backend as get_backend, rcParams as rcParams
from matplotlib import cm as cm # noqa: F401
from matplotlib import style as style # noqa: F401
from matplotlib import _pylab_helpers
from matplotlib import interactive # noqa: F401
from matplotlib import cbook
from matplotlib import _docstring
from matplotlib.backend_bases import (
FigureCanvasBase, FigureManagerBase, MouseButton)
from matplotlib.figure import Figure, FigureBase, figaspect
from matplotlib.gridspec import GridSpec, SubplotSpec
from matplotlib import rcsetup, rcParamsDefault, rcParamsOrig
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.axes import Subplot # noqa: F401
from matplotlib.backends import BackendFilter, backend_registry
from matplotlib.projections import PolarAxes
from matplotlib.colorizer import _ColorizerInterface, ColorizingArtist, Colorizer
from matplotlib import mlab # for detrend_none, window_hanning
from matplotlib.scale import get_scale_names # noqa: F401
from matplotlib.cm import _colormaps
from matplotlib.colors import _color_sequences, Colormap
import numpy as np
if TYPE_CHECKING:
from collections.abc import Callable, Hashable, Iterable, Sequence
import datetime
import pathlib
import os
from typing import Any, BinaryIO, Literal, TypeVar
from typing_extensions import ParamSpec
import PIL.Image
from numpy.typing import ArrayLike
import matplotlib.axes
import matplotlib.artist
import matplotlib.backend_bases
from matplotlib.axis import Tick
from matplotlib.axes._base import _AxesBase
from matplotlib.backend_bases import Event
from matplotlib.cm import ScalarMappable
from matplotlib.contour import ContourSet, QuadContourSet
from matplotlib.collections import (
Collection,
FillBetweenPolyCollection,
LineCollection,
PolyCollection,
PathCollection,
EventCollection,
QuadMesh,
)
from matplotlib.colorbar import Colorbar
from matplotlib.container import (
BarContainer,
ErrorbarContainer,
StemContainer,
)
from matplotlib.figure import SubFigure
from matplotlib.legend import Legend
from matplotlib.mlab import GaussianKDE
from matplotlib.image import AxesImage, FigureImage
from matplotlib.patches import FancyArrow, StepPatch, Wedge
from matplotlib.quiver import Barbs, Quiver, QuiverKey
from matplotlib.scale import ScaleBase
from matplotlib.typing import (
ColorType,
CoordsType,
HashableList,
LineStyleType,
MarkerType,
)
from matplotlib.widgets import SubplotTool
_P = ParamSpec('_P')
_R = TypeVar('_R')
_T = TypeVar('_T')
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.lines import Line2D, AxLine
from matplotlib.text import Text, Annotation
from matplotlib.patches import Arrow, Circle, Rectangle # noqa: F401
from matplotlib.patches import Polygon
from matplotlib.widgets import Button, Slider, Widget # noqa: F401
from .ticker import ( # noqa: F401
TickHelper, Formatter, FixedFormatter, NullFormatter, FuncFormatter,
FormatStrFormatter, ScalarFormatter, LogFormatter, LogFormatterExponent,
LogFormatterMathtext, Locator, IndexLocator, FixedLocator, NullLocator,
LinearLocator, LogLocator, AutoLocator, MultipleLocator, MaxNLocator)
# Module-level logger for pyplot.
_log = logging.getLogger(__name__)

# Explicit rename instead of import-as for typing's sake.
colormaps = _colormaps
color_sequences = _color_sequences
@overload
def _copy_docstring_and_deprecators(
    method: Any,
    func: Literal[None] = None
) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]: ...


@overload
def _copy_docstring_and_deprecators(
    method: Any, func: Callable[_P, _R]) -> Callable[_P, _R]: ...


def _copy_docstring_and_deprecators(
    method: Any,
    func: Callable[_P, _R] | None = None
) -> Callable[[Callable[_P, _R]], Callable[_P, _R]] | Callable[_P, _R]:
    """
    Copy the docstring and any deprecation decorators of *method* onto *func*.

    Usable either as ``@_copy_docstring_and_deprecators(method)`` (decorator
    factory) or as a direct two-argument call.  Also appends a "pyplot note"
    to the resulting docstring via `_add_pyplot_note`.
    """
    if func is None:
        # Called with only *method*: return a decorator bound to it.
        return cast('Callable[[Callable[_P, _R]], Callable[_P, _R]]',
                    functools.partial(_copy_docstring_and_deprecators, method))
    decorators: list[Callable[[Callable[_P, _R]], Callable[_P, _R]]] = [
        _docstring.copy(method)
    ]
    # Check whether the definition of *method* includes @_api.rename_parameter
    # or @_api.make_keyword_only decorators; if so, propagate them to the
    # pyplot wrapper as well.
    while hasattr(method, "__wrapped__"):
        potential_decorator = _api.deprecation.DECORATORS.get(method)
        if potential_decorator:
            decorators.append(potential_decorator)
        method = method.__wrapped__
    # Apply innermost-first so decoration order matches the original method.
    for decorator in decorators[::-1]:
        func = decorator(func)
    _add_pyplot_note(func, method)
    return func
# Qualnames of wrapped functions for which _add_pyplot_note should NOT
# append a "pyplot wrapper" note to the docstring.
_NO_PYPLOT_NOTE = [
    'FigureBase._gci',  # wrapped_func is private
    '_AxesBase._sci',  # wrapped_func is private
    'Artist.findobj',  # not a standard pyplot wrapper because it does not operate
                       # on the current Figure / Axes. Explanation of relation would
                       # be more complex and is not too important.
]
def _add_pyplot_note(func, wrapped_func):
    """
    Add a note to the docstring of *func* that it is a pyplot wrapper.

    The note is added to the "Notes" section of the docstring. If that does
    not exist, a "Notes" section is created. In numpydoc, the "Notes"
    section is the third last possible section, only potentially followed by
    "References" and "Examples".
    """
    if not func.__doc__:
        return  # nothing to do

    qualname = wrapped_func.__qualname__
    if qualname in _NO_PYPLOT_NOTE:
        return

    wrapped_func_is_method = True
    # Map the wrapped object's qualname to a Sphinx cross-reference target.
    if "." not in qualname:
        # method qualnames are prefixed by the class and ".", e.g. "Axes.plot"
        wrapped_func_is_method = False
        link = f"{wrapped_func.__module__}.{qualname}"
    elif qualname.startswith("Axes."):  # e.g. "Axes.plot"
        link = ".axes." + qualname
    elif qualname.startswith("_AxesBase."):  # e.g. "_AxesBase.set_xlabel"
        link = ".axes.Axes" + qualname[9:]
    elif qualname.startswith("Figure."):  # e.g. "Figure.figimage"
        link = "." + qualname
    elif qualname.startswith("FigureBase."):  # e.g. "FigureBase.gca"
        link = ".Figure" + qualname[10:]
    elif qualname.startswith("FigureCanvasBase."):  # "FigureBaseCanvas.mpl_connect"
        link = "." + qualname
    else:
        raise RuntimeError(f"Wrapped method from unexpected class: {qualname}")

    if wrapped_func_is_method:
        message = f"This is the :ref:`pyplot wrapper <pyplot_interface>` for `{link}`."
    else:
        message = f"This is equivalent to `{link}`."

    # Find the correct insert position:
    # - either we already have a "Notes" section into which we can insert
    # - or we create one before the next present section. Note that in numpydoc, the
    #   "Notes" section is the third last possible section, only potentially followed
    #   by "References" and "Examples".
    # - or we append a new "Notes" section at the end.
    doc = inspect.cleandoc(func.__doc__)
    if "\nNotes\n-----" in doc:
        before, after = doc.split("\nNotes\n-----", 1)
    elif (index := doc.find("\nReferences\n----------")) != -1:
        before, after = doc[:index], doc[index:]
    elif (index := doc.find("\nExamples\n--------")) != -1:
        before, after = doc[:index], doc[index:]
    else:
        # No "Notes", "References", or "Examples" --> append to the end.
        before = doc + "\n"
        after = ""

    func.__doc__ = f"{before}\nNotes\n-----\n\n.. note::\n\n    {message}\n{after}"
## Global ##

# The state controlled by {,un}install_repl_displayhook():
# NONE (no hook installed), PLAIN (vanilla interpreter, nothing to hook),
# IPYTHON (registered on IPython's post_execute event).
_ReplDisplayHook = Enum("_ReplDisplayHook", ["NONE", "PLAIN", "IPYTHON"])
_REPL_DISPLAYHOOK = _ReplDisplayHook.NONE
def _draw_all_if_interactive() -> None:
    # REPL post-execute callback: redraw all managed figures, but only when
    # interactive mode is on.
    if not matplotlib.is_interactive():
        return
    draw_all()
def install_repl_displayhook() -> None:
    """
    Connect to the display hook of the current shell.

    The display hook gets called when the read-evaluate-print-loop (REPL) of
    the shell has finished the execution of a command. We use this callback
    to be able to automatically update a figure in interactive mode.

    This works both with IPython and with vanilla python shells.
    """
    global _REPL_DISPLAYHOOK

    if _REPL_DISPLAYHOOK is _ReplDisplayHook.IPYTHON:
        return  # already registered with IPython; nothing more to do

    # See if we have IPython hooks around, if so use them.
    # Use ``sys.modules.get(name)`` rather than ``name in sys.modules`` as
    # entries can also have been explicitly set to None.
    mod_ipython = sys.modules.get("IPython")
    if not mod_ipython:
        _REPL_DISPLAYHOOK = _ReplDisplayHook.PLAIN
        return
    ip = mod_ipython.get_ipython()
    if not ip:
        # IPython is importable but no shell is actually running.
        _REPL_DISPLAYHOOK = _ReplDisplayHook.PLAIN
        return

    ip.events.register("post_execute", _draw_all_if_interactive)
    _REPL_DISPLAYHOOK = _ReplDisplayHook.IPYTHON

    if mod_ipython.version_info[:2] < (8, 24):
        # Use of backend2gui is not needed for IPython >= 8.24 as that functionality
        # has been moved to Matplotlib.
        # This code can be removed when Python 3.12, the latest version supported by
        # IPython < 8.24, reaches end-of-life in late 2028.
        from IPython.core.pylabtools import backend2gui
        ipython_gui_name = backend2gui.get(get_backend())
    else:
        _, ipython_gui_name = backend_registry.resolve_backend(get_backend())
    # trigger IPython's eventloop integration, if available
    if ipython_gui_name:
        ip.enable_gui(ipython_gui_name)
def uninstall_repl_displayhook() -> None:
    """Disconnect from the display hook of the current shell."""
    global _REPL_DISPLAYHOOK
    if _REPL_DISPLAYHOOK is _ReplDisplayHook.IPYTHON:
        # Only the IPYTHON state has a callback registered that must be
        # removed; PLAIN/NONE require no teardown.
        from IPython import get_ipython
        get_ipython().events.unregister(
            "post_execute", _draw_all_if_interactive)
    _REPL_DISPLAYHOOK = _ReplDisplayHook.NONE
# Redraw all figures registered with the figure manager (Gcf).
draw_all = _pylab_helpers.Gcf.draw_all


# Ensure this appears in the pyplot docs.
@_copy_docstring_and_deprecators(matplotlib.set_loglevel)
def set_loglevel(*args, **kwargs) -> None:
    # Thin wrapper; the docstring is copied from matplotlib.set_loglevel.
    return matplotlib.set_loglevel(*args, **kwargs)
@_copy_docstring_and_deprecators(Artist.findobj)
def findobj(
    o: Artist | None = None,
    match: Callable[[Artist], bool] | type[Artist] | None = None,
    include_self: bool = True
) -> list[Artist]:
    # Default to searching from the current figure when no artist is given.
    root = gcf() if o is None else o
    return root.findobj(match, include_self=include_self)
# The backend class currently in use; None until the first backend is
# resolved by switch_backend().
_backend_mod: type[matplotlib.backend_bases._Backend] | None = None


def _get_backend_mod() -> type[matplotlib.backend_bases._Backend]:
    """
    Ensure that a backend is selected and return it.

    This is currently private, but may be made public in the future.
    """
    if _backend_mod is None:
        # Use rcParams._get("backend") to avoid going through the fallback
        # logic (which will (re)import pyplot and then call switch_backend if
        # we need to resolve the auto sentinel)
        switch_backend(rcParams._get("backend"))
    return cast(type[matplotlib.backend_bases._Backend], _backend_mod)
def switch_backend(newbackend: str) -> None:
    """
    Set the pyplot backend.

    Switching to an interactive backend is possible only if no event loop for
    another interactive backend has started. Switching to and from
    non-interactive backends is always possible.

    If the new backend is different than the current backend then all open
    Figures will be closed via ``plt.close('all')``.

    Parameters
    ----------
    newbackend : str
        The case-insensitive name of the backend to use.
    """
    global _backend_mod
    # make sure the init is pulled up so we can assign to it later
    import matplotlib.backends

    if newbackend is rcsetup._auto_backend_sentinel:
        # Auto-resolution: prefer a backend matching an already-running GUI
        # event loop, then fall through a list of common GUI backends.
        current_framework = cbook._get_running_interactive_framework()
        if (current_framework and
                (backend := backend_registry.backend_for_gui_framework(
                    current_framework))):
            candidates = [backend]
        else:
            candidates = []
        candidates += [
            "macosx", "qtagg", "gtk4agg", "gtk3agg", "tkagg", "wxagg"]
        # Don't try to fallback on the cairo-based backends as they each have
        # an additional dependency (pycairo) over the agg-based backend, and
        # are of worse quality.
        for candidate in candidates:
            try:
                switch_backend(candidate)
            except ImportError:
                continue
            else:
                rcParamsOrig['backend'] = candidate
                return
        else:
            # Switching to Agg should always succeed; if it doesn't, let the
            # exception propagate out.
            switch_backend("agg")
            rcParamsOrig["backend"] = "agg"
            return
    # NOTE(review): old_backend is assigned but not referenced anywhere in
    # this function as visible here -- verify against upstream whether the
    # close('all')-on-backend-change logic was intentionally removed.
    old_backend = rcParams._get('backend')  # get without triggering backend resolution

    module = backend_registry.load_backend_module(newbackend)
    canvas_class = module.FigureCanvas

    required_framework = canvas_class.required_interactive_framework
    if required_framework is not None:
        current_framework = cbook._get_running_interactive_framework()
        if (current_framework and required_framework
                and current_framework != required_framework):
            raise ImportError(
                "Cannot load backend {!r} which requires the {!r} interactive "
                "framework, as {!r} is currently running".format(
                    newbackend, required_framework, current_framework))

    # Load the new_figure_manager() and show() functions from the backend.

    # Classically, backends can directly export these functions. This should
    # keep working for backcompat.
    new_figure_manager = getattr(module, "new_figure_manager", None)
    show = getattr(module, "show", None)

    # In that classical approach, backends are implemented as modules, but
    # "inherit" default method implementations from backend_bases._Backend.
    # This is achieved by creating a "class" that inherits from
    # backend_bases._Backend and whose body is filled with the module globals.
    class backend_mod(matplotlib.backend_bases._Backend):
        locals().update(vars(module))

    # However, the newer approach for defining new_figure_manager and
    # show is to derive them from canvas methods. In that case, also
    # update backend_mod accordingly; also, per-backend customization of
    # draw_if_interactive is disabled.
    if new_figure_manager is None:

        def new_figure_manager_given_figure(num, figure):
            return canvas_class.new_manager(figure, num)

        def new_figure_manager(num, *args, FigureClass=Figure, **kwargs):
            fig = FigureClass(*args, **kwargs)
            return new_figure_manager_given_figure(num, fig)

        def draw_if_interactive() -> None:
            if matplotlib.is_interactive():
                manager = _pylab_helpers.Gcf.get_active()
                if manager:
                    manager.canvas.draw_idle()

        backend_mod.new_figure_manager_given_figure = (  # type: ignore[method-assign]
            new_figure_manager_given_figure)
        backend_mod.new_figure_manager = (  # type: ignore[method-assign]
            new_figure_manager)
        backend_mod.draw_if_interactive = (  # type: ignore[method-assign]
            draw_if_interactive)

    # If the manager explicitly overrides pyplot_show, use it even if a global
    # show is already present, as the latter may be here for backcompat.
    manager_class = getattr(canvas_class, "manager_class", None)
    # We can't compare directly manager_class.pyplot_show and FMB.pyplot_show because
    # pyplot_show is a classmethod so the above constructs are bound classmethods, and
    # thus always different (being bound to different classes). We also have to use
    # getattr_static instead of vars as manager_class could have no __dict__.
    manager_pyplot_show = inspect.getattr_static(manager_class, "pyplot_show", None)
    base_pyplot_show = inspect.getattr_static(FigureManagerBase, "pyplot_show", None)
    if (show is None
            or (manager_pyplot_show is not None
                and manager_pyplot_show != base_pyplot_show)):
        if not manager_pyplot_show:
            raise ValueError(
                f"Backend {newbackend} defines neither FigureCanvas.manager_class nor "
                f"a toplevel show function")
        _pyplot_show = cast('Any', manager_class).pyplot_show
        backend_mod.show = _pyplot_show  # type: ignore[method-assign]

    _log.debug("Loaded backend %s version %s.",
               newbackend, backend_mod.backend_version)

    if newbackend in ("ipympl", "widget"):
        # ipympl < 0.9.4 expects rcParams["backend"] to be the fully-qualified backend
        # name "module://ipympl.backend_nbagg" not short names "ipympl" or "widget".
        import importlib.metadata as im
        from matplotlib import _parse_to_version_info  # type: ignore[attr-defined]
        try:
            module_version = im.version("ipympl")
            if _parse_to_version_info(module_version) < (0, 9, 4):
                newbackend = "module://ipympl.backend_nbagg"
        except im.PackageNotFoundError:
            pass

    rcParams['backend'] = rcParamsDefault['backend'] = newbackend
    _backend_mod = backend_mod
    # The module-level stubs new_figure_manager / draw_if_interactive / show
    # advertise the signatures of the freshly loaded backend.
    for func_name in ["new_figure_manager", "draw_if_interactive", "show"]:
        globals()[func_name].__signature__ = inspect.signature(
            getattr(backend_mod, func_name))

    # Need to keep a global reference to the backend for compatibility reasons.
    # See https://github.com/matplotlib/matplotlib/issues/6092
    matplotlib.backends.backend = newbackend  # type: ignore[attr-defined]

    # Make sure the repl display hook is installed in case we become interactive.
    install_repl_displayhook()
def _warn_if_gui_out_of_main_thread() -> None:
    """Warn if a GUI backend is being started from a non-main thread."""
    canvas_class = cast(type[FigureCanvasBase], _get_backend_mod().FigureCanvas)
    if not canvas_class.required_interactive_framework:
        # Non-interactive backend: no GUI toolkit involved, nothing to warn about.
        return
    if hasattr(threading, 'get_native_id'):
        # Compare native thread ids because even if Python-level Thread
        # objects match, the underlying OS thread (which is what really
        # matters) may be different on Python implementations with green
        # threads.
        off_main = threading.get_native_id() != threading.main_thread().native_id
    else:
        # Fall back to Python-level Thread if native IDs are unavailable,
        # mainly for PyPy.
        off_main = threading.current_thread() is not threading.main_thread()
    if off_main:
        _api.warn_external(
            "Starting a Matplotlib GUI outside of the main thread will likely "
            "fail.")
# This function's signature is rewritten upon backend-load by switch_backend.
def new_figure_manager(*args, **kwargs):
    """Create a new figure manager instance."""
    _warn_if_gui_out_of_main_thread()
    backend = _get_backend_mod()
    return backend.new_figure_manager(*args, **kwargs)
# This function's signature is rewritten upon backend-load by switch_backend.
def draw_if_interactive(*args, **kwargs):
    """
    Redraw the current figure if in interactive mode.

    .. warning::

        End users will typically not have to call this function because the
        interactive mode takes care of this.
    """
    # Delegate to the currently loaded backend module's implementation.
    return _get_backend_mod().draw_if_interactive(*args, **kwargs)
# This function's signature is rewritten upon backend-load by switch_backend.
def show(*args, **kwargs) -> None:
    """
    Display all open figures.

    Parameters
    ----------
    block : bool, optional
        Whether to wait for all figures to be closed before returning.

        If `True` block and run the GUI main loop until all figure windows
        are closed.

        If `False` ensure that all figure windows are displayed and return
        immediately.  In this case, you are responsible for ensuring
        that the event loop is running to have responsive figures.

        Defaults to True in non-interactive mode and to False in interactive
        mode (see `.pyplot.isinteractive`).

    See Also
    --------
    ion : Enable interactive mode, which shows / updates the figure after
          every plotting command, so that calling ``show()`` is not necessary.
    ioff : Disable interactive mode.
    savefig : Save the figure to an image file instead of showing it on screen.

    Notes
    -----
    **Saving figures to file and showing a window at the same time**

    If you want an image file as well as a user interface window, use
    `.pyplot.savefig` before `.pyplot.show`.  At the end of (a blocking)
    ``show()`` the figure is closed and thus unregistered from pyplot.  Calling
    `.pyplot.savefig` afterwards would save a new and thus empty figure.  This
    limitation of command order does not apply if the show is non-blocking or
    if you keep a reference to the figure and use `.Figure.savefig`.

    **Auto-show in jupyter notebooks**

    The jupyter backends (activated via ``%matplotlib inline``,
    ``%matplotlib notebook``, or ``%matplotlib widget``), call ``show()`` at
    the end of every cell by default.  Thus, you usually don't have to call it
    explicitly there.
    """
    # GUI backends generally cannot be started off the main thread; warn early.
    _warn_if_gui_out_of_main_thread()
    # Delegate to the currently loaded backend module's ``show``.
    return _get_backend_mod().show(*args, **kwargs)
def isinteractive() -> bool:
    """
    Return whether plots are updated after every plotting command.

    The interactive mode is mainly useful if you build plots from the command
    line and want to see the effect of each command while you are building the
    figure.

    In interactive mode:

    - newly created figures will be shown immediately;
    - figures will automatically redraw on change;
    - `.pyplot.show` will not block by default.

    In non-interactive mode:

    - newly created figures and changes to figures will not be reflected until
      explicitly asked to be;
    - `.pyplot.show` will block by default.

    See Also
    --------
    ion : Enable interactive mode.
    ioff : Disable interactive mode.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.
    """
    # Pure delegation: the interactive flag lives in the matplotlib package.
    return matplotlib.is_interactive()
# Note: The return type of ioff being AbstractContextManager
# instead of ExitStack is deliberate.
# See https://github.com/matplotlib/matplotlib/issues/27659
# and https://github.com/matplotlib/matplotlib/pull/27667 for more info.
def ioff() -> AbstractContextManager:
    """
    Disable interactive mode.

    See `.pyplot.isinteractive` for more details.

    This function also works as a context manager, temporarily disabling
    interactivity for the duration of the ``with`` block::

        plt.ion()
        fig = plt.figure()        # shown immediately
        with plt.ioff():
            fig2 = plt.figure()   # not shown automatically

    The returned context manager object is not intended to be stored or
    accessed by the user.

    See Also
    --------
    ion : Enable interactive mode.
    isinteractive : Whether interactive mode is enabled.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.
    """
    # Capture how to restore the *current* state before we change anything,
    # so the returned context manager can undo this call on exit.
    restore = ion if isinteractive() else ioff
    stack = ExitStack()
    stack.callback(restore)
    matplotlib.interactive(False)
    uninstall_repl_displayhook()
    return stack
# Note: The return type of ion being AbstractContextManager
# instead of ExitStack is deliberate.
# See https://github.com/matplotlib/matplotlib/issues/27659
# and https://github.com/matplotlib/matplotlib/pull/27667 for more info.
def ion() -> AbstractContextManager:
    """
    Enable interactive mode.

    See `.pyplot.isinteractive` for more details.

    This function also works as a context manager, temporarily enabling
    interactivity for the duration of the ``with`` block::

        plt.ioff()
        fig = plt.figure()        # not shown automatically
        with plt.ion():
            fig2 = plt.figure()   # shown immediately

    The returned context manager object is not intended to be stored or
    accessed by the user.

    See Also
    --------
    ioff : Disable interactive mode.
    isinteractive : Whether interactive mode is enabled.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.
    """
    # Capture how to restore the *current* state before we change anything,
    # so the returned context manager can undo this call on exit.
    restore = ion if isinteractive() else ioff
    stack = ExitStack()
    stack.callback(restore)
    matplotlib.interactive(True)
    install_repl_displayhook()
    return stack
def pause(interval: float) -> None:
    """
    Run the GUI event loop for *interval* seconds.

    If there is an active figure, it will be updated and displayed before the
    pause, and the GUI event loop (if any) will run during the pause.  This
    can be used for crude animation; for more complex animation use
    :mod:`matplotlib.animation`.

    If there is no active figure, sleep for *interval* seconds instead.

    See Also
    --------
    matplotlib.animation : Proper animations
    show : Show all figures and optional block until all figures are closed.
    """
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        # No figure to animate: just block for the requested time.
        time.sleep(interval)
        return
    canvas = manager.canvas
    if canvas.figure.stale:
        canvas.draw_idle()
    show(block=False)
    canvas.start_event_loop(interval)
@_copy_docstring_and_deprecators(matplotlib.rc)
def rc(group: str, **kwargs) -> None:
    # Thin pyplot alias; the docstring is copied from `matplotlib.rc` by the
    # decorator above.
    matplotlib.rc(group, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.rc_context)
def rc_context(
    rc: dict[str, Any] | None = None,
    fname: str | pathlib.Path | os.PathLike | None = None,
) -> AbstractContextManager[None]:
    # Thin pyplot alias; docstring is supplied by the decorator.
    return matplotlib.rc_context(rc=rc, fname=fname)
@_copy_docstring_and_deprecators(matplotlib.rcdefaults)
def rcdefaults() -> None:
    # Reset rcParams, then refresh open figures so the reset is visible
    # immediately when in interactive mode.
    matplotlib.rcdefaults()
    if not matplotlib.is_interactive():
        return
    draw_all()
# getp/get/setp are explicitly reexported so that they show up in pyplot docs.
@_copy_docstring_and_deprecators(matplotlib.artist.getp)
def getp(obj, *args, **kwargs):
    # Thin pyplot alias for `matplotlib.artist.getp`; docstring comes from
    # the decorator.
    return matplotlib.artist.getp(obj, *args, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.artist.get)
def get(obj, *args, **kwargs):
    # Thin pyplot alias for `matplotlib.artist.get`; docstring comes from
    # the decorator.  NOTE: shadows the builtin-free module namespace name
    # "get" by design (historical pyplot API).
    return matplotlib.artist.get(obj, *args, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.artist.setp)
def setp(obj, *args, **kwargs):
    # Thin pyplot alias for `matplotlib.artist.setp`; docstring comes from
    # the decorator.
    return matplotlib.artist.setp(obj, *args, **kwargs)
def xkcd(
    scale: float = 1, length: float = 100, randomness: float = 2
) -> ExitStack:
    """
    Turn on xkcd sketch-style drawing mode.

    This will only have an effect on things drawn after this function is
    called.  For best results, install the xkcd script font; xkcd fonts are
    not packaged with Matplotlib.

    Parameters
    ----------
    scale : float, optional
        The amplitude of the wiggle perpendicular to the source line.
    length : float, optional
        The length of the wiggle along the line.
    randomness : float, optional
        The scale factor by which the length is shrunken or expanded.

    Notes
    -----
    This function works by overriding a number of rcParams, so it will
    probably override others you have set before.

    If you want the effects of this function to be temporary, it can be used
    as a context manager, for example::

        with plt.xkcd():
            fig1 = plt.figure()   # xkcd style
            # ...
        fig2 = plt.figure()       # regular style
    """
    # This cannot be implemented in terms of contextmanager() or rc_context()
    # because this needs to work as a non-contextmanager too.
    if rcParams['text.usetex']:
        raise RuntimeError(
            "xkcd mode is not compatible with text.usetex = True")

    stack = ExitStack()
    # Snapshot the current rcParams so the context manager can restore them.
    stack.callback(rcParams._update_raw, rcParams.copy())  # type: ignore[arg-type]

    from matplotlib import patheffects
    sketch_overrides = {
        'font.family': ['xkcd', 'xkcd Script', 'Comic Neue', 'Comic Sans MS'],
        'font.size': 14.0,
        'path.sketch': (scale, length, randomness),
        'path.effects': [
            patheffects.withStroke(linewidth=4, foreground="w")],
        'axes.linewidth': 1.5,
        'lines.linewidth': 2.0,
        'figure.facecolor': 'white',
        'grid.linewidth': 0.0,
        'axes.grid': False,
        'axes.unicode_minus': False,
        'axes.edgecolor': 'black',
        'xtick.major.size': 8,
        'xtick.major.width': 3,
        'ytick.major.size': 8,
        'ytick.major.width': 3,
    }
    rcParams.update(sketch_overrides)
    return stack
## Figures ##
def figure(
    # autoincrement if None, else integer from 1-N
    num: int | str | Figure | SubFigure | None = None,
    # defaults to rc figure.figsize
    figsize: ArrayLike | None = None,
    # defaults to rc figure.dpi
    dpi: float | None = None,
    *,
    # defaults to rc figure.facecolor
    facecolor: ColorType | None = None,
    # defaults to rc figure.edgecolor
    edgecolor: ColorType | None = None,
    frameon: bool = True,
    FigureClass: type[Figure] = Figure,
    clear: bool = False,
    **kwargs
) -> Figure:
    """
    Create a new figure, or activate an existing figure.

    Parameters
    ----------
    num : int or str or `.Figure` or `.SubFigure`, optional
        A unique identifier for the figure.

        If a figure with that identifier already exists, this figure is made
        active and returned. An integer refers to the ``Figure.number``
        attribute, a string refers to the figure label.

        If there is no figure with the identifier or *num* is not given, a new
        figure is created, made active and returned.  If *num* is an int, it
        will be used for the ``Figure.number`` attribute, otherwise, an
        auto-generated integer value is used (starting at 1 and incremented
        for each new figure). If *num* is a string, the figure label and the
        window title is set to this value.  If num is a ``SubFigure``, its
        parent ``Figure`` is activated.

    figsize : (float, float), default: :rc:`figure.figsize`
        Width, height in inches.

    dpi : float, default: :rc:`figure.dpi`
        The resolution of the figure in dots-per-inch.

    facecolor : :mpltype:`color`, default: :rc:`figure.facecolor`
        The background color.

    edgecolor : :mpltype:`color`, default: :rc:`figure.edgecolor`
        The border color.

    frameon : bool, default: True
        If False, suppress drawing the figure frame.

    FigureClass : subclass of `~matplotlib.figure.Figure`
        If set, an instance of this subclass will be created, rather than a
        plain `.Figure`.

    clear : bool, default: False
        If True and the figure already exists, then it is cleared.

    layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, None}, \
default: None
        The layout mechanism for positioning of plot elements to avoid
        overlapping Axes decorations (labels, ticks, etc). Note that layout
        managers can measurably slow down figure display.

        - 'constrained': The constrained layout solver adjusts Axes sizes
          to avoid overlapping Axes decorations.  Can handle complex plot
          layouts and colorbars, and is thus recommended.

          See :ref:`constrainedlayout_guide`
          for examples.

        - 'compressed': uses the same algorithm as 'constrained', but
          removes extra space between fixed-aspect-ratio Axes.  Best for
          simple grids of Axes.

        - 'tight': Use the tight layout mechanism. This is a relatively
          simple algorithm that adjusts the subplot parameters so that
          decorations do not overlap. See `.Figure.set_tight_layout` for
          further details.

        - 'none': Do not use a layout engine.

        - A `.LayoutEngine` instance. Builtin layout classes are
          `.ConstrainedLayoutEngine` and `.TightLayoutEngine`, more easily
          accessible by 'constrained' and 'tight'.  Passing an instance
          allows third parties to provide their own layout engine.

        If not given, fall back to using the parameters *tight_layout* and
        *constrained_layout*, including their config defaults
        :rc:`figure.autolayout` and :rc:`figure.constrained_layout.use`.

    **kwargs
        Additional keyword arguments are passed to the `.Figure` constructor.

    Returns
    -------
    `~matplotlib.figure.Figure`

    Notes
    -----
    A newly created figure is passed to the `~.FigureCanvasBase.new_manager`
    method or the `new_figure_manager` function provided by the current
    backend, which install a canvas and a manager on the figure.

    Once this is done, :rc:`figure.hooks` are called, one at a time, on the
    figure; these hooks allow arbitrary customization of the figure (e.g.,
    attaching callbacks) or of associated elements (e.g., modifying the
    toolbar).  See :doc:`/gallery/user_interfaces/mplcvd` for an example of
    toolbar customization.

    If you are creating many figures, make sure you explicitly call
    `.pyplot.close` on the figures you are not using, because this will
    enable pyplot to properly clean up the memory.

    `~matplotlib.rcParams` defines the default values, which can be modified
    in the matplotlibrc file.
    """
    allnums = get_fignums()

    if isinstance(num, FigureBase):
        # type narrowed to `Figure | SubFigure` by combination of input and isinstance
        root_fig = num.get_figure(root=True)
        if root_fig.canvas.manager is None:
            raise ValueError("The passed figure is not managed by pyplot")
        elif (any(param is not None for param in [figsize, dpi, facecolor, edgecolor])
              or not frameon or kwargs) and root_fig.canvas.manager.num in allnums:
            _api.warn_external(
                "Ignoring specified arguments in this call because figure "
                f"with num: {root_fig.canvas.manager.num} already exists")
        _pylab_helpers.Gcf.set_active(root_fig.canvas.manager)
        return root_fig

    next_num = max(allnums) + 1 if allnums else 1
    fig_label = ''
    if num is None:
        num = next_num
    else:
        if (any(param is not None for param in [figsize, dpi, facecolor, edgecolor])
                or not frameon or kwargs) and num in allnums:
            _api.warn_external(
                "Ignoring specified arguments in this call "
                f"because figure with num: {num} already exists")
        if isinstance(num, str):
            fig_label = num
            all_labels = get_figlabels()
            if fig_label not in all_labels:
                if fig_label == 'all':
                    _api.warn_external("close('all') closes all existing figures.")
                num = next_num
            else:
                # A figure with this label exists; reuse its number.
                inum = all_labels.index(fig_label)
                num = allnums[inum]
        else:
            num = int(num)  # crude validation of num argument

    # Type of "num" has narrowed to int, but mypy can't quite see it
    manager = _pylab_helpers.Gcf.get_fig_manager(num)  # type: ignore[arg-type]
    if manager is None:
        max_open_warning = rcParams['figure.max_open_warning']
        if len(allnums) == max_open_warning >= 1:
            _api.warn_external(
                f"More than {max_open_warning} figures have been opened. "
                f"Figures created through the pyplot interface "
                f"(`matplotlib.pyplot.figure`) are retained until explicitly "
                f"closed and may consume too much memory. (To control this "
                f"warning, see the rcParam `figure.max_open_warning`). "
                f"Consider using `matplotlib.pyplot.close()`.",
                RuntimeWarning)

        manager = new_figure_manager(
            num, figsize=figsize, dpi=dpi,
            facecolor=facecolor, edgecolor=edgecolor, frameon=frameon,
            FigureClass=FigureClass, **kwargs)
        fig = manager.canvas.figure
        if fig_label:
            fig.set_label(fig_label)

        # Run user-registered figure hooks ("module:dotted.name" specs).
        for hookspecs in rcParams["figure.hooks"]:
            module_name, dotted_name = hookspecs.split(":")
            obj: Any = importlib.import_module(module_name)
            for part in dotted_name.split("."):
                obj = getattr(obj, part)
            obj(fig)

        _pylab_helpers.Gcf._set_new_active_manager(manager)

        # make sure backends (inline) that we don't ship that expect this
        # to be called in plotting commands to make the figure call show
        # still work.  There is probably a better way to do this in the
        # FigureManager base class.
        draw_if_interactive()

        if _REPL_DISPLAYHOOK is _ReplDisplayHook.PLAIN:
            fig.stale_callback = _auto_draw_if_interactive

    if clear:
        manager.canvas.figure.clear()

    return manager.canvas.figure
def _auto_draw_if_interactive(fig, val):
    """
    An internal helper function for making sure that auto-redrawing
    works as intended in the plain python repl.

    Parameters
    ----------
    fig : Figure
        A figure object which is assumed to be associated with a canvas
    """
    if not val or not matplotlib.is_interactive():
        return
    canvas = fig.canvas
    if canvas.is_saving() or canvas._is_idle_drawing:
        return
    # Some artists can mark themselves as stale in the middle of drawing
    # (e.g. axes position & tick labels being computed at draw time), but
    # this shouldn't trigger a redraw because the current redraw will
    # already take them into account.
    with canvas._idle_draw_cntx():
        canvas.draw_idle()
def gcf() -> Figure:
    """
    Get the current figure.

    If there is currently no figure on the pyplot figure stack, a new one is
    created using `~.pyplot.figure()`.  (To test whether there is currently a
    figure on the pyplot figure stack, check whether `~.pyplot.get_fignums()`
    is empty.)
    """
    active = _pylab_helpers.Gcf.get_active()
    # Fall back to creating a fresh figure when none is active.
    return figure() if active is None else active.canvas.figure
def fignum_exists(num: int | str) -> bool:
    """
    Return whether the figure with the given id exists.

    Parameters
    ----------
    num : int or str
        A figure identifier.

    Returns
    -------
    bool
        Whether or not a figure with id *num* exists.
    """
    if isinstance(num, int):
        return _pylab_helpers.Gcf.has_fignum(num)
    # String identifiers are looked up among the figure labels.
    return num in get_figlabels()
def get_fignums() -> list[int]:
    """Return a list of existing figure numbers."""
    # Gcf.figs maps number -> manager; sorting its keys gives the numbers.
    return sorted(_pylab_helpers.Gcf.figs.keys())
def get_figlabels() -> list[Any]:
    """Return a list of existing figure labels."""
    # Labels are reported in figure-number order.
    return [m.canvas.figure.get_label()
            for m in sorted(_pylab_helpers.Gcf.get_all_fig_managers(),
                            key=lambda m: m.num)]
def get_current_fig_manager() -> FigureManagerBase | None:
    """
    Return the figure manager of the current figure.

    The figure manager is a container for the actual backend-depended window
    that displays the figure on screen.

    If no current figure exists, a new one is created, and its figure
    manager is returned.

    Returns
    -------
    `.FigureManagerBase` or backend-dependent subclass thereof
    """
    canvas = gcf().canvas
    return canvas.manager
@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_connect)
def connect(s: str, func: Callable[[Event], Any]) -> int:
    # Register the callback on the current figure's canvas.
    canvas = gcf().canvas
    return canvas.mpl_connect(s, func)
@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_disconnect)
def disconnect(cid: int) -> None:
    # Remove the callback registered under *cid* on the current canvas.
    canvas = gcf().canvas
    canvas.mpl_disconnect(cid)
def close(fig: None | int | str | Figure | Literal["all"] = None) -> None:
    """
    Close a figure window, and unregister it from pyplot.

    Parameters
    ----------
    fig : None or int or str or `.Figure`
        The figure to close. There are a number of ways to specify this:

        - *None*: the current figure
        - `.Figure`: the given `.Figure` instance
        - ``int``: a figure number
        - ``str``: a figure name
        - 'all': all figures

    Notes
    -----
    pyplot maintains a reference to figures created with `figure()`.  When
    work on the figure is completed, it should be closed, i.e. deregistered
    from pyplot, to free its memory (see also :rc:`figure.max_open_warning`).
    Closing a figure window created by `show()` automatically deregisters the
    figure.  For all other use cases, most prominently `savefig()` without
    `show()`, the figure must be deregistered explicitly using `close()`.
    """
    if fig is None:
        manager = _pylab_helpers.Gcf.get_active()
        if manager is None:
            # Nothing open: closing the "current" figure is a no-op.
            return
        _pylab_helpers.Gcf.destroy(manager)
    elif fig == 'all':
        _pylab_helpers.Gcf.destroy_all()
    elif isinstance(fig, int):
        _pylab_helpers.Gcf.destroy(fig)
    elif hasattr(fig, 'int'):
        # if we are dealing with a type UUID, we
        # can use its integer representation
        _pylab_helpers.Gcf.destroy(fig.int)
    elif isinstance(fig, str):
        all_labels = get_figlabels()
        if fig in all_labels:
            num = get_fignums()[all_labels.index(fig)]
            _pylab_helpers.Gcf.destroy(num)
        # An unknown label is silently ignored, matching historical behavior.
    elif isinstance(fig, Figure):
        _pylab_helpers.Gcf.destroy_fig(fig)
    else:
        # f-string instead of old-style %-formatting; rendered message is
        # unchanged.
        raise TypeError("close() argument must be a Figure, an int, a string, "
                        f"or None, not {type(fig)}")
def clf() -> None:
    """Clear the current figure."""
    fig = gcf()
    fig.clear()
def draw() -> None:
    """
    Redraw the current figure.

    This is used to update a figure that has been altered, but not
    automatically re-drawn.  If interactive mode is on (via `.ion()`), this
    should be only rarely needed, but there may be ways to modify the state of
    a figure without marking it as "stale".  Please report these cases as bugs.

    This is equivalent to calling ``fig.canvas.draw_idle()``, where ``fig`` is
    the current figure.

    See Also
    --------
    .FigureCanvasBase.draw_idle
    .FigureCanvasBase.draw
    """
    fig = gcf()
    fig.canvas.draw_idle()
@_copy_docstring_and_deprecators(Figure.savefig)
def savefig(*args, **kwargs) -> None:
    fig = gcf()
    # savefig default implementation has no return, so mypy is unhappy
    # presumably this is here because subclasses can return?
    result = fig.savefig(*args, **kwargs)  # type: ignore[func-returns-value]
    # Redraw afterwards: needed if 'transparent=True' to reset colors.
    fig.canvas.draw_idle()
    return result
## Putting things in figures ##
def figlegend(*args, **kwargs) -> Legend:
    # Thin pyplot wrapper: delegate to the current figure's ``legend``.
    # The docstring is patched in below from Figure.legend.
    return gcf().legend(*args, **kwargs)

# Reuse Figure.legend's docstring, rewriting the examples to the pyplot API.
# Guarded because Figure.legend.__doc__ may be None (e.g. under ``python -OO``).
if Figure.legend.__doc__:
    figlegend.__doc__ = Figure.legend.__doc__ \
        .replace(" legend(", " figlegend(") \
        .replace("fig.legend(", "plt.figlegend(") \
        .replace("ax.plot(", "plt.plot(")
## Axes ##
@_docstring.interpd
def axes(
    arg: None | tuple[float, float, float, float] = None,
    **kwargs
) -> matplotlib.axes.Axes:
    """
    Add an Axes to the current figure and make it the current Axes.

    Call signatures::

        plt.axes()
        plt.axes(rect, projection=None, polar=False, **kwargs)
        plt.axes(ax)

    Parameters
    ----------
    arg : None or 4-tuple
        The exact behavior of this function depends on the type:

        - *None*: A new full window Axes is added using
          ``subplot(**kwargs)``.
        - 4-tuple of floats *rect* = ``(left, bottom, width, height)``.
          A new Axes is added with dimensions *rect* in normalized
          (0, 1) units using `~.Figure.add_axes` on the current figure.

    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the `~.axes.Axes`. *str* is the name of
        a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.

    polar : bool, default: False
        If True, equivalent to projection='polar'.

    sharex, sharey : `~matplotlib.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared Axes.

    label : str
        A label for the returned Axes.

    Returns
    -------
    `~.axes.Axes`, or a subclass of `~.axes.Axes`
        The returned Axes class depends on the projection used. It is
        `~.axes.Axes` if rectilinear projection is used and
        `.projections.polar.PolarAxes` if polar projection is used.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for
        the returned Axes class. The keyword arguments for the
        rectilinear Axes class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used, see the actual Axes
        class.

        %(Axes:kwdoc)s

    See Also
    --------
    .Figure.add_axes
    .pyplot.subplot
    .Figure.add_subplot
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    ::

        # Creating a new full window Axes
        plt.axes()

        # Creating a new Axes with specified dimensions and a grey background
        plt.axes((left, bottom, width, height), facecolor='grey')
    """
    fig = gcf()
    # 'position' may be passed as an alternative way to supply the rect when
    # *arg* is None.
    pos = kwargs.pop('position', None)
    if arg is None:
        if pos is None:
            return fig.add_subplot(**kwargs)
        else:
            return fig.add_axes(pos, **kwargs)
    else:
        return fig.add_axes(arg, **kwargs)
def delaxes(ax: matplotlib.axes.Axes | None = None) -> None:
    """
    Remove an `~.axes.Axes` (defaulting to the current Axes) from its figure.
    """
    target = gca() if ax is None else ax
    target.remove()
def sca(ax: Axes) -> None:
    """
    Set the current Axes to *ax* and the current Figure to the parent of *ax*.
    """
    # Mypy sees ax.figure as potentially None,
    # but if you are calling this, it won't be None
    # Additionally the slight difference between `Figure` and `FigureBase` mypy catches
    parent = ax.get_figure(root=False)
    figure(parent)  # type: ignore[arg-type]
    parent.sca(ax)  # type: ignore[union-attr]
def cla() -> None:
    """Clear the current Axes."""
    # Not generated via boilerplate.py to allow a different docstring.
    ax = gca()
    return ax.cla()
## More ways of creating Axes ##
@_docstring.interpd
def subplot(*args, **kwargs) -> Axes:
    """
    Add an Axes to the current figure or retrieve an existing Axes.

    This is a wrapper of `.Figure.add_subplot` which provides additional
    behavior when working with the implicit API (see the notes section).

    Call signatures::

       subplot(nrows, ncols, index, **kwargs)
       subplot(pos, **kwargs)
       subplot(**kwargs)
       subplot(ax)

    Parameters
    ----------
    *args : int, (int, int, *index*), or `.SubplotSpec`, default: (1, 1, 1)
        The position of the subplot described by one of

        - Three integers (*nrows*, *ncols*, *index*). The subplot will take the
          *index* position on a grid with *nrows* rows and *ncols* columns.
          *index* starts at 1 in the upper left corner and increases to the
          right. *index* can also be a two-tuple specifying the (*first*,
          *last*) indices (1-based, and including *last*) of the subplot, e.g.,
          ``fig.add_subplot(3, 1, (1, 2))`` makes a subplot that spans the
          upper 2/3 of the figure.
        - A 3-digit integer. The digits are interpreted as if given separately
          as three single-digit integers, i.e. ``fig.add_subplot(235)`` is the
          same as ``fig.add_subplot(2, 3, 5)``. Note that this can only be used
          if there are no more than 9 subplots.
        - A `.SubplotSpec`.

    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the subplot (`~.axes.Axes`). *str* is the name
        of a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.

    polar : bool, default: False
        If True, equivalent to projection='polar'.

    sharex, sharey : `~matplotlib.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey. The
        axis will have the same limits, ticks, and scale as the axis of the
        shared Axes.

    label : str
        A label for the returned Axes.

    Returns
    -------
    `~.axes.Axes`
        The Axes of the subplot. The returned Axes can actually be an instance
        of a subclass, such as `.projections.polar.PolarAxes` for polar
        projections.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for the returned Axes
        base class; except for the *figure* argument. The keyword arguments
        for the rectilinear base class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used.

        %(Axes:kwdoc)s

    Notes
    -----
    .. versionchanged:: 3.8
        In versions prior to 3.8, any preexisting Axes that overlap with the new Axes
        beyond sharing a boundary was deleted.  Deletion does not happen in more
        recent versions anymore.  Use `.Axes.remove` explicitly if needed.

    If you do not want this behavior, use the `.Figure.add_subplot` method
    or the `.pyplot.axes` function instead.

    If no *kwargs* are passed and there exists an Axes in the location
    specified by *args* then that Axes will be returned rather than a new
    Axes being created.

    If *kwargs* are passed and there exists an Axes in the location
    specified by *args*, the projection type is the same, and the
    *kwargs* match with the existing Axes, then the existing Axes is
    returned.  Otherwise a new Axes is created with the specified
    parameters.  We save a reference to the *kwargs* which we use
    for this comparison.  If any of the values in *kwargs* are
    mutable we will not detect the case where they are mutated.
    In these cases we suggest using `.Figure.add_subplot` and the
    explicit Axes API rather than the implicit pyplot API.

    See Also
    --------
    .Figure.add_subplot
    .pyplot.subplots
    .pyplot.axes
    .Figure.subplots

    Examples
    --------
    ::

        plt.subplot(221)

        # equivalent but more general
        ax1 = plt.subplot(2, 2, 1)

        # add a subplot with no frame
        ax2 = plt.subplot(222, frameon=False)

        # add a polar subplot
        plt.subplot(223, projection='polar')

        # add a red subplot that shares the x-axis with ax1
        plt.subplot(224, sharex=ax1, facecolor='red')

        # delete ax2 from the figure
        plt.delaxes(ax2)

        # add ax2 to the figure again
        plt.subplot(ax2)

        # make the first Axes "current" again
        plt.subplot(221)
    """
    # Here we will only normalize `polar=True` vs `projection='polar'` and let
    # downstream code deal with the rest.
    unset = object()
    projection = kwargs.get('projection', unset)
    polar = kwargs.pop('polar', unset)
    if polar is not unset and polar:
        # if we got mixed messages from the user, raise
        if projection is not unset and projection != 'polar':
            raise ValueError(
                f"polar={polar}, yet projection={projection!r}. "
                "Only one of these arguments should be supplied."
            )
        kwargs['projection'] = projection = 'polar'

    # if subplot called without arguments, create subplot(1, 1, 1)
    if len(args) == 0:
        args = (1, 1, 1)

    # This check was added because it is very easy to type subplot(1, 2, False)
    # when subplots(1, 2, False) was intended (sharex=False, that is). In most
    # cases, no error will ever occur, but mysterious behavior can result
    # because what was intended to be the sharex argument is instead treated as
    # a subplot index for subplot()
    if len(args) >= 3 and isinstance(args[2], bool):
        _api.warn_external("The subplot index argument to subplot() appears "
                           "to be a boolean. Did you intend to use "
                           "subplots()?")
    # Check for nrows and ncols, which are not valid subplot args:
    if 'nrows' in kwargs or 'ncols' in kwargs:
        raise TypeError("subplot() got an unexpected keyword argument 'ncols' "
                        "and/or 'nrows'.  Did you intend to call subplots()?")

    fig = gcf()

    # First, search for an existing subplot with a matching spec.
    key = SubplotSpec._from_subplot_args(fig, args)

    for ax in fig.axes:
        # If we found an Axes at the position, we can reuse it if the user passed no
        # kwargs or if the Axes class and kwargs are identical.
        if (ax.get_subplotspec() == key
                and (kwargs == {}
                     or (ax._projection_init
                         == fig._process_projection_requirements(**kwargs)))):
            break
    else:
        # we have exhausted the known Axes and none match, make a new one!
        ax = fig.add_subplot(*args, **kwargs)

    fig.sca(ax)

    return ax
# Typing overload: the default 1x1 grid with squeeze=True returns a single Axes.
@overload
def subplots(
    nrows: Literal[1] = ...,
    ncols: Literal[1] = ...,
    *,
    sharex: bool | Literal["none", "all", "row", "col"] = ...,
    sharey: bool | Literal["none", "all", "row", "col"] = ...,
    squeeze: Literal[True] = ...,
    width_ratios: Sequence[float] | None = ...,
    height_ratios: Sequence[float] | None = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    **fig_kw
) -> tuple[Figure, Axes]:
    ...
# Typing overload: squeeze=False always yields a (possibly 1x1) ndarray of Axes.
@overload
def subplots(
    nrows: int = ...,
    ncols: int = ...,
    *,
    sharex: bool | Literal["none", "all", "row", "col"] = ...,
    sharey: bool | Literal["none", "all", "row", "col"] = ...,
    squeeze: Literal[False],
    width_ratios: Sequence[float] | None = ...,
    height_ratios: Sequence[float] | None = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    **fig_kw
) -> tuple[Figure, np.ndarray]:  # TODO numpy/numpy#24738
    ...
# Typing overload: catch-all — with a non-literal squeeze the return shape is
# unknown at type-check time, so the second element is Any.
@overload
def subplots(
    nrows: int = ...,
    ncols: int = ...,
    *,
    sharex: bool | Literal["none", "all", "row", "col"] = ...,
    sharey: bool | Literal["none", "all", "row", "col"] = ...,
    squeeze: bool = ...,
    width_ratios: Sequence[float] | None = ...,
    height_ratios: Sequence[float] | None = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    **fig_kw
) -> tuple[Figure, Any]:
    ...
def subplots(
    nrows: int = 1, ncols: int = 1, *,
    sharex: bool | Literal["none", "all", "row", "col"] = False,
    sharey: bool | Literal["none", "all", "row", "col"] = False,
    squeeze: bool = True,
    width_ratios: Sequence[float] | None = None,
    height_ratios: Sequence[float] | None = None,
    subplot_kw: dict[str, Any] | None = None,
    gridspec_kw: dict[str, Any] | None = None,
    **fig_kw
) -> tuple[Figure, Any]:
    """
    Create a figure and a set of subplots.
    This utility wrapper makes it convenient to create common layouts of
    subplots, including the enclosing figure object, in a single call.
    Parameters
    ----------
    nrows, ncols : int, default: 1
        Number of rows/columns of the subplot grid.
    sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
        Controls sharing of properties among x (*sharex*) or y (*sharey*)
        axes:
        - True or 'all': x- or y-axis will be shared among all subplots.
        - False or 'none': each subplot x- or y-axis will be independent.
        - 'row': each subplot row will share an x- or y-axis.
        - 'col': each subplot column will share an x- or y-axis.
        When subplots have a shared x-axis along a column, only the x tick
        labels of the bottom subplot are created. Similarly, when subplots
        have a shared y-axis along a row, only the y tick labels of the first
        column subplot are created. To later turn other subplots' ticklabels
        on, use `~matplotlib.axes.Axes.tick_params`.
        When subplots have a shared axis that has units, calling
        `.Axis.set_units` will update each axis with the new units.
        Note that it is not possible to unshare axes.
    squeeze : bool, default: True
        - If True, extra dimensions are squeezed out from the returned
          array of `~matplotlib.axes.Axes`:
          - if only one subplot is constructed (nrows=ncols=1), the
            resulting single Axes object is returned as a scalar.
          - for Nx1 or 1xM subplots, the returned object is a 1D numpy
            object array of Axes objects.
          - for NxM, subplots with N>1 and M>1 are returned as a 2D array.
        - If False, no squeezing at all is done: the returned Axes object is
          always a 2D array containing Axes instances, even if it ends up
          being 1x1.
    width_ratios : array-like of length *ncols*, optional
        Defines the relative widths of the columns. Each column gets a
        relative width of ``width_ratios[i] / sum(width_ratios)``.
        If not given, all columns will have the same width. Equivalent
        to ``gridspec_kw={'width_ratios': [...]}``.
    height_ratios : array-like of length *nrows*, optional
        Defines the relative heights of the rows. Each row gets a
        relative height of ``height_ratios[i] / sum(height_ratios)``.
        If not given, all rows will have the same height. Convenience
        for ``gridspec_kw={'height_ratios': [...]}``.
    subplot_kw : dict, optional
        Dict with keywords passed to the
        `~matplotlib.figure.Figure.add_subplot` call used to create each
        subplot.
    gridspec_kw : dict, optional
        Dict with keywords passed to the `~matplotlib.gridspec.GridSpec`
        constructor used to create the grid the subplots are placed on.
    **fig_kw
        All additional keyword arguments are passed to the
        `.pyplot.figure` call.
    Returns
    -------
    fig : `.Figure`
    ax : `~matplotlib.axes.Axes` or array of Axes
        *ax* can be either a single `~.axes.Axes` object, or an array of Axes
        objects if more than one subplot was created. The dimensions of the
        resulting array can be controlled with the squeeze keyword, see above.
        Typical idioms for handling the return value are::
            # using the variable ax for a single Axes
            fig, ax = plt.subplots()
            # using the variable axs for multiple Axes
            fig, axs = plt.subplots(2, 2)
            # using tuple unpacking for multiple Axes
            fig, (ax1, ax2) = plt.subplots(1, 2)
            fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
        The names ``ax`` and pluralized ``axs`` are preferred over ``axes``
        because for the latter it's not clear if it refers to a single
        `~.axes.Axes` instance or a collection of these.
    See Also
    --------
    .pyplot.figure
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .Figure.add_subplot
    Examples
    --------
    ::
        # First create some toy data:
        x = np.linspace(0, 2*np.pi, 400)
        y = np.sin(x**2)
        # Create just a figure and only one subplot
        fig, ax = plt.subplots()
        ax.plot(x, y)
        ax.set_title('Simple plot')
        # Create two subplots and unpack the output array immediately
        f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
        ax1.plot(x, y)
        ax1.set_title('Sharing Y axis')
        ax2.scatter(x, y)
        # Create four polar Axes and access them through the returned array
        fig, axs = plt.subplots(2, 2, subplot_kw=dict(projection="polar"))
        axs[0, 0].plot(x, y)
        axs[1, 1].scatter(x, y)
        # Share a X axis with each column of subplots
        plt.subplots(2, 2, sharex='col')
        # Share a Y axis with each row of subplots
        plt.subplots(2, 2, sharey='row')
        # Share both X and Y axes with all subplots
        plt.subplots(2, 2, sharex='all', sharey='all')
        # Note that this is the same as
        plt.subplots(2, 2, sharex=True, sharey=True)
        # Create figure number 10 with a single subplot
        # and clears it if it already exists.
        fig, ax = plt.subplots(num=10, clear=True)
    """
    # Create a fresh figure, then delegate all grid construction (sharing,
    # squeezing, ratio handling) to Figure.subplots.
    fig = figure(**fig_kw)
    axs = fig.subplots(nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
                       squeeze=squeeze, subplot_kw=subplot_kw,
                       gridspec_kw=gridspec_kw, height_ratios=height_ratios,
                       width_ratios=width_ratios)
    return fig, axs
# Typing overload: a string mosaic yields str keys in the returned dict.
@overload
def subplot_mosaic(
    mosaic: str,
    *,
    sharex: bool = ...,
    sharey: bool = ...,
    width_ratios: ArrayLike | None = ...,
    height_ratios: ArrayLike | None = ...,
    empty_sentinel: str = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    per_subplot_kw: dict[str | tuple[str, ...], dict[str, Any]] | None = ...,
    **fig_kw: Any
) -> tuple[Figure, dict[str, matplotlib.axes.Axes]]: ...
# Typing overload: a nested-list mosaic of a single hashable type _T yields
# _T keys in the returned dict.
@overload
def subplot_mosaic(
    mosaic: list[HashableList[_T]],
    *,
    sharex: bool = ...,
    sharey: bool = ...,
    width_ratios: ArrayLike | None = ...,
    height_ratios: ArrayLike | None = ...,
    empty_sentinel: _T = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    per_subplot_kw: dict[_T | tuple[_T, ...], dict[str, Any]] | None = ...,
    **fig_kw: Any
) -> tuple[Figure, dict[_T, matplotlib.axes.Axes]]: ...
# Typing overload: mixed hashable keys fall back to a dict keyed by Hashable.
@overload
def subplot_mosaic(
    mosaic: list[HashableList[Hashable]],
    *,
    sharex: bool = ...,
    sharey: bool = ...,
    width_ratios: ArrayLike | None = ...,
    height_ratios: ArrayLike | None = ...,
    empty_sentinel: Any = ...,
    subplot_kw: dict[str, Any] | None = ...,
    gridspec_kw: dict[str, Any] | None = ...,
    per_subplot_kw: dict[Hashable | tuple[Hashable, ...], dict[str, Any]] | None = ...,
    **fig_kw: Any
) -> tuple[Figure, dict[Hashable, matplotlib.axes.Axes]]: ...
def subplot_mosaic(
    mosaic: str | list[HashableList[_T]] | list[HashableList[Hashable]],
    *,
    sharex: bool = False,
    sharey: bool = False,
    width_ratios: ArrayLike | None = None,
    height_ratios: ArrayLike | None = None,
    empty_sentinel: Any = '.',
    subplot_kw: dict[str, Any] | None = None,
    gridspec_kw: dict[str, Any] | None = None,
    per_subplot_kw: dict[str | tuple[str, ...], dict[str, Any]] |
                    dict[_T | tuple[_T, ...], dict[str, Any]] |
                    dict[Hashable | tuple[Hashable, ...], dict[str, Any]] | None = None,
    **fig_kw: Any
) -> tuple[Figure, dict[str, matplotlib.axes.Axes]] | \
     tuple[Figure, dict[_T, matplotlib.axes.Axes]] | \
     tuple[Figure, dict[Hashable, matplotlib.axes.Axes]]:
    """
    Build a layout of Axes based on ASCII art or nested lists.
    This is a helper function to build complex GridSpec layouts visually.
    See :ref:`mosaic`
    for an example and full API documentation
    Parameters
    ----------
    mosaic : list of list of {hashable or nested} or str
        A visual layout of how you want your Axes to be arranged
        labeled as strings. For example ::
            x = [['A panel', 'A panel', 'edge'],
                 ['C panel', '.', 'edge']]
        produces 4 Axes:
        - 'A panel' which is 1 row high and spans the first two columns
        - 'edge' which is 2 rows high and is on the right edge
        - 'C panel' which is 1 row and 1 column wide in the bottom left
        - a blank space 1 row and 1 column wide in the bottom center
        Any of the entries in the layout can be a list of lists
        of the same form to create nested layouts.
        If input is a str, then it must be of the form ::
            '''
            AAE
            C.E
            '''
        where each character is a column and each line is a row.
        This only allows single character Axes labels and does
        not allow nesting but is very terse.
    sharex, sharey : bool, default: False
        If True, the x-axis (*sharex*) or y-axis (*sharey*) will be shared
        among all subplots. In that case, tick label visibility and axis units
        behave as for `subplots`. If False, each subplot's x- or y-axis will
        be independent.
    width_ratios : array-like of length *ncols*, optional
        Defines the relative widths of the columns. Each column gets a
        relative width of ``width_ratios[i] / sum(width_ratios)``.
        If not given, all columns will have the same width. Convenience
        for ``gridspec_kw={'width_ratios': [...]}``.
    height_ratios : array-like of length *nrows*, optional
        Defines the relative heights of the rows. Each row gets a
        relative height of ``height_ratios[i] / sum(height_ratios)``.
        If not given, all rows will have the same height. Convenience
        for ``gridspec_kw={'height_ratios': [...]}``.
    empty_sentinel : object, optional
        Entry in the layout to mean "leave this space empty". Defaults
        to ``'.'``. Note, if *layout* is a string, it is processed via
        `inspect.cleandoc` to remove leading white space, which may
        interfere with using white-space as the empty sentinel.
    subplot_kw : dict, optional
        Dictionary with keywords passed to the `.Figure.add_subplot` call
        used to create each subplot. These values may be overridden by
        values in *per_subplot_kw*.
    per_subplot_kw : dict, optional
        A dictionary mapping the Axes identifiers or tuples of identifiers
        to a dictionary of keyword arguments to be passed to the
        `.Figure.add_subplot` call used to create each subplot. The values
        in these dictionaries have precedence over the values in
        *subplot_kw*.
        If *mosaic* is a string, and thus all keys are single characters,
        it is possible to use a single string instead of a tuple as keys;
        i.e. ``"AB"`` is equivalent to ``("A", "B")``.
        .. versionadded:: 3.7
    gridspec_kw : dict, optional
        Dictionary with keywords passed to the `.GridSpec` constructor used
        to create the grid the subplots are placed on.
    **fig_kw
        All additional keyword arguments are passed to the
        `.pyplot.figure` call.
    Returns
    -------
    fig : `.Figure`
        The new figure
    dict[label, Axes]
        A dictionary mapping the labels to the Axes objects. The order of
        the Axes is left-to-right and top-to-bottom of their position in the
        total layout.
    """
    # Create a fresh figure and delegate all mosaic parsing and Axes
    # creation to Figure.subplot_mosaic.
    fig = figure(**fig_kw)
    ax_dict = fig.subplot_mosaic(  # type: ignore[misc]
        mosaic,  # type: ignore[arg-type]
        sharex=sharex, sharey=sharey,
        height_ratios=height_ratios, width_ratios=width_ratios,
        subplot_kw=subplot_kw, gridspec_kw=gridspec_kw,
        empty_sentinel=empty_sentinel,
        per_subplot_kw=per_subplot_kw,  # type: ignore[arg-type]
    )
    return fig, ax_dict
def subplot2grid(
    shape: tuple[int, int], loc: tuple[int, int],
    rowspan: int = 1, colspan: int = 1,
    fig: Figure | None = None,
    **kwargs
) -> matplotlib.axes.Axes:
    """
    Create a subplot at a specific location inside a regular grid.
    Parameters
    ----------
    shape : (int, int)
        Number of rows and of columns of the grid in which to place axis.
    loc : (int, int)
        Row number and column number of the axis location within the grid.
    rowspan : int, default: 1
        Number of rows for the axis to span downwards.
    colspan : int, default: 1
        Number of columns for the axis to span to the right.
    fig : `.Figure`, optional
        Figure to place the subplot in. Defaults to the current figure.
    **kwargs
        Additional keyword arguments are handed to `~.Figure.add_subplot`.
    Returns
    -------
    `~.axes.Axes`
        The Axes of the subplot. The returned Axes can actually be an instance
        of a subclass, such as `.projections.polar.PolarAxes` for polar
        projections.
    Notes
    -----
    The following call ::
        ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan)
    is identical to ::
        fig = gcf()
        gs = fig.add_gridspec(nrows, ncols)
        ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])
    """
    # Fall back to the current figure when no target figure was supplied.
    target_fig = gcf() if fig is None else fig
    grid_rows, grid_cols = shape
    # Reuse an existing gridspec of the requested geometry if one exists.
    gridspec = GridSpec._check_gridspec_exists(target_fig, grid_rows, grid_cols)
    spec = gridspec.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)
    return target_fig.add_subplot(spec, **kwargs)
def twinx(ax: matplotlib.axes.Axes | None = None) -> _AxesBase:
    """
    Make and return a second Axes that shares the *x*-axis. The new Axes will
    overlay *ax* (or the current Axes if *ax* is *None*), and its ticks will be
    on the right.
    Examples
    --------
    :doc:`/gallery/subplots_axes_and_figures/two_scales`
    """
    # Default to the current Axes, then delegate to the Axes-level method.
    base_ax = gca() if ax is None else ax
    return base_ax.twinx()
def twiny(ax: matplotlib.axes.Axes | None = None) -> _AxesBase:
    """
    Make and return a second Axes that shares the *y*-axis. The new Axes will
    overlay *ax* (or the current Axes if *ax* is *None*), and its ticks will be
    on the top.
    Examples
    --------
    :doc:`/gallery/subplots_axes_and_figures/two_scales`
    """
    # Default to the current Axes, then delegate to the Axes-level method.
    base_ax = gca() if ax is None else ax
    return base_ax.twiny()
def subplot_tool(targetfig: Figure | None = None) -> SubplotTool | None:
    """
    Launch a subplot tool window for a figure.
    Returns
    -------
    `matplotlib.widgets.SubplotTool`
    """
    fig = gcf() if targetfig is None else targetfig
    toolbar = fig.canvas.manager.toolbar  # type: ignore[union-attr]
    # Dispatch on the kind of toolbar attached to the figure's manager.
    if hasattr(toolbar, "configure_subplots"):  # toolbar2
        from matplotlib.backend_bases import NavigationToolbar2
        return cast(NavigationToolbar2, toolbar).configure_subplots()
    if hasattr(toolbar, "trigger_tool"):  # toolmanager
        from matplotlib.backend_bases import ToolContainerBase
        cast(ToolContainerBase, toolbar).trigger_tool("subplots")
        return None
    raise ValueError("subplot_tool can only be launched for figures with "
                     "an associated toolbar")
def box(on: bool | None = None) -> None:
    """
    Turn the Axes box on or off on the current Axes.
    Parameters
    ----------
    on : bool or None
        The new `~matplotlib.axes.Axes` box state. If ``None``, toggle
        the state.
    See Also
    --------
    :meth:`matplotlib.axes.Axes.set_frame_on`
    :meth:`matplotlib.axes.Axes.get_frame_on`
    """
    ax = gca()
    # None means "toggle the current frame state".
    new_state = (not ax.get_frame_on()) if on is None else on
    ax.set_frame_on(new_state)
## Axis ##
def xlim(*args, **kwargs) -> tuple[float, float]:
    """
    Get or set the x limits of the current Axes.
    Call signatures::
        left, right = xlim()  # return the current xlim
        xlim((left, right))   # set the xlim to left, right
        xlim(left, right)     # set the xlim to left, right
    If you do not specify args, you can pass *left* or *right* as kwargs,
    i.e.::
        xlim(right=3)  # adjust the right leaving left unchanged
        xlim(left=1)  # adjust the left leaving right unchanged
    Setting limits turns autoscaling off for the x-axis.
    Returns
    -------
    left, right
        A tuple of the new x-axis limits.
    Notes
    -----
    Calling this function with no arguments (e.g. ``xlim()``) is the pyplot
    equivalent of calling `~.Axes.get_xlim` on the current Axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xlim` on the current Axes. All arguments are passed through.
    """
    ax = gca()
    # Any argument at all switches this from a getter into a setter.
    if args or kwargs:
        return ax.set_xlim(*args, **kwargs)
    return ax.get_xlim()
def ylim(*args, **kwargs) -> tuple[float, float]:
    """
    Get or set the y-limits of the current Axes.
    Call signatures::
        bottom, top = ylim()  # return the current ylim
        ylim((bottom, top))   # set the ylim to bottom, top
        ylim(bottom, top)     # set the ylim to bottom, top
    If you do not specify args, you can alternatively pass *bottom* or
    *top* as kwargs, i.e.::
        ylim(top=3)  # adjust the top leaving bottom unchanged
        ylim(bottom=1)  # adjust the bottom leaving top unchanged
    Setting limits turns autoscaling off for the y-axis.
    Returns
    -------
    bottom, top
        A tuple of the new y-axis limits.
    Notes
    -----
    Calling this function with no arguments (e.g. ``ylim()``) is the pyplot
    equivalent of calling `~.Axes.get_ylim` on the current Axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_ylim` on the current Axes. All arguments are passed through.
    """
    ax = gca()
    # Any argument at all switches this from a getter into a setter.
    if args or kwargs:
        return ax.set_ylim(*args, **kwargs)
    return ax.get_ylim()
def xticks(
    ticks: ArrayLike | None = None,
    labels: Sequence[str] | None = None,
    *,
    minor: bool = False,
    **kwargs
) -> tuple[list[Tick] | np.ndarray, list[Text]]:
    """
    Get or set the current tick locations and labels of the x-axis.
    Pass no arguments to return the current values without modifying them.
    Parameters
    ----------
    ticks : array-like, optional
        The list of xtick locations. Passing an empty list removes all xticks.
    labels : array-like, optional
        The labels to place at the given *ticks* locations. This argument can
        only be passed if *ticks* is passed as well.
    minor : bool, default: False
        If ``False``, get/set the major ticks/labels; if ``True``, the minor
        ticks/labels.
    **kwargs
        `.Text` properties can be used to control the appearance of the labels.
        .. warning::
            This only sets the properties of the current ticks, which is
            only sufficient if you either pass *ticks*, resulting in a
            fixed list of ticks, or if the plot is static.
            Ticks are not guaranteed to be persistent. Various operations
            can create, delete and modify the Tick instances. There is an
            imminent risk that these settings can get lost if you work on
            the figure further (including also panning/zooming on a
            displayed figure).
            Use `~.pyplot.tick_params` instead if possible.
    Returns
    -------
    locs
        The list of xtick locations.
    labels
        The list of xlabel `.Text` objects.
    Notes
    -----
    Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
    equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
    the current Axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current Axes.
    Examples
    --------
    >>> locs, labels = xticks()  # Get the current locations and labels.
    >>> xticks(np.arange(0, 1, step=0.2))  # Set label locations.
    >>> xticks(np.arange(3), ['Tom', 'Dick', 'Sue'])  # Set text labels.
    >>> xticks([0, 1, 2], ['January', 'February', 'March'],
    ...        rotation=20)  # Set text labels and properties.
    >>> xticks([])  # Disable xticks.
    """
    ax = gca()
    locs: list[Tick] | np.ndarray
    if ticks is None:
        locs = ax.get_xticks(minor=minor)
        if labels is not None:
            raise TypeError("xticks(): Parameter 'labels' can't be set "
                            "without setting 'ticks'")
    else:
        locs = ax.set_xticks(ticks, minor=minor)
    # Both branches below assign labels_out, so no placeholder is needed.
    labels_out: list[Text]
    if labels is None:
        # Without new label text, apply the Text-property kwargs to the
        # existing tick labels in place.
        labels_out = ax.get_xticklabels(minor=minor)
        for label in labels_out:
            label._internal_update(kwargs)
    else:
        labels_out = ax.set_xticklabels(labels, minor=minor, **kwargs)
    return locs, labels_out
def yticks(
    ticks: ArrayLike | None = None,
    labels: Sequence[str] | None = None,
    *,
    minor: bool = False,
    **kwargs
) -> tuple[list[Tick] | np.ndarray, list[Text]]:
    """
    Get or set the current tick locations and labels of the y-axis.
    Pass no arguments to return the current values without modifying them.
    Parameters
    ----------
    ticks : array-like, optional
        The list of ytick locations. Passing an empty list removes all yticks.
    labels : array-like, optional
        The labels to place at the given *ticks* locations. This argument can
        only be passed if *ticks* is passed as well.
    minor : bool, default: False
        If ``False``, get/set the major ticks/labels; if ``True``, the minor
        ticks/labels.
    **kwargs
        `.Text` properties can be used to control the appearance of the labels.
        .. warning::
            This only sets the properties of the current ticks, which is
            only sufficient if you either pass *ticks*, resulting in a
            fixed list of ticks, or if the plot is static.
            Ticks are not guaranteed to be persistent. Various operations
            can create, delete and modify the Tick instances. There is an
            imminent risk that these settings can get lost if you work on
            the figure further (including also panning/zooming on a
            displayed figure).
            Use `~.pyplot.tick_params` instead if possible.
    Returns
    -------
    locs
        The list of ytick locations.
    labels
        The list of ylabel `.Text` objects.
    Notes
    -----
    Calling this function with no arguments (e.g. ``yticks()``) is the pyplot
    equivalent of calling `~.Axes.get_yticks` and `~.Axes.get_yticklabels` on
    the current Axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_yticks` and `~.Axes.set_yticklabels` on the current Axes.
    Examples
    --------
    >>> locs, labels = yticks()  # Get the current locations and labels.
    >>> yticks(np.arange(0, 1, step=0.2))  # Set label locations.
    >>> yticks(np.arange(3), ['Tom', 'Dick', 'Sue'])  # Set text labels.
    >>> yticks([0, 1, 2], ['January', 'February', 'March'],
    ...        rotation=45)  # Set text labels and properties.
    >>> yticks([])  # Disable yticks.
    """
    ax = gca()
    locs: list[Tick] | np.ndarray
    if ticks is None:
        locs = ax.get_yticks(minor=minor)
        if labels is not None:
            raise TypeError("yticks(): Parameter 'labels' can't be set "
                            "without setting 'ticks'")
    else:
        locs = ax.set_yticks(ticks, minor=minor)
    # Both branches below assign labels_out, so no placeholder is needed.
    labels_out: list[Text]
    if labels is None:
        # Without new label text, apply the Text-property kwargs to the
        # existing tick labels in place.
        labels_out = ax.get_yticklabels(minor=minor)
        for label in labels_out:
            label._internal_update(kwargs)
    else:
        labels_out = ax.set_yticklabels(labels, minor=minor, **kwargs)
    return locs, labels_out
def rgrids(
    radii: ArrayLike | None = None,
    labels: Sequence[str | Text] | None = None,
    angle: float | None = None,
    fmt: str | None = None,
    **kwargs
) -> tuple[list[Line2D], list[Text]]:
    """
    Get or set the radial gridlines on the current polar plot.
    Call signatures::
        lines, labels = rgrids()
        lines, labels = rgrids(radii, labels=None, angle=22.5, fmt=None, **kwargs)
    When called with no arguments, `.rgrids` simply returns the tuple
    (*lines*, *labels*). When called with arguments, the labels will
    appear at the specified radial distances and angle.
    Parameters
    ----------
    radii : tuple with floats
        The radii for the radial gridlines
    labels : tuple with strings or None
        The labels to use at each radial gridline. The
        `matplotlib.ticker.ScalarFormatter` will be used if None.
    angle : float
        The angular position of the radius labels in degrees.
    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`.
        For example '%f'.
    Returns
    -------
    lines : list of `.lines.Line2D`
        The radial gridlines.
    labels : list of `.text.Text`
        The tick labels.
    Other Parameters
    ----------------
    **kwargs
        *kwargs* are optional `.Text` properties for the labels.
    See Also
    --------
    .pyplot.thetagrids
    .projections.polar.PolarAxes.set_rgrids
    .Axis.get_gridlines
    .Axis.get_ticklabels
    Examples
    --------
    ::
        # set the locations of the radial gridlines
        lines, labels = rgrids( (0.25, 0.5, 1.0) )
        # set the locations and labels of the radial gridlines
        lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry' ))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('rgrids only defined for polar Axes')
    # Getter path: no positional parameters and no Text kwargs at all.
    if all(p is None for p in [radii, labels, angle, fmt]) and not kwargs:
        lines_out: list[Line2D] = ax.yaxis.get_gridlines()
        labels_out: list[Text] = ax.yaxis.get_ticklabels()
    elif radii is None:
        # Setter path requires the grid locations; reject label/angle/fmt
        # without them.
        raise TypeError("'radii' cannot be None when other parameters are passed")
    else:
        lines_out, labels_out = ax.set_rgrids(
            radii, labels=labels, angle=angle, fmt=fmt, **kwargs)
    return lines_out, labels_out
def thetagrids(
    angles: ArrayLike | None = None,
    labels: Sequence[str | Text] | None = None,
    fmt: str | None = None,
    **kwargs
) -> tuple[list[Line2D], list[Text]]:
    """
    Get or set the theta gridlines on the current polar plot.
    Call signatures::
        lines, labels = thetagrids()
        lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)
    When called with no arguments, `.thetagrids` simply returns the tuple
    (*lines*, *labels*). When called with arguments, the labels will
    appear at the specified angles.
    Parameters
    ----------
    angles : tuple with floats, degrees
        The angles of the theta gridlines.
    labels : tuple with strings or None
        The labels to use at each theta gridline. The
        `.projections.polar.ThetaFormatter` will be used if None.
    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`.
        For example '%f'. Note that the angle in radians will be used.
    Returns
    -------
    lines : list of `.lines.Line2D`
        The theta gridlines.
    labels : list of `.text.Text`
        The tick labels.
    Other Parameters
    ----------------
    **kwargs
        *kwargs* are optional `.Text` properties for the labels.
    See Also
    --------
    .pyplot.rgrids
    .projections.polar.PolarAxes.set_thetagrids
    .Axis.get_gridlines
    .Axis.get_ticklabels
    Examples
    --------
    ::
        # set the locations of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90))
        # set the locations and labels of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar Axes')
    # Getter path: no parameters and no Text kwargs at all.
    if all(param is None for param in [angles, labels, fmt]) and not kwargs:
        # NOTE(review): unlike rgrids (which returns yaxis gridlines), the
        # getter here returns the xaxis *tick lines* — kept as-is since
        # callers may rely on it; confirm against PolarAxes before changing.
        lines_out: list[Line2D] = ax.xaxis.get_ticklines()
        labels_out: list[Text] = ax.xaxis.get_ticklabels()
    elif angles is None:
        # Setter path requires the angular locations.
        raise TypeError("'angles' cannot be None when other parameters are passed")
    else:
        lines_out, labels_out = ax.set_thetagrids(angles,
                                                  labels=labels, fmt=fmt,
                                                  **kwargs)
    return lines_out, labels_out
@_api.deprecated("3.7", pending=True)
def get_plot_commands() -> list[str]:
    """
    Get a sorted list of all of the plotting commands.
    """
    # Names that live in this module but are not plotting commands; they are
    # filtered out of the introspected function list.
    NON_PLOT_COMMANDS = {
        'connect', 'disconnect', 'get_current_fig_manager', 'ginput',
        'new_figure_manager', 'waitforbuttonpress'}
    return [name for name in _get_pyplot_commands()
            if name not in NON_PLOT_COMMANDS]
def _get_pyplot_commands() -> list[str]:
    # Introspect this module's globals for public functions defined here,
    # skipping a few hard-coded names, all colormap-setting helpers, and
    # anything private (leading underscore).
    excluded = {'colormaps', 'colors', 'get_plot_commands', *colormaps}
    this_module = inspect.getmodule(get_plot_commands)
    names = []
    for name, obj in globals().items():
        if name.startswith('_') or name in excluded:
            continue
        if inspect.isfunction(obj) and inspect.getmodule(obj) is this_module:
            names.append(name)
    return sorted(names)
## Plotting part 1: manually generated functions and wrappers ##
@_copy_docstring_and_deprecators(Figure.colorbar)
def colorbar(
    mappable: ScalarMappable | ColorizingArtist | None = None,
    cax: matplotlib.axes.Axes | None = None,
    ax: matplotlib.axes.Axes | Iterable[matplotlib.axes.Axes] | None = None,
    **kwargs
) -> Colorbar:
    # Fall back to the current image/mappable when none was supplied.
    if mappable is None:
        mappable = gci()
        if mappable is None:
            raise RuntimeError('No mappable was found to use for colorbar '
                               'creation. First define a mappable such as '
                               'an image (with imshow) or a contour set ('
                               'with contourf).')
    return gcf().colorbar(mappable, cax=cax, ax=ax, **kwargs)
def clim(vmin: float | None = None, vmax: float | None = None) -> None:
    """
    Set the color limits of the current image.
    If either *vmin* or *vmax* is None, the image min/max respectively
    will be used for color scaling.
    If you want to set the clim of multiple images, use
    `~.ScalarMappable.set_clim` on every image, for example::
        for im in gca().get_images():
            im.set_clim(0, 0.5)
    """
    # Operates on the "current" image only; error out if there is none.
    current_image = gci()
    if current_image is None:
        raise RuntimeError('You must first define an image, e.g., with imshow')
    current_image.set_clim(vmin, vmax)
def get_cmap(name: Colormap | str | None = None, lut: int | None = None) -> Colormap:
    """
    Get a colormap instance, defaulting to rc values if *name* is None.
    Parameters
    ----------
    name : `~matplotlib.colors.Colormap` or str or None, default: None
        If a `.Colormap` instance, it will be returned. Otherwise, the name of
        a colormap known to Matplotlib, which will be resampled by *lut*. The
        default, None, means :rc:`image.cmap`.
    lut : int or None, default: None
        If *name* is not already a Colormap instance and *lut* is not None, the
        colormap will be resampled to have *lut* entries in the lookup table.
    Returns
    -------
    Colormap
    """
    # Resolve None to the rc default first, then short-circuit on instances —
    # same order as before, so an rc value that is already a Colormap is
    # returned unchanged.
    resolved = rcParams['image.cmap'] if name is None else name
    if isinstance(resolved, Colormap):
        return resolved
    _api.check_in_list(sorted(_colormaps), name=resolved)
    cmap = _colormaps[resolved]
    return cmap if lut is None else cmap.resampled(lut)
def set_cmap(cmap: Colormap | str) -> None:
    """
    Set the default colormap, and applies it to the current image if any.
    Parameters
    ----------
    cmap : `~matplotlib.colors.Colormap` or str
        A colormap instance or the name of a registered colormap.
    See Also
    --------
    colormaps
    get_cmap
    """
    # Normalize a name into a Colormap instance, update the rc default,
    # and retint the current image (if one exists).
    cmap_obj = get_cmap(cmap)
    rc('image', cmap=cmap_obj.name)
    current_image = gci()
    if current_image is not None:
        current_image.set_cmap(cmap_obj)
@_copy_docstring_and_deprecators(matplotlib.image.imread)
def imread(
    fname: str | pathlib.Path | BinaryIO, format: str | None = None
) -> np.ndarray:
    # Thin pass-through to the image-module implementation; the docstring is
    # copied from it by the decorator above.
    return matplotlib.image.imread(fname, format)
@_copy_docstring_and_deprecators(matplotlib.image.imsave)
def imsave(
    fname: str | os.PathLike | BinaryIO, arr: ArrayLike, **kwargs
) -> None:
    # Thin pass-through to the image-module implementation; the docstring is
    # copied from it by the decorator above.
    matplotlib.image.imsave(fname, arr, **kwargs)
def matshow(A: ArrayLike, fignum: None | int = None, **kwargs) -> AxesImage:
    """
    Display a 2D array as a matrix in a new figure window.
    The origin is set at the upper left hand corner.
    The indexing is ``(row, column)`` so that the first index runs vertically
    and the second index runs horizontally in the figure:
    .. code-block:: none
        A[0, 0]   ⋯ A[0, M-1]
        ⋮         ⋮
        A[N-1, 0] ⋯ A[N-1, M-1]
    The aspect ratio of the figure window is that of the array,
    unless this would make an excessively short or narrow figure.
    Tick labels for the xaxis are placed on top.
    Parameters
    ----------
    A : 2D array-like
        The matrix to be displayed.
    fignum : None or int
        If *None*, create a new, appropriately sized figure window.
        If 0, use the current Axes (creating one if there is none, without ever
        adjusting the figure size).
        Otherwise, create a new Axes on the figure with the given number
        (creating it at the appropriate size if it does not exist, but not
        adjusting the figure size otherwise). Note that this will be drawn on
        top of any preexisting Axes on the figure.
    Returns
    -------
    `~matplotlib.image.AxesImage`
    Other Parameters
    ----------------
    **kwargs : `~matplotlib.axes.Axes.imshow` arguments
    """
    A = np.asanyarray(A)
    if fignum == 0:
        # Reuse the current Axes without ever adjusting the figure size.
        ax = gca()
    else:
        if fignum is not None and fignum_exists(fignum):
            # Do not try to set a figure size.
            figsize = None
        else:
            # Extract actual aspect ratio of array and make appropriately sized figure.
            figsize = figaspect(A)
        fig = figure(fignum, figsize=figsize)
        ax = fig.add_axes((0.15, 0.09, 0.775, 0.775))
    im = ax.matshow(A, **kwargs)
    # Register the image as "current" so colorbar()/clim() pick it up.
    sci(im)
    return im
def polar(*args, **kwargs) -> list[Line2D]:
    """
    Make a polar plot.

    call signature::

        polar(theta, r, [fmt], **kwargs)

    This is a convenience wrapper around `.pyplot.plot`. It ensures that the
    current Axes is polar (or creates one if needed) and then passes all parameters
    to ``.pyplot.plot``.

    .. note::
        When making polar plots using the :ref:`pyplot API <pyplot_interface>`,
        ``polar()`` should typically be the first command because that makes sure
        a polar Axes is created. Using other commands such as ``plt.title()``
        before this can lead to the implicit creation of a rectangular Axes, in which
        case a subsequent ``polar()`` call will fail.
    """
    # If an axis already exists, check if it has a polar projection
    if gcf().get_axes():
        ax = gca()
        if not isinstance(ax, PolarAxes):
            # A rectangular Axes is already current: plotting proceeds on it
            # (non-polar), but this legacy fallback is deprecated.
            _api.warn_deprecated(
                "3.10",
                message="There exists a non-polar current Axes. Therefore, the "
                        "resulting plot from 'polar()' is non-polar. You likely "
                        "should call 'polar()' before any other pyplot plotting "
                        "commands. "
                        "Support for this scenario is deprecated in %(since)s and "
                        "will raise an error in %(removal)s"
            )
    else:
        # No Axes yet: create a fresh polar one.
        ax = axes(projection="polar")
    return ax.plot(*args, **kwargs)
# If rcParams['backend_fallback'] is true, and an interactive backend is
# requested, ignore rcParams['backend'] and force selection of a backend that
# is compatible with the current running interactive framework.
if rcParams["backend_fallback"]:
    requested_backend = rcParams._get_backend_or_none()  # type: ignore[attr-defined]
    requested_backend = None if requested_backend is None else requested_backend.lower()
    available_backends = backend_registry.list_builtin(BackendFilter.INTERACTIVE)
    if (
        # webagg/nbagg serve over HTTP and work regardless of the running GUI
        # framework, so an explicit request for them is left untouched.
        requested_backend in (set(available_backends) - {'webagg', 'nbagg'})
        and cbook._get_running_interactive_framework()
    ):
        # Defer the choice: the sentinel makes the next backend resolution
        # auto-select one compatible with the running framework.
        rcParams._set("backend", rcsetup._auto_backend_sentinel)
# fmt: on
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.figimage` on the current figure.
@_copy_docstring_and_deprecators(Figure.figimage)
def figimage(
    X: ArrayLike,
    xo: int = 0,
    yo: int = 0,
    alpha: float | None = None,
    norm: str | Normalize | None = None,
    cmap: str | Colormap | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    origin: Literal["upper", "lower"] | None = None,
    resize: bool = False,
    *,
    colorizer: Colorizer | None = None,
    **kwargs,
) -> FigureImage:
    return gcf().figimage(
        X,
        xo=xo,
        yo=yo,
        alpha=alpha,
        norm=norm,
        cmap=cmap,
        vmin=vmin,
        vmax=vmax,
        origin=origin,
        resize=resize,
        colorizer=colorizer,
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.text` on the current figure.
@_copy_docstring_and_deprecators(Figure.text)
def figtext(
    x: float, y: float, s: str, fontdict: dict[str, Any] | None = None, **kwargs
) -> Text:
    return gcf().text(x, y, s, fontdict=fontdict, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: the current Axes of the current figure (created on demand).
@_copy_docstring_and_deprecators(Figure.gca)
def gca() -> Axes:
    return gcf().gca()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: the current figure's "current image", or None if unset.
@_copy_docstring_and_deprecators(Figure._gci)
def gci() -> ColorizingArtist | None:
    return gcf()._gci()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.ginput` (blocking mouse input) on the
# current figure.
@_copy_docstring_and_deprecators(Figure.ginput)
def ginput(
    n: int = 1,
    timeout: float = 30,
    show_clicks: bool = True,
    mouse_add: MouseButton = MouseButton.LEFT,
    mouse_pop: MouseButton = MouseButton.RIGHT,
    mouse_stop: MouseButton = MouseButton.MIDDLE,
) -> list[tuple[int, int]]:
    return gcf().ginput(
        n=n,
        timeout=timeout,
        show_clicks=show_clicks,
        mouse_add=mouse_add,
        mouse_pop=mouse_pop,
        mouse_stop=mouse_stop,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.subplots_adjust` on the current figure.
@_copy_docstring_and_deprecators(Figure.subplots_adjust)
def subplots_adjust(
    left: float | None = None,
    bottom: float | None = None,
    right: float | None = None,
    top: float | None = None,
    wspace: float | None = None,
    hspace: float | None = None,
) -> None:
    gcf().subplots_adjust(
        left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.suptitle` on the current figure.
@_copy_docstring_and_deprecators(Figure.suptitle)
def suptitle(t: str, **kwargs) -> Text:
    return gcf().suptitle(t, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.tight_layout` on the current figure.
@_copy_docstring_and_deprecators(Figure.tight_layout)
def tight_layout(
    *,
    pad: float = 1.08,
    h_pad: float | None = None,
    w_pad: float | None = None,
    rect: tuple[float, float, float, float] | None = None,
) -> None:
    gcf().tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Figure.waitforbuttonpress` on the current figure.
@_copy_docstring_and_deprecators(Figure.waitforbuttonpress)
def waitforbuttonpress(timeout: float = -1) -> None | bool:
    return gcf().waitforbuttonpress(timeout=timeout)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.acorr` on the current Axes.  The *data*
# kwarg is forwarded only when supplied, matching the Axes signature.
@_copy_docstring_and_deprecators(Axes.acorr)
def acorr(
    x: ArrayLike, *, data=None, **kwargs
) -> tuple[np.ndarray, np.ndarray, LineCollection | Line2D, Line2D | None]:
    return gca().acorr(x, **({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.angle_spectrum` on the current Axes.
@_copy_docstring_and_deprecators(Axes.angle_spectrum)
def angle_spectrum(
    x: ArrayLike,
    Fs: float | None = None,
    Fc: int | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, Line2D]:
    return gca().angle_spectrum(
        x,
        Fs=Fs,
        Fc=Fc,
        window=window,
        pad_to=pad_to,
        sides=sides,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.annotate` on the current Axes.
@_copy_docstring_and_deprecators(Axes.annotate)
def annotate(
    text: str,
    xy: tuple[float, float],
    xytext: tuple[float, float] | None = None,
    xycoords: CoordsType = "data",
    textcoords: CoordsType | None = None,
    arrowprops: dict[str, Any] | None = None,
    annotation_clip: bool | None = None,
    **kwargs,
) -> Annotation:
    return gca().annotate(
        text,
        xy,
        xytext=xytext,
        xycoords=xycoords,
        textcoords=textcoords,
        arrowprops=arrowprops,
        annotation_clip=annotation_clip,
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.arrow` on the current Axes.
@_copy_docstring_and_deprecators(Axes.arrow)
def arrow(x: float, y: float, dx: float, dy: float, **kwargs) -> FancyArrow:
    return gca().arrow(x, y, dx, dy, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.autoscale` on the current Axes.
@_copy_docstring_and_deprecators(Axes.autoscale)
def autoscale(
    enable: bool = True,
    axis: Literal["both", "x", "y"] = "both",
    tight: bool | None = None,
) -> None:
    gca().autoscale(enable=enable, axis=axis, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axhline` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axhline)
def axhline(y: float = 0, xmin: float = 0, xmax: float = 1, **kwargs) -> Line2D:
    return gca().axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axhspan` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axhspan)
def axhspan(
    ymin: float, ymax: float, xmin: float = 0, xmax: float = 1, **kwargs
) -> Rectangle:
    return gca().axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axis` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axis)
def axis(
    arg: tuple[float, float, float, float] | bool | str | None = None,
    /,
    *,
    emit: bool = True,
    **kwargs,
) -> tuple[float, float, float, float]:
    return gca().axis(arg, emit=emit, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axline` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axline)
def axline(
    xy1: tuple[float, float],
    xy2: tuple[float, float] | None = None,
    *,
    slope: float | None = None,
    **kwargs,
) -> AxLine:
    return gca().axline(xy1, xy2=xy2, slope=slope, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axvline` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axvline)
def axvline(x: float = 0, ymin: float = 0, ymax: float = 1, **kwargs) -> Line2D:
    return gca().axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.axvspan` on the current Axes.
@_copy_docstring_and_deprecators(Axes.axvspan)
def axvspan(
    xmin: float, xmax: float, ymin: float = 0, ymax: float = 1, **kwargs
) -> Rectangle:
    return gca().axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.bar` on the current Axes; *data* is
# forwarded only when supplied.
@_copy_docstring_and_deprecators(Axes.bar)
def bar(
    x: float | ArrayLike,
    height: float | ArrayLike,
    width: float | ArrayLike = 0.8,
    bottom: float | ArrayLike | None = None,
    *,
    align: Literal["center", "edge"] = "center",
    data=None,
    **kwargs,
) -> BarContainer:
    return gca().bar(
        x,
        height,
        width=width,
        bottom=bottom,
        align=align,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.barbs` on the current Axes.
@_copy_docstring_and_deprecators(Axes.barbs)
def barbs(*args, data=None, **kwargs) -> Barbs:
    return gca().barbs(*args, **({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.barh` on the current Axes.
@_copy_docstring_and_deprecators(Axes.barh)
def barh(
    y: float | ArrayLike,
    width: float | ArrayLike,
    height: float | ArrayLike = 0.8,
    left: float | ArrayLike | None = None,
    *,
    align: Literal["center", "edge"] = "center",
    data=None,
    **kwargs,
) -> BarContainer:
    return gca().barh(
        y,
        width,
        height=height,
        left=left,
        align=align,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.bar_label` on the current Axes.
@_copy_docstring_and_deprecators(Axes.bar_label)
def bar_label(
    container: BarContainer,
    labels: ArrayLike | None = None,
    *,
    fmt: str | Callable[[float], str] = "%g",
    label_type: Literal["center", "edge"] = "edge",
    padding: float = 0,
    **kwargs,
) -> list[Annotation]:
    return gca().bar_label(
        container,
        labels=labels,
        fmt=fmt,
        label_type=label_type,
        padding=padding,
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.boxplot` on the current Axes.
@_copy_docstring_and_deprecators(Axes.boxplot)
def boxplot(
    x: ArrayLike | Sequence[ArrayLike],
    notch: bool | None = None,
    sym: str | None = None,
    vert: bool | None = None,
    orientation: Literal["vertical", "horizontal"] = "vertical",
    whis: float | tuple[float, float] | None = None,
    positions: ArrayLike | None = None,
    widths: float | ArrayLike | None = None,
    patch_artist: bool | None = None,
    bootstrap: int | None = None,
    usermedians: ArrayLike | None = None,
    conf_intervals: ArrayLike | None = None,
    meanline: bool | None = None,
    showmeans: bool | None = None,
    showcaps: bool | None = None,
    showbox: bool | None = None,
    showfliers: bool | None = None,
    boxprops: dict[str, Any] | None = None,
    tick_labels: Sequence[str] | None = None,
    flierprops: dict[str, Any] | None = None,
    medianprops: dict[str, Any] | None = None,
    meanprops: dict[str, Any] | None = None,
    capprops: dict[str, Any] | None = None,
    whiskerprops: dict[str, Any] | None = None,
    manage_ticks: bool = True,
    autorange: bool = False,
    zorder: float | None = None,
    capwidths: float | ArrayLike | None = None,
    label: Sequence[str] | None = None,
    *,
    data=None,
) -> dict[str, Any]:
    return gca().boxplot(
        x,
        notch=notch,
        sym=sym,
        vert=vert,
        orientation=orientation,
        whis=whis,
        positions=positions,
        widths=widths,
        patch_artist=patch_artist,
        bootstrap=bootstrap,
        usermedians=usermedians,
        conf_intervals=conf_intervals,
        meanline=meanline,
        showmeans=showmeans,
        showcaps=showcaps,
        showbox=showbox,
        showfliers=showfliers,
        boxprops=boxprops,
        tick_labels=tick_labels,
        flierprops=flierprops,
        medianprops=medianprops,
        meanprops=meanprops,
        capprops=capprops,
        whiskerprops=whiskerprops,
        manage_ticks=manage_ticks,
        autorange=autorange,
        zorder=zorder,
        capwidths=capwidths,
        label=label,
        **({"data": data} if data is not None else {}),
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.broken_barh` on the current Axes.
@_copy_docstring_and_deprecators(Axes.broken_barh)
def broken_barh(
    xranges: Sequence[tuple[float, float]],
    yrange: tuple[float, float],
    *,
    data=None,
    **kwargs,
) -> PolyCollection:
    return gca().broken_barh(
        xranges, yrange, **({"data": data} if data is not None else {}), **kwargs
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.clabel` on the current Axes.
@_copy_docstring_and_deprecators(Axes.clabel)
def clabel(CS: ContourSet, levels: ArrayLike | None = None, **kwargs) -> list[Text]:
    return gca().clabel(CS, levels=levels, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.cohere` on the current Axes.
@_copy_docstring_and_deprecators(Axes.cohere)
def cohere(
    x: ArrayLike,
    y: ArrayLike,
    NFFT: int = 256,
    Fs: float = 2,
    Fc: int = 0,
    detrend: Literal["none", "mean", "linear"]
    | Callable[[ArrayLike], ArrayLike] = mlab.detrend_none,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike = mlab.window_hanning,
    noverlap: int = 0,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] = "default",
    scale_by_freq: bool | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray]:
    return gca().cohere(
        x,
        y,
        NFFT=NFFT,
        Fs=Fs,
        Fc=Fc,
        detrend=detrend,
        window=window,
        noverlap=noverlap,
        pad_to=pad_to,
        sides=sides,
        scale_by_freq=scale_by_freq,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.contour` on the current Axes and, when the
# contour set carries a data array, registers it as the current image.
@_copy_docstring_and_deprecators(Axes.contour)
def contour(*args, data=None, **kwargs) -> QuadContourSet:
    __ret = gca().contour(
        *args, **({"data": data} if data is not None else {}), **kwargs
    )
    if __ret._A is not None:  # type: ignore[attr-defined]
        sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.contourf` on the current Axes and, when the
# contour set carries a data array, registers it as the current image.
@_copy_docstring_and_deprecators(Axes.contourf)
def contourf(*args, data=None, **kwargs) -> QuadContourSet:
    __ret = gca().contourf(
        *args, **({"data": data} if data is not None else {}), **kwargs
    )
    if __ret._A is not None:  # type: ignore[attr-defined]
        sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.csd` on the current Axes.
@_copy_docstring_and_deprecators(Axes.csd)
def csd(
    x: ArrayLike,
    y: ArrayLike,
    NFFT: int | None = None,
    Fs: float | None = None,
    Fc: int | None = None,
    detrend: Literal["none", "mean", "linear"]
    | Callable[[ArrayLike], ArrayLike]
    | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    noverlap: int | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    scale_by_freq: bool | None = None,
    return_line: bool | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray] | tuple[np.ndarray, np.ndarray, Line2D]:
    return gca().csd(
        x,
        y,
        NFFT=NFFT,
        Fs=Fs,
        Fc=Fc,
        detrend=detrend,
        window=window,
        noverlap=noverlap,
        pad_to=pad_to,
        sides=sides,
        scale_by_freq=scale_by_freq,
        return_line=return_line,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.ecdf` on the current Axes.
@_copy_docstring_and_deprecators(Axes.ecdf)
def ecdf(
    x: ArrayLike,
    weights: ArrayLike | None = None,
    *,
    complementary: bool = False,
    orientation: Literal["vertical", "horizontal"] = "vertical",
    compress: bool = False,
    data=None,
    **kwargs,
) -> Line2D:
    return gca().ecdf(
        x,
        weights=weights,
        complementary=complementary,
        orientation=orientation,
        compress=compress,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.errorbar` on the current Axes.
@_copy_docstring_and_deprecators(Axes.errorbar)
def errorbar(
    x: float | ArrayLike,
    y: float | ArrayLike,
    yerr: float | ArrayLike | None = None,
    xerr: float | ArrayLike | None = None,
    fmt: str = "",
    ecolor: ColorType | None = None,
    elinewidth: float | None = None,
    capsize: float | None = None,
    barsabove: bool = False,
    lolims: bool | ArrayLike = False,
    uplims: bool | ArrayLike = False,
    xlolims: bool | ArrayLike = False,
    xuplims: bool | ArrayLike = False,
    errorevery: int | tuple[int, int] = 1,
    capthick: float | None = None,
    *,
    data=None,
    **kwargs,
) -> ErrorbarContainer:
    return gca().errorbar(
        x,
        y,
        yerr=yerr,
        xerr=xerr,
        fmt=fmt,
        ecolor=ecolor,
        elinewidth=elinewidth,
        capsize=capsize,
        barsabove=barsabove,
        lolims=lolims,
        uplims=uplims,
        xlolims=xlolims,
        xuplims=xuplims,
        errorevery=errorevery,
        capthick=capthick,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.eventplot` on the current Axes.
@_copy_docstring_and_deprecators(Axes.eventplot)
def eventplot(
    positions: ArrayLike | Sequence[ArrayLike],
    orientation: Literal["horizontal", "vertical"] = "horizontal",
    lineoffsets: float | Sequence[float] = 1,
    linelengths: float | Sequence[float] = 1,
    linewidths: float | Sequence[float] | None = None,
    colors: ColorType | Sequence[ColorType] | None = None,
    alpha: float | Sequence[float] | None = None,
    linestyles: LineStyleType | Sequence[LineStyleType] = "solid",
    *,
    data=None,
    **kwargs,
) -> EventCollection:
    return gca().eventplot(
        positions,
        orientation=orientation,
        lineoffsets=lineoffsets,
        linelengths=linelengths,
        linewidths=linewidths,
        colors=colors,
        alpha=alpha,
        linestyles=linestyles,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.fill` on the current Axes.
@_copy_docstring_and_deprecators(Axes.fill)
def fill(*args, data=None, **kwargs) -> list[Polygon]:
    return gca().fill(*args, **({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.fill_between` on the current Axes.
@_copy_docstring_and_deprecators(Axes.fill_between)
def fill_between(
    x: ArrayLike,
    y1: ArrayLike | float,
    y2: ArrayLike | float = 0,
    where: Sequence[bool] | None = None,
    interpolate: bool = False,
    step: Literal["pre", "post", "mid"] | None = None,
    *,
    data=None,
    **kwargs,
) -> FillBetweenPolyCollection:
    return gca().fill_between(
        x,
        y1,
        y2=y2,
        where=where,
        interpolate=interpolate,
        step=step,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.fill_betweenx` on the current Axes.
@_copy_docstring_and_deprecators(Axes.fill_betweenx)
def fill_betweenx(
    y: ArrayLike,
    x1: ArrayLike | float,
    x2: ArrayLike | float = 0,
    where: Sequence[bool] | None = None,
    step: Literal["pre", "post", "mid"] | None = None,
    interpolate: bool = False,
    *,
    data=None,
    **kwargs,
) -> FillBetweenPolyCollection:
    return gca().fill_betweenx(
        y,
        x1,
        x2=x2,
        where=where,
        step=step,
        interpolate=interpolate,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.grid` on the current Axes.
@_copy_docstring_and_deprecators(Axes.grid)
def grid(
    visible: bool | None = None,
    which: Literal["major", "minor", "both"] = "major",
    axis: Literal["both", "x", "y"] = "both",
    **kwargs,
) -> None:
    gca().grid(visible=visible, which=which, axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.hexbin` on the current Axes and registers
# the returned collection as the current image (for colorbar() etc.).
@_copy_docstring_and_deprecators(Axes.hexbin)
def hexbin(
    x: ArrayLike,
    y: ArrayLike,
    C: ArrayLike | None = None,
    gridsize: int | tuple[int, int] = 100,
    bins: Literal["log"] | int | Sequence[float] | None = None,
    xscale: Literal["linear", "log"] = "linear",
    yscale: Literal["linear", "log"] = "linear",
    extent: tuple[float, float, float, float] | None = None,
    cmap: str | Colormap | None = None,
    norm: str | Normalize | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    alpha: float | None = None,
    linewidths: float | None = None,
    edgecolors: Literal["face", "none"] | ColorType = "face",
    reduce_C_function: Callable[[np.ndarray | list[float]], float] = np.mean,
    mincnt: int | None = None,
    marginals: bool = False,
    colorizer: Colorizer | None = None,
    *,
    data=None,
    **kwargs,
) -> PolyCollection:
    __ret = gca().hexbin(
        x,
        y,
        C=C,
        gridsize=gridsize,
        bins=bins,
        xscale=xscale,
        yscale=yscale,
        extent=extent,
        cmap=cmap,
        norm=norm,
        vmin=vmin,
        vmax=vmax,
        alpha=alpha,
        linewidths=linewidths,
        edgecolors=edgecolors,
        reduce_C_function=reduce_C_function,
        mincnt=mincnt,
        marginals=marginals,
        colorizer=colorizer,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.hist` on the current Axes.
@_copy_docstring_and_deprecators(Axes.hist)
def hist(
    x: ArrayLike | Sequence[ArrayLike],
    bins: int | Sequence[float] | str | None = None,
    range: tuple[float, float] | None = None,
    density: bool = False,
    weights: ArrayLike | None = None,
    cumulative: bool | float = False,
    bottom: ArrayLike | float | None = None,
    histtype: Literal["bar", "barstacked", "step", "stepfilled"] = "bar",
    align: Literal["left", "mid", "right"] = "mid",
    orientation: Literal["vertical", "horizontal"] = "vertical",
    rwidth: float | None = None,
    log: bool = False,
    color: ColorType | Sequence[ColorType] | None = None,
    label: str | Sequence[str] | None = None,
    stacked: bool = False,
    *,
    data=None,
    **kwargs,
) -> tuple[
    np.ndarray | list[np.ndarray],
    np.ndarray,
    BarContainer | Polygon | list[BarContainer | Polygon],
]:
    return gca().hist(
        x,
        bins=bins,
        range=range,
        density=density,
        weights=weights,
        cumulative=cumulative,
        bottom=bottom,
        histtype=histtype,
        align=align,
        orientation=orientation,
        rwidth=rwidth,
        log=log,
        color=color,
        label=label,
        stacked=stacked,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.stairs` on the current Axes.
@_copy_docstring_and_deprecators(Axes.stairs)
def stairs(
    values: ArrayLike,
    edges: ArrayLike | None = None,
    *,
    orientation: Literal["vertical", "horizontal"] = "vertical",
    baseline: float | ArrayLike | None = 0,
    fill: bool = False,
    data=None,
    **kwargs,
) -> StepPatch:
    return gca().stairs(
        values,
        edges=edges,
        orientation=orientation,
        baseline=baseline,
        fill=fill,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.hist2d` on the current Axes; the QuadMesh
# (last tuple element) is registered as the current image.
@_copy_docstring_and_deprecators(Axes.hist2d)
def hist2d(
    x: ArrayLike,
    y: ArrayLike,
    bins: None | int | tuple[int, int] | ArrayLike | tuple[ArrayLike, ArrayLike] = 10,
    range: ArrayLike | None = None,
    density: bool = False,
    weights: ArrayLike | None = None,
    cmin: float | None = None,
    cmax: float | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, QuadMesh]:
    __ret = gca().hist2d(
        x,
        y,
        bins=bins,
        range=range,
        density=density,
        weights=weights,
        cmin=cmin,
        cmax=cmax,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret[-1])
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.hlines` on the current Axes.
@_copy_docstring_and_deprecators(Axes.hlines)
def hlines(
    y: float | ArrayLike,
    xmin: float | ArrayLike,
    xmax: float | ArrayLike,
    colors: ColorType | Sequence[ColorType] | None = None,
    linestyles: LineStyleType = "solid",
    label: str = "",
    *,
    data=None,
    **kwargs,
) -> LineCollection:
    return gca().hlines(
        y,
        xmin,
        xmax,
        colors=colors,
        linestyles=linestyles,
        label=label,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.imshow` on the current Axes and registers
# the returned image as the current image.
@_copy_docstring_and_deprecators(Axes.imshow)
def imshow(
    X: ArrayLike | PIL.Image.Image,
    cmap: str | Colormap | None = None,
    norm: str | Normalize | None = None,
    *,
    aspect: Literal["equal", "auto"] | float | None = None,
    interpolation: str | None = None,
    alpha: float | ArrayLike | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    colorizer: Colorizer | None = None,
    origin: Literal["upper", "lower"] | None = None,
    extent: tuple[float, float, float, float] | None = None,
    interpolation_stage: Literal["data", "rgba", "auto"] | None = None,
    filternorm: bool = True,
    filterrad: float = 4.0,
    resample: bool | None = None,
    url: str | None = None,
    data=None,
    **kwargs,
) -> AxesImage:
    __ret = gca().imshow(
        X,
        cmap=cmap,
        norm=norm,
        aspect=aspect,
        interpolation=interpolation,
        alpha=alpha,
        vmin=vmin,
        vmax=vmax,
        colorizer=colorizer,
        origin=origin,
        extent=extent,
        interpolation_stage=interpolation_stage,
        filternorm=filternorm,
        filterrad=filterrad,
        resample=resample,
        url=url,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.legend` on the current Axes.
@_copy_docstring_and_deprecators(Axes.legend)
def legend(*args, **kwargs) -> Legend:
    return gca().legend(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.locator_params` on the current Axes.
@_copy_docstring_and_deprecators(Axes.locator_params)
def locator_params(
    axis: Literal["both", "x", "y"] = "both", tight: bool | None = None, **kwargs
) -> None:
    gca().locator_params(axis=axis, tight=tight, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.loglog` on the current Axes.
@_copy_docstring_and_deprecators(Axes.loglog)
def loglog(*args, **kwargs) -> list[Line2D]:
    return gca().loglog(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.magnitude_spectrum` on the current Axes.
@_copy_docstring_and_deprecators(Axes.magnitude_spectrum)
def magnitude_spectrum(
    x: ArrayLike,
    Fs: float | None = None,
    Fc: int | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    scale: Literal["default", "linear", "dB"] | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, Line2D]:
    return gca().magnitude_spectrum(
        x,
        Fs=Fs,
        Fc=Fc,
        window=window,
        pad_to=pad_to,
        sides=sides,
        scale=scale,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.margins` on the current Axes.
@_copy_docstring_and_deprecators(Axes.margins)
def margins(
    *margins: float,
    x: float | None = None,
    y: float | None = None,
    tight: bool | None = True,
) -> tuple[float, float] | None:
    return gca().margins(*margins, x=x, y=y, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.minorticks_off` on the current Axes.
@_copy_docstring_and_deprecators(Axes.minorticks_off)
def minorticks_off() -> None:
    gca().minorticks_off()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.minorticks_on` on the current Axes.
@_copy_docstring_and_deprecators(Axes.minorticks_on)
def minorticks_on() -> None:
    gca().minorticks_on()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.pcolor` on the current Axes and registers
# the returned collection as the current image.
@_copy_docstring_and_deprecators(Axes.pcolor)
def pcolor(
    *args: ArrayLike,
    shading: Literal["flat", "nearest", "auto"] | None = None,
    alpha: float | None = None,
    norm: str | Normalize | None = None,
    cmap: str | Colormap | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    colorizer: Colorizer | None = None,
    data=None,
    **kwargs,
) -> Collection:
    __ret = gca().pcolor(
        *args,
        shading=shading,
        alpha=alpha,
        norm=norm,
        cmap=cmap,
        vmin=vmin,
        vmax=vmax,
        colorizer=colorizer,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.pcolormesh` on the current Axes and
# registers the returned QuadMesh as the current image.
@_copy_docstring_and_deprecators(Axes.pcolormesh)
def pcolormesh(
    *args: ArrayLike,
    alpha: float | None = None,
    norm: str | Normalize | None = None,
    cmap: str | Colormap | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    colorizer: Colorizer | None = None,
    shading: Literal["flat", "nearest", "gouraud", "auto"] | None = None,
    antialiased: bool = False,
    data=None,
    **kwargs,
) -> QuadMesh:
    __ret = gca().pcolormesh(
        *args,
        alpha=alpha,
        norm=norm,
        cmap=cmap,
        vmin=vmin,
        vmax=vmax,
        colorizer=colorizer,
        shading=shading,
        antialiased=antialiased,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.phase_spectrum` on the current Axes.
@_copy_docstring_and_deprecators(Axes.phase_spectrum)
def phase_spectrum(
    x: ArrayLike,
    Fs: float | None = None,
    Fc: int | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, Line2D]:
    return gca().phase_spectrum(
        x,
        Fs=Fs,
        Fc=Fc,
        window=window,
        pad_to=pad_to,
        sides=sides,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.pie` on the current Axes.
@_copy_docstring_and_deprecators(Axes.pie)
def pie(
    x: ArrayLike,
    explode: ArrayLike | None = None,
    labels: Sequence[str] | None = None,
    colors: ColorType | Sequence[ColorType] | None = None,
    autopct: str | Callable[[float], str] | None = None,
    pctdistance: float = 0.6,
    shadow: bool = False,
    labeldistance: float | None = 1.1,
    startangle: float = 0,
    radius: float = 1,
    counterclock: bool = True,
    wedgeprops: dict[str, Any] | None = None,
    textprops: dict[str, Any] | None = None,
    center: tuple[float, float] = (0, 0),
    frame: bool = False,
    rotatelabels: bool = False,
    *,
    normalize: bool = True,
    hatch: str | Sequence[str] | None = None,
    data=None,
) -> tuple[list[Wedge], list[Text]] | tuple[list[Wedge], list[Text], list[Text]]:
    return gca().pie(
        x,
        explode=explode,
        labels=labels,
        colors=colors,
        autopct=autopct,
        pctdistance=pctdistance,
        shadow=shadow,
        labeldistance=labeldistance,
        startangle=startangle,
        radius=radius,
        counterclock=counterclock,
        wedgeprops=wedgeprops,
        textprops=textprops,
        center=center,
        frame=frame,
        rotatelabels=rotatelabels,
        normalize=normalize,
        hatch=hatch,
        **({"data": data} if data is not None else {}),
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.plot` on the current Axes.
@_copy_docstring_and_deprecators(Axes.plot)
def plot(
    *args: float | ArrayLike | str,
    scalex: bool = True,
    scaley: bool = True,
    data=None,
    **kwargs,
) -> list[Line2D]:
    return gca().plot(
        *args,
        scalex=scalex,
        scaley=scaley,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.plot_date` on the current Axes.
@_copy_docstring_and_deprecators(Axes.plot_date)
def plot_date(
    x: ArrayLike,
    y: ArrayLike,
    fmt: str = "o",
    tz: str | datetime.tzinfo | None = None,
    xdate: bool = True,
    ydate: bool = False,
    *,
    data=None,
    **kwargs,
) -> list[Line2D]:
    return gca().plot_date(
        x,
        y,
        fmt=fmt,
        tz=tz,
        xdate=xdate,
        ydate=ydate,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.psd` on the current Axes.
@_copy_docstring_and_deprecators(Axes.psd)
def psd(
    x: ArrayLike,
    NFFT: int | None = None,
    Fs: float | None = None,
    Fc: int | None = None,
    detrend: Literal["none", "mean", "linear"]
    | Callable[[ArrayLike], ArrayLike]
    | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    noverlap: int | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    scale_by_freq: bool | None = None,
    return_line: bool | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray] | tuple[np.ndarray, np.ndarray, Line2D]:
    return gca().psd(
        x,
        NFFT=NFFT,
        Fs=Fs,
        Fc=Fc,
        detrend=detrend,
        window=window,
        noverlap=noverlap,
        pad_to=pad_to,
        sides=sides,
        scale_by_freq=scale_by_freq,
        return_line=return_line,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.quiver` on the current Axes and registers
# the returned Quiver as the current image.
@_copy_docstring_and_deprecators(Axes.quiver)
def quiver(*args, data=None, **kwargs) -> Quiver:
    __ret = gca().quiver(
        *args, **({"data": data} if data is not None else {}), **kwargs
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.quiverkey` on the current Axes.
@_copy_docstring_and_deprecators(Axes.quiverkey)
def quiverkey(
    Q: Quiver, X: float, Y: float, U: float, label: str, **kwargs
) -> QuiverKey:
    return gca().quiverkey(Q, X, Y, U, label, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Generated shim: forwards to `Axes.scatter` on the current Axes and registers
# the returned PathCollection as the current image.
@_copy_docstring_and_deprecators(Axes.scatter)
def scatter(
    x: float | ArrayLike,
    y: float | ArrayLike,
    s: float | ArrayLike | None = None,
    c: ArrayLike | Sequence[ColorType] | ColorType | None = None,
    marker: MarkerType | None = None,
    cmap: str | Colormap | None = None,
    norm: str | Normalize | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    alpha: float | None = None,
    linewidths: float | Sequence[float] | None = None,
    *,
    edgecolors: Literal["face", "none"] | ColorType | Sequence[ColorType] | None = None,
    colorizer: Colorizer | None = None,
    plotnonfinite: bool = False,
    data=None,
    **kwargs,
) -> PathCollection:
    __ret = gca().scatter(
        x,
        y,
        s=s,
        c=c,
        marker=marker,
        cmap=cmap,
        norm=norm,
        vmin=vmin,
        vmax=vmax,
        alpha=alpha,
        linewidths=linewidths,
        edgecolors=edgecolors,
        colorizer=colorizer,
        plotnonfinite=plotnonfinite,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.semilogx)
def semilogx(*args, **kwargs) -> list[Line2D]:
    # Pyplot wrapper: forward to Axes.semilogx on the current axes.
    return gca().semilogx(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.semilogy)
def semilogy(*args, **kwargs) -> list[Line2D]:
    # Pyplot wrapper: forward to Axes.semilogy on the current axes.
    return gca().semilogy(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.specgram)
def specgram(
    x: ArrayLike,
    NFFT: int | None = None,
    Fs: float | None = None,
    Fc: int | None = None,
    detrend: Literal["none", "mean", "linear"]
    | Callable[[ArrayLike], ArrayLike]
    | None = None,
    window: Callable[[ArrayLike], ArrayLike] | ArrayLike | None = None,
    noverlap: int | None = None,
    cmap: str | Colormap | None = None,
    xextent: tuple[float, float] | None = None,
    pad_to: int | None = None,
    sides: Literal["default", "onesided", "twosided"] | None = None,
    scale_by_freq: bool | None = None,
    mode: Literal["default", "psd", "magnitude", "angle", "phase"] | None = None,
    scale: Literal["default", "linear", "dB"] | None = None,
    vmin: float | None = None,
    vmax: float | None = None,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, AxesImage]:
    # Pyplot wrapper: forward to Axes.specgram on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    __ret = gca().specgram(
        x,
        NFFT=NFFT,
        Fs=Fs,
        Fc=Fc,
        detrend=detrend,
        window=window,
        noverlap=noverlap,
        cmap=cmap,
        xextent=xextent,
        pad_to=pad_to,
        sides=sides,
        scale_by_freq=scale_by_freq,
        mode=mode,
        scale=scale,
        vmin=vmin,
        vmax=vmax,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
    # The last element of the returned tuple is the image; register it as the
    # "current image" (see `sci`).
    sci(__ret[-1])
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.spy)
def spy(
    Z: ArrayLike,
    precision: float | Literal["present"] = 0,
    marker: str | None = None,
    markersize: float | None = None,
    aspect: Literal["equal", "auto"] | float | None = "equal",
    origin: Literal["upper", "lower"] = "upper",
    **kwargs,
) -> AxesImage:
    # Pyplot wrapper: forward to Axes.spy on the current axes.
    __ret = gca().spy(
        Z,
        precision=precision,
        marker=marker,
        markersize=markersize,
        aspect=aspect,
        origin=origin,
        **kwargs,
    )
    # Only colorizable results are registered as the "current image"; Axes.spy
    # can presumably also return non-colorizable artists — hence the guard.
    if isinstance(__ret, _ColorizerInterface):
        sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.stackplot)
def stackplot(
    x, *args, labels=(), colors=None, hatch=None, baseline="zero", data=None, **kwargs
):
    # Pyplot wrapper: forward to Axes.stackplot on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    return gca().stackplot(
        x,
        *args,
        labels=labels,
        colors=colors,
        hatch=hatch,
        baseline=baseline,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.stem)
def stem(
    *args: ArrayLike | str,
    linefmt: str | None = None,
    markerfmt: str | None = None,
    basefmt: str | None = None,
    bottom: float = 0,
    label: str | None = None,
    orientation: Literal["vertical", "horizontal"] = "vertical",
    data=None,
) -> StemContainer:
    # Pyplot wrapper: forward to Axes.stem on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    return gca().stem(
        *args,
        linefmt=linefmt,
        markerfmt=markerfmt,
        basefmt=basefmt,
        bottom=bottom,
        label=label,
        orientation=orientation,
        **({"data": data} if data is not None else {}),
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.step)
def step(
    x: ArrayLike,
    y: ArrayLike,
    *args,
    where: Literal["pre", "post", "mid"] = "pre",
    data=None,
    **kwargs,
) -> list[Line2D]:
    # Pyplot wrapper: forward to Axes.step on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    return gca().step(
        x,
        y,
        *args,
        where=where,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.streamplot)
def streamplot(
    x,
    y,
    u,
    v,
    density=1,
    linewidth=None,
    color=None,
    cmap=None,
    norm=None,
    arrowsize=1,
    arrowstyle="-|>",
    minlength=0.1,
    transform=None,
    zorder=None,
    start_points=None,
    maxlength=4.0,
    integration_direction="both",
    broken_streamlines=True,
    *,
    data=None,
):
    # Pyplot wrapper: forward to Axes.streamplot on the current axes; *data*
    # is passed through only when supplied so the Axes default applies.
    __ret = gca().streamplot(
        x,
        y,
        u,
        v,
        density=density,
        linewidth=linewidth,
        color=color,
        cmap=cmap,
        norm=norm,
        arrowsize=arrowsize,
        arrowstyle=arrowstyle,
        minlength=minlength,
        transform=transform,
        zorder=zorder,
        start_points=start_points,
        maxlength=maxlength,
        integration_direction=integration_direction,
        broken_streamlines=broken_streamlines,
        **({"data": data} if data is not None else {}),
    )
    # Register the stream-line collection (the `.lines` attribute of the
    # returned container) as the "current image" (see `sci`).
    sci(__ret.lines)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.table)
def table(
    cellText=None,
    cellColours=None,
    cellLoc="right",
    colWidths=None,
    rowLabels=None,
    rowColours=None,
    rowLoc="left",
    colLabels=None,
    colColours=None,
    colLoc="center",
    loc="bottom",
    bbox=None,
    edges="closed",
    **kwargs,
):
    # Pyplot wrapper: forward to Axes.table on the current axes.
    return gca().table(
        cellText=cellText,
        cellColours=cellColours,
        cellLoc=cellLoc,
        colWidths=colWidths,
        rowLabels=rowLabels,
        rowColours=rowColours,
        rowLoc=rowLoc,
        colLabels=colLabels,
        colColours=colColours,
        colLoc=colLoc,
        loc=loc,
        bbox=bbox,
        edges=edges,
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.text)
def text(
    x: float, y: float, s: str, fontdict: dict[str, Any] | None = None, **kwargs
) -> Text:
    # Pyplot wrapper: forward to Axes.text on the current axes.
    return gca().text(x, y, s, fontdict=fontdict, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tick_params)
def tick_params(axis: Literal["both", "x", "y"] = "both", **kwargs) -> None:
    # Pyplot wrapper: forward to Axes.tick_params on the current axes.
    gca().tick_params(axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.ticklabel_format)
def ticklabel_format(
    *,
    axis: Literal["both", "x", "y"] = "both",
    style: Literal["", "sci", "scientific", "plain"] | None = None,
    scilimits: tuple[int, int] | None = None,
    useOffset: bool | float | None = None,
    useLocale: bool | None = None,
    useMathText: bool | None = None,
) -> None:
    # Pyplot wrapper: forward to Axes.ticklabel_format on the current axes.
    gca().ticklabel_format(
        axis=axis,
        style=style,
        scilimits=scilimits,
        useOffset=useOffset,
        useLocale=useLocale,
        useMathText=useMathText,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tricontour)
def tricontour(*args, **kwargs):
    # Pyplot wrapper: forward to Axes.tricontour on the current axes.
    __ret = gca().tricontour(*args, **kwargs)
    # Register as "current image" only when the contour set carries mappable
    # array data (private `_A` attribute set).
    if __ret._A is not None:  # type: ignore[attr-defined]
        sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tricontourf)
def tricontourf(*args, **kwargs):
    # Pyplot wrapper: forward to Axes.tricontourf on the current axes.
    __ret = gca().tricontourf(*args, **kwargs)
    # Register as "current image" only when the contour set carries mappable
    # array data (private `_A` attribute set).
    if __ret._A is not None:  # type: ignore[attr-defined]
        sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tripcolor)
def tripcolor(
    *args,
    alpha=1.0,
    norm=None,
    cmap=None,
    vmin=None,
    vmax=None,
    shading="flat",
    facecolors=None,
    **kwargs,
):
    # Pyplot wrapper: forward to Axes.tripcolor on the current axes.
    __ret = gca().tripcolor(
        *args,
        alpha=alpha,
        norm=norm,
        cmap=cmap,
        vmin=vmin,
        vmax=vmax,
        shading=shading,
        facecolors=facecolors,
        **kwargs,
    )
    # Register the result as the "current image" (see `sci`).
    sci(__ret)
    return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.triplot)
def triplot(*args, **kwargs):
    # Pyplot wrapper: forward to Axes.triplot on the current axes.
    return gca().triplot(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.violinplot)
def violinplot(
    dataset: ArrayLike | Sequence[ArrayLike],
    positions: ArrayLike | None = None,
    vert: bool | None = None,
    orientation: Literal["vertical", "horizontal"] = "vertical",
    widths: float | ArrayLike = 0.5,
    showmeans: bool = False,
    showextrema: bool = True,
    showmedians: bool = False,
    quantiles: Sequence[float | Sequence[float]] | None = None,
    points: int = 100,
    bw_method: Literal["scott", "silverman"]
    | float
    | Callable[[GaussianKDE], float]
    | None = None,
    side: Literal["both", "low", "high"] = "both",
    *,
    data=None,
) -> dict[str, Collection]:
    # Pyplot wrapper: forward to Axes.violinplot on the current axes; *data*
    # is passed through only when supplied so the Axes default applies.
    return gca().violinplot(
        dataset,
        positions=positions,
        vert=vert,
        orientation=orientation,
        widths=widths,
        showmeans=showmeans,
        showextrema=showextrema,
        showmedians=showmedians,
        quantiles=quantiles,
        points=points,
        bw_method=bw_method,
        side=side,
        **({"data": data} if data is not None else {}),
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.vlines)
def vlines(
    x: float | ArrayLike,
    ymin: float | ArrayLike,
    ymax: float | ArrayLike,
    colors: ColorType | Sequence[ColorType] | None = None,
    linestyles: LineStyleType = "solid",
    label: str = "",
    *,
    data=None,
    **kwargs,
) -> LineCollection:
    # Pyplot wrapper: forward to Axes.vlines on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    return gca().vlines(
        x,
        ymin,
        ymax,
        colors=colors,
        linestyles=linestyles,
        label=label,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.xcorr)
def xcorr(
    x: ArrayLike,
    y: ArrayLike,
    normed: bool = True,
    detrend: Callable[[ArrayLike], ArrayLike] = mlab.detrend_none,
    usevlines: bool = True,
    maxlags: int = 10,
    *,
    data=None,
    **kwargs,
) -> tuple[np.ndarray, np.ndarray, LineCollection | Line2D, Line2D | None]:
    # Pyplot wrapper: forward to Axes.xcorr on the current axes; *data* is
    # passed through only when supplied so the Axes default applies.
    return gca().xcorr(
        x,
        y,
        normed=normed,
        detrend=detrend,
        usevlines=usevlines,
        maxlags=maxlags,
        **({"data": data} if data is not None else {}),
        **kwargs,
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes._sci)
def sci(im: ColorizingArtist) -> None:
    # Pyplot wrapper: set *im* as the current image via the private
    # Axes._sci method on the current axes.
    gca()._sci(im)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_title)
def title(
    label: str,
    fontdict: dict[str, Any] | None = None,
    loc: Literal["left", "center", "right"] | None = None,
    pad: float | None = None,
    *,
    y: float | None = None,
    **kwargs,
) -> Text:
    # Pyplot wrapper: `title` maps onto Axes.set_title of the current axes.
    return gca().set_title(label, fontdict=fontdict, loc=loc, pad=pad, y=y, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_xlabel)
def xlabel(
    xlabel: str,
    fontdict: dict[str, Any] | None = None,
    labelpad: float | None = None,
    *,
    loc: Literal["left", "center", "right"] | None = None,
    **kwargs,
) -> Text:
    # Pyplot wrapper: `xlabel` maps onto Axes.set_xlabel of the current axes.
    return gca().set_xlabel(
        xlabel, fontdict=fontdict, labelpad=labelpad, loc=loc, **kwargs
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_ylabel)
def ylabel(
    ylabel: str,
    fontdict: dict[str, Any] | None = None,
    labelpad: float | None = None,
    *,
    loc: Literal["bottom", "center", "top"] | None = None,
    **kwargs,
) -> Text:
    # Pyplot wrapper: `ylabel` maps onto Axes.set_ylabel of the current axes.
    return gca().set_ylabel(
        ylabel, fontdict=fontdict, labelpad=labelpad, loc=loc, **kwargs
    )
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_xscale)
def xscale(value: str | ScaleBase, **kwargs) -> None:
    # Pyplot wrapper: `xscale` maps onto Axes.set_xscale of the current axes.
    gca().set_xscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_yscale)
def yscale(value: str | ScaleBase, **kwargs) -> None:
    # Pyplot wrapper: `yscale` maps onto Axes.set_yscale of the current axes.
    gca().set_yscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def autumn() -> None:
    """
    Set the colormap to 'autumn'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("autumn")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def bone() -> None:
    """
    Set the colormap to 'bone'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("bone")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def cool() -> None:
    """
    Set the colormap to 'cool'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("cool")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def copper() -> None:
    """
    Set the colormap to 'copper'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("copper")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def flag() -> None:
    """
    Set the colormap to 'flag'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("flag")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def gray() -> None:
    """
    Set the colormap to 'gray'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("gray")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def hot() -> None:
    """
    Set the colormap to 'hot'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("hot")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def hsv() -> None:
    """
    Set the colormap to 'hsv'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("hsv")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def jet() -> None:
    """
    Set the colormap to 'jet'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("jet")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def pink() -> None:
    """
    Set the colormap to 'pink'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("pink")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def prism() -> None:
    """
    Set the colormap to 'prism'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("prism")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def spring() -> None:
    """
    Set the colormap to 'spring'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("spring")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def summer() -> None:
    """
    Set the colormap to 'summer'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("summer")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def winter() -> None:
    """
    Set the colormap to 'winter'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("winter")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def magma() -> None:
    """
    Set the colormap to 'magma'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("magma")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def inferno() -> None:
    """
    Set the colormap to 'inferno'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("inferno")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def plasma() -> None:
    """
    Set the colormap to 'plasma'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("plasma")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def viridis() -> None:
    """
    Set the colormap to 'viridis'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("viridis")
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
def nipy_spectral() -> None:
    """
    Set the colormap to 'nipy_spectral'.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
    set_cmap("nipy_spectral")
# venv\Lib\site-packages\matplotlib\quiver.py
"""
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import math
import numpy as np
from numpy import ma
from matplotlib import _api, cbook, _docstring
import matplotlib.artist as martist
import matplotlib.collections as mcollections
from matplotlib.patches import CirclePolygon
import matplotlib.text as mtext
import matplotlib.transforms as transforms
# %-interpolated docstring template for quiver; registered below under the
# name ``quiver_doc`` so it can be reused via _docstring interpolation.
# NOTE: the string content must stay byte-identical — it is runtime data.
_quiver_doc = """
Plot a 2D field of arrows.
Call signature::
quiver([X, Y], U, V, [C], /, **kwargs)
*X*, *Y* define the arrow locations, *U*, *V* define the arrow directions, and
*C* optionally sets the color. The arguments *X*, *Y*, *U*, *V*, *C* are
positional-only.
**Arrow length**
The default settings auto-scales the length of the arrows to a reasonable size.
To change this behavior see the *scale* and *scale_units* parameters.
**Arrow shape**
The arrow shape is determined by *width*, *headwidth*, *headlength* and
*headaxislength*. See the notes below.
**Arrow styling**
Each arrow is internally represented by a filled polygon with a default edge
linewidth of 0. As a result, an arrow is rather a filled area, not a line with
a head, and `.PolyCollection` properties like *linewidth*, *edgecolor*,
*facecolor*, etc. act accordingly.
Parameters
----------
X, Y : 1D or 2D array-like, optional
The x and y coordinates of the arrow locations.
If not given, they will be generated as a uniform integer meshgrid based
on the dimensions of *U* and *V*.
If *X* and *Y* are 1D but *U*, *V* are 2D, *X*, *Y* are expanded to 2D
using ``X, Y = np.meshgrid(X, Y)``. In this case ``len(X)`` and ``len(Y)``
must match the column and row dimensions of *U* and *V*.
U, V : 1D or 2D array-like
The x and y direction components of the arrow vectors. The interpretation
of these components (in data or in screen space) depends on *angles*.
*U* and *V* must have the same number of elements, matching the number of
arrow locations in *X*, *Y*. *U* and *V* may be masked. Locations masked
in any of *U*, *V*, and *C* will not be drawn.
C : 1D or 2D array-like, optional
Numeric data that defines the arrow colors by colormapping via *norm* and
*cmap*.
This does not support explicit colors. If you want to set colors directly,
use *color* instead. The size of *C* must match the number of arrow
locations.
angles : {'uv', 'xy'} or array-like, default: 'uv'
Method for determining the angle of the arrows.
- 'uv': Arrow directions are based on
:ref:`display coordinates `; i.e. a 45° angle will
always show up as diagonal on the screen, irrespective of figure or Axes
aspect ratio or Axes data ranges. This is useful when the arrows represent
a quantity whose direction is not tied to the x and y data coordinates.
If *U* == *V* the orientation of the arrow on the plot is 45 degrees
counter-clockwise from the horizontal axis (positive to the right).
- 'xy': Arrow direction in data coordinates, i.e. the arrows point from
(x, y) to (x+u, y+v). This is ideal for vector fields or gradient plots
where the arrows should directly represent movements or gradients in the
x and y directions.
- Arbitrary angles may be specified explicitly as an array of values
in degrees, counter-clockwise from the horizontal axis.
In this case *U*, *V* is only used to determine the length of the
arrows.
For example, ``angles=[30, 60, 90]`` will orient the arrows at 30, 60, and 90
degrees respectively, regardless of the *U* and *V* components.
Note: inverting a data axis will correspondingly invert the
arrows only with ``angles='xy'``.
pivot : {'tail', 'mid', 'middle', 'tip'}, default: 'tail'
The part of the arrow that is anchored to the *X*, *Y* grid. The arrow
rotates about this point.
'mid' is a synonym for 'middle'.
scale : float, optional
Scales the length of the arrow inversely.
Number of data values represented by one unit of arrow length on the plot.
For example, if the data represents velocity in meters per second (m/s), the
scale parameter determines how many meters per second correspond to one unit of
arrow length relative to the width of the plot.
Smaller scale parameter makes the arrow longer.
By default, an autoscaling algorithm is used to scale the arrow length to a
reasonable size, which is based on the average vector length and the number of
vectors.
The arrow length unit is given by the *scale_units* parameter.
scale_units : {'width', 'height', 'dots', 'inches', 'x', 'y', 'xy'}, default: 'width'
The physical image unit, which is used for rendering the scaled arrow data *U*, *V*.
The rendered arrow length is given by
length in x direction = $\\frac{u}{\\mathrm{scale}} \\mathrm{scale_unit}$
length in y direction = $\\frac{v}{\\mathrm{scale}} \\mathrm{scale_unit}$
For example, ``(u, v) = (0.5, 0)`` with ``scale=10, scale_unit="width"`` results
in a horizontal arrow with a length of *0.5 / 10 * "width"*, i.e. 0.05 times the
Axes width.
Supported values are:
- 'width' or 'height': The arrow length is scaled relative to the width or height
of the Axes.
For example, ``scale_units='width', scale=1.0``, will result in an arrow length
of width of the Axes.
- 'dots': The arrow length of the arrows is in measured in display dots (pixels).
- 'inches': Arrow lengths are scaled based on the DPI (dots per inch) of the figure.
This ensures that the arrows have a consistent physical size on the figure,
in inches, regardless of data values or plot scaling.
For example, ``(u, v) = (1, 0)`` with ``scale_units='inches', scale=2`` results
in a 0.5 inch-long arrow.
- 'x' or 'y': The arrow length is scaled relative to the x or y axis units.
For example, ``(u, v) = (0, 1)`` with ``scale_units='x', scale=1`` results
in a vertical arrow with the length of 1 x-axis unit.
- 'xy': Arrow length will be same as 'x' or 'y' units.
This is useful for creating vectors in the x-y plane where u and v have
the same units as x and y. To plot vectors in the x-y plane with u and v having
the same units as x and y, use ``angles='xy', scale_units='xy', scale=1``.
Note: Setting *scale_units* without setting scale does not have any effect because
the scale units only differ by a constant factor and that is rescaled through
autoscaling.
units : {'width', 'height', 'dots', 'inches', 'x', 'y', 'xy'}, default: 'width'
Affects the arrow size (except for the length). In particular, the shaft
*width* is measured in multiples of this unit.
Supported values are:
- 'width', 'height': The width or height of the Axes.
- 'dots', 'inches': Pixels or inches based on the figure dpi.
- 'x', 'y', 'xy': *X*, *Y* or :math:`\\sqrt{X^2 + Y^2}` in data units.
The following table summarizes how these values affect the visible arrow
size under zooming and figure size changes:
================= ================= ==================
units zoom figure size change
================= ================= ==================
'x', 'y', 'xy' arrow size scales —
'width', 'height' — arrow size scales
'dots', 'inches' — —
================= ================= ==================
width : float, optional
Shaft width in arrow units. All head parameters are relative to *width*.
The default depends on choice of *units* above, and number of vectors;
a typical starting value is about 0.005 times the width of the plot.
headwidth : float, default: 3
Head width as multiple of shaft *width*. See the notes below.
headlength : float, default: 5
Head length as multiple of shaft *width*. See the notes below.
headaxislength : float, default: 4.5
Head length at shaft intersection as multiple of shaft *width*.
See the notes below.
minshaft : float, default: 1
Length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
minlength : float, default: 1
Minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
color : :mpltype:`color` or list :mpltype:`color`, optional
Explicit color(s) for the arrows. If *C* has been set, *color* has no
effect.
This is a synonym for the `.PolyCollection` *facecolor* parameter.
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs : `~matplotlib.collections.PolyCollection` properties, optional
All other keyword arguments are passed on to `.PolyCollection`:
%(PolyCollection:kwdoc)s
Returns
-------
`~matplotlib.quiver.Quiver`
See Also
--------
.Axes.quiverkey : Add a key to a quiver plot.
Notes
-----
**Arrow shape**
The arrow is drawn as a polygon using the nodes as shown below. The values
*headwidth*, *headlength*, and *headaxislength* are in units of *width*.
.. image:: /_static/quiver_sizes.svg
:width: 500px
The defaults give a slightly swept-back arrow. Here are some guidelines how to
get other head shapes:
- To make the head a triangle, make *headaxislength* the same as *headlength*.
- To make the arrow more pointed, reduce *headwidth* or increase *headlength*
and *headaxislength*.
- To make the head smaller relative to the shaft, scale down all the head
parameters proportionally.
- To remove the head completely, set all *head* parameters to 0.
- To get a diamond-shaped head, make *headaxislength* larger than *headlength*.
- Warning: For *headaxislength* < (*headlength* / *headwidth*), the "headaxis"
nodes (i.e. the ones connecting the head with the shaft) will protrude out
of the head in forward direction so that the arrow head looks broken.
""" % _docstring.interpd.params
# Make the template available as %(quiver_doc)s in other docstrings.
_docstring.interpd.register(quiver_doc=_quiver_doc)
class QuiverKey(martist.Artist):
    """Labelled arrow for use as a quiver plot scale key."""

    # Mapping from *labelpos* ('N'/'S'/'E'/'W') to text alignment and to the
    # arrow pivot used when building the key's geometry.
    halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
    valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
    pivot = {'N': 'middle', 'S': 'middle', 'E': 'tip', 'W': 'tail'}

    def __init__(self, Q, X, Y, U, label,
                 *, angle=0, coordinates='axes', color=None, labelsep=0.1,
                 labelpos='N', labelcolor=None, fontproperties=None,
                 zorder=None, **kwargs):
        """
        Add a key to a quiver plot.

        The positioning of the key depends on *X*, *Y*, *coordinates*, and
        *labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position of
        the middle of the key arrow. If *labelpos* is 'E', *X*, *Y* positions
        the head, and if *labelpos* is 'W', *X*, *Y* positions the tail; in
        either of these two cases, *X*, *Y* is somewhere in the middle of the
        arrow+label key object.

        Parameters
        ----------
        Q : `~matplotlib.quiver.Quiver`
            A `.Quiver` object as returned by a call to `~.Axes.quiver()`.
        X, Y : float
            The location of the key.
        U : float
            The length of the key.
        label : str
            The key label (e.g., length and units of the key).
        angle : float, default: 0
            The angle of the key arrow, in degrees anti-clockwise from the
            horizontal axis.
        coordinates : {'axes', 'figure', 'data', 'inches'}, default: 'axes'
            Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
            normalized coordinate systems with (0, 0) in the lower left and
            (1, 1) in the upper right; 'data' are the axes data coordinates
            (used for the locations of the vectors in the quiver plot itself);
            'inches' is position in the figure in inches, with (0, 0) at the
            lower left corner.
        color : :mpltype:`color`
            Overrides face and edge colors from *Q*.
        labelpos : {'N', 'S', 'E', 'W'}
            Position the label above, below, to the right, to the left of the
            arrow, respectively.
        labelsep : float, default: 0.1
            Distance in inches between the arrow and the label.
        labelcolor : :mpltype:`color`, default: :rc:`text.color`
            Label color.
        fontproperties : dict, optional
            A dictionary with keyword arguments accepted by the
            `~matplotlib.font_manager.FontProperties` initializer:
            *family*, *style*, *variant*, *size*, *weight*.
        zorder : float
            The zorder of the key. The default is 0.1 above *Q*.
        **kwargs
            Any additional keyword arguments are used to override vector
            properties taken from *Q*.
        """
        super().__init__()
        self.Q = Q
        self.X = X
        self.Y = Y
        self.U = U
        self.angle = angle
        self.coord = coordinates
        self.color = color
        self.label = label
        # Stored in inches; converted to pixels lazily via the `labelsep`
        # property so DPI changes are picked up.
        self._labelsep_inches = labelsep
        self.labelpos = labelpos
        self.labelcolor = labelcolor
        self.fontproperties = fontproperties or dict()
        self.kw = kwargs
        self.text = mtext.Text(
            text=label,
            horizontalalignment=self.halign[self.labelpos],
            verticalalignment=self.valign[self.labelpos],
            fontproperties=self.fontproperties)
        if self.labelcolor is not None:
            self.text.set_color(self.labelcolor)
        self._dpi_at_last_init = None
        self.zorder = zorder if zorder is not None else Q.zorder + 0.1

    @property
    def labelsep(self):
        # Label separation in pixels: inches times the root figure's dpi.
        return self._labelsep_inches * self.Q.axes.get_figure(root=True).dpi

    def _init(self):
        # Build (or rebuild) the key's arrow geometry.  The `if True:` keeps
        # the original indentation of a disabled dpi-change check — left
        # as-is deliberately (see the trailing comment).
        if True:  # self._dpi_at_last_init != self.axes.get_figure().dpi
            # Re-initialize the parent Quiver first if its dpi went stale.
            if self.Q._dpi_at_last_init != self.Q.axes.get_figure(root=True).dpi:
                self.Q._init()
            self._set_transform()
            # Temporarily override the Quiver's pivot/Umask so the key arrow
            # is built with the key's own pivot and without masking.
            with cbook._setattr_cm(self.Q, pivot=self.pivot[self.labelpos],
                                   # Hack: save and restore the Umask
                                   Umask=ma.nomask):
                u = self.U * np.cos(np.radians(self.angle))
                v = self.U * np.sin(np.radians(self.angle))
                self.verts = self.Q._make_verts([[0., 0.]],
                                                np.array([u]), np.array([v]), 'uv')
            kwargs = self.Q.polykw
            kwargs.update(self.kw)
            self.vector = mcollections.PolyCollection(
                self.verts,
                offsets=[(self.X, self.Y)],
                offset_transform=self.get_transform(),
                **kwargs)
            if self.color is not None:
                self.vector.set_color(self.color)
            self.vector.set_transform(self.Q.get_transform())
            self.vector.set_figure(self.get_figure())
            self._dpi_at_last_init = self.Q.axes.get_figure(root=True).dpi

    def _text_shift(self):
        # Pixel offset of the label relative to (X, Y), by label position.
        return {
            "N": (0, +self.labelsep),
            "S": (0, -self.labelsep),
            "E": (+self.labelsep, 0),
            "W": (-self.labelsep, 0),
        }[self.labelpos]

    @martist.allow_rasterization
    def draw(self, renderer):
        # Geometry is rebuilt on every draw so dpi/transform changes apply.
        self._init()
        self.vector.draw(renderer)
        pos = self.get_transform().transform((self.X, self.Y))
        self.text.set_position(pos + self._text_shift())
        self.text.draw(renderer)
        self.stale = False

    def _set_transform(self):
        # Map the *coordinates* keyword to the corresponding transform.
        fig = self.Q.axes.get_figure(root=False)
        self.set_transform(_api.check_getitem({
            "data": self.Q.axes.transData,
            "axes": self.Q.axes.transAxes,
            "figure": fig.transFigure,
            "inches": fig.dpi_scale_trans,
        }, coordinates=self.coord))

    def set_figure(self, fig):
        # Keep the child text artist on the same figure as the key itself.
        super().set_figure(fig)
        self.text.set_figure(fig)

    def contains(self, mouseevent):
        if self._different_canvas(mouseevent):
            return False, {}
        # Maybe the dictionary should allow one to
        # distinguish between a text hit and a vector hit.
        if (self.text.contains(mouseevent)[0] or
                self.vector.contains(mouseevent)[0]):
            return True, {}
        return False, {}
def _parse_args(*args, caller_name='function'):
"""
Helper function to parse positional parameters for colored vector plots.
This is currently used for Quiver and Barbs.
Parameters
----------
*args : list
list of 2-5 arguments. Depending on their number they are parsed to::
U, V
U, V, C
X, Y, U, V
X, Y, U, V, C
caller_name : str
Name of the calling method (used in error messages).
"""
X = Y = C = None
nargs = len(args)
if nargs == 2:
# The use of atleast_1d allows for handling scalar arguments while also
# keeping masked arrays
U, V = np.atleast_1d(*args)
elif nargs == 3:
U, V, C = np.atleast_1d(*args)
elif nargs == 4:
X, Y, U, V = np.atleast_1d(*args)
elif nargs == 5:
X, Y, U, V, C = np.atleast_1d(*args)
else:
raise _api.nargs_error(caller_name, takes="from 2 to 5", given=nargs)
nr, nc = (1, U.shape[0]) if U.ndim == 1 else U.shape
if X is not None:
X = X.ravel()
Y = Y.ravel()
if len(X) == nc and len(Y) == nr:
X, Y = (a.ravel() for a in np.meshgrid(X, Y))
elif len(X) != len(Y):
raise ValueError('X and Y must be the same size, but '
f'X.size is {X.size} and Y.size is {Y.size}.')
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = (np.ravel(a) for a in indexgrid)
# Size validation for U, V, C is left to the set_UVC method.
return X, Y, U, V, C
def _check_consistent_shapes(*arrays):
all_shapes = {a.shape for a in arrays}
if len(all_shapes) != 1:
raise ValueError('The shapes of the passed in arrays do not match')
class Quiver(mcollections.PolyCollection):
    """
    Specialized PolyCollection for arrows.

    The only API method is set_UVC(), which can be used
    to change the size, orientation, and color of the
    arrows; their locations are fixed when the class is
    instantiated.  Possibly this method will be useful
    in animations.

    Much of the work in this class is done in the draw()
    method so that as much information as possible is available
    about the plot.  In subsequent draw() calls, recalculation
    is limited to things that might have changed, so there
    should be no performance penalty from putting the calculations
    in the draw() method.
    """

    _PIVOT_VALS = ('tail', 'middle', 'tip')

    @_docstring.Substitution(_quiver_doc)
    def __init__(self, ax, *args,
                 scale=None, headwidth=3, headlength=5, headaxislength=4.5,
                 minshaft=1, minlength=1, units='width', scale_units=None,
                 angles='uv', width=None, color='k', pivot='tail', **kwargs):
        """
        The constructor takes one required argument, an Axes
        instance, followed by the args and kwargs described
        by the following pyplot interface documentation:

        %s
        """
        self._axes = ax  # The attr actually set by the Artist.axes property.
        X, Y, U, V, C = _parse_args(*args, caller_name='quiver')
        self.X = X
        self.Y = Y
        self.XY = np.column_stack((X, Y))
        self.N = len(X)  # number of arrow positions
        self.scale = scale
        self.headwidth = headwidth
        self.headlength = float(headlength)
        self.headaxislength = headaxislength
        self.minshaft = minshaft
        self.minlength = minlength
        self.units = units
        self.scale_units = scale_units
        self.angles = angles
        self.width = width

        # 'mid' is accepted as an alias for 'middle'.
        if pivot.lower() == 'mid':
            pivot = 'middle'
        self.pivot = pivot.lower()
        _api.check_in_list(self._PIVOT_VALS, pivot=self.pivot)

        self.transform = kwargs.pop('transform', ax.transData)
        kwargs.setdefault('facecolors', color)
        kwargs.setdefault('linewidths', (0,))
        super().__init__([], offsets=self.XY, offset_transform=self.transform,
                         closed=False, **kwargs)
        # Kept so that QuiverKey can build a matching arrow polygon.
        self.polykw = kwargs
        self.set_UVC(U, V, C)
        self._dpi_at_last_init = None

    def _init(self):
        """
        Initialization delayed until first draw;
        allow time for axes setup.
        """
        # It seems that there are not enough event notifications
        # available to have this work on an as-needed basis at present.
        if True:  # self._dpi_at_last_init != self.axes.figure.dpi
            trans = self._set_transform()
            self.span = trans.inverted().transform_bbox(self.axes.bbox).width
            if self.width is None:
                # Default arrow width scales with the axes span and the
                # number of arrows (clipped so extremes stay legible).
                sn = np.clip(math.sqrt(self.N), 8, 25)
                self.width = 0.06 * self.span / sn

            # _make_verts sets self.scale if not already specified
            if (self._dpi_at_last_init != self.axes.get_figure(root=True).dpi
                    and self.scale is None):
                self._make_verts(self.XY, self.U, self.V, self.angles)

            self._dpi_at_last_init = self.axes.get_figure(root=True).dpi

    def get_datalim(self, transData):
        """Return the bounding box of the arrow offsets in *transData* coords."""
        trans = self.get_transform()
        offset_trf = self.get_offset_transform()
        full_transform = (trans - transData) + (offset_trf - transData)
        XY = full_transform.transform(self.XY)
        bbox = transforms.Bbox.null()
        bbox.update_from_data_xy(XY, ignore=True)
        return bbox

    @martist.allow_rasterization
    def draw(self, renderer):
        """Recompute the arrow polygons and draw them."""
        self._init()
        verts = self._make_verts(self.XY, self.U, self.V, self.angles)
        self.set_verts(verts, closed=False)
        super().draw(renderer)
        self.stale = False

    def set_UVC(self, U, V, C=None):
        """Set the arrow components *U*, *V* and optional color array *C*."""
        # We need to ensure we have a copy, not a reference
        # to an array that might change before draw().
        U = ma.masked_invalid(U, copy=True).ravel()
        V = ma.masked_invalid(V, copy=True).ravel()
        if C is not None:
            C = ma.masked_invalid(C, copy=True).ravel()
        for name, var in zip(('U', 'V', 'C'), (U, V, C)):
            if not (var is None or var.size == self.N or var.size == 1):
                raise ValueError(f'Argument {name} has a size {var.size}'
                                 f' which does not match {self.N},'
                                 ' the number of arrow positions')

        # Combine the masks of U, V (and C) so a masked component hides the
        # whole arrow.
        mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
        if C is not None:
            mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
            if mask is ma.nomask:
                C = C.filled()
            else:
                C = ma.array(C, mask=mask, copy=False)
        self.U = U.filled(1)
        self.V = V.filled(1)
        self.Umask = mask
        if C is not None:
            self.set_array(C)
        self.stale = True

    def _dots_per_unit(self, units):
        """Return a scale factor for converting from units to pixels."""
        bb = self.axes.bbox
        vl = self.axes.viewLim
        return _api.check_getitem({
            'x': bb.width / vl.width,
            'y': bb.height / vl.height,
            'xy': np.hypot(*bb.size) / np.hypot(*vl.size),
            'width': bb.width,
            'height': bb.height,
            'dots': 1.,
            'inches': self.axes.get_figure(root=True).dpi,
        }, units=units)

    def _set_transform(self):
        """
        Set the PolyCollection transform to go
        from arrow width units to pixels.
        """
        dx = self._dots_per_unit(self.units)
        self._trans_scale = dx  # pixels per arrow width unit
        trans = transforms.Affine2D().scale(dx)
        self.set_transform(trans)
        return trans

    # Calculate angles and lengths for segment between (x, y), (x+u, y+v)
    def _angles_lengths(self, XY, U, V, eps=1):
        xy = self.axes.transData.transform(XY)
        uv = np.column_stack((U, V))
        xyp = self.axes.transData.transform(XY + eps * uv)
        dxy = xyp - xy
        angles = np.arctan2(dxy[:, 1], dxy[:, 0])
        lengths = np.hypot(*dxy.T) / eps
        return angles, lengths

    # XY is stacked [X, Y].
    # See quiver() doc for meaning of X, Y, U, V, angles.
    def _make_verts(self, XY, U, V, angles):
        """Build the arrow polygon vertices; also sets self.scale when unset."""
        uv = (U + V * 1j)
        str_angles = angles if isinstance(angles, str) else ''
        if str_angles == 'xy' and self.scale_units == 'xy':
            # Here eps is 1 so that if we get U, V by diffing
            # the X, Y arrays, the vectors will connect the
            # points, regardless of the axis scaling (including log).
            angles, lengths = self._angles_lengths(XY, U, V, eps=1)
        elif str_angles == 'xy' or self.scale_units == 'xy':
            # Calculate eps based on the extents of the plot
            # so that we don't end up with roundoff error from
            # adding a small number to a large.
            eps = np.abs(self.axes.dataLim.extents).max() * 0.001
            angles, lengths = self._angles_lengths(XY, U, V, eps=eps)
        if str_angles and self.scale_units == 'xy':
            a = lengths
        else:
            a = np.abs(uv)
        if self.scale is None:
            sn = max(10, math.sqrt(self.N))
            if self.Umask is not ma.nomask:
                amean = a[~self.Umask].mean()
            else:
                amean = a.mean()
            # crude auto-scaling
            # scale is typical arrow length as a multiple of the arrow width
            scale = 1.8 * amean * sn / self.span
        if self.scale_units is None:
            if self.scale is None:
                self.scale = scale
            widthu_per_lenu = 1.0
        else:
            if self.scale_units == 'xy':
                dx = 1
            else:
                dx = self._dots_per_unit(self.scale_units)
            widthu_per_lenu = dx / self._trans_scale
            if self.scale is None:
                self.scale = scale * widthu_per_lenu
        length = a * (widthu_per_lenu / (self.scale * self.width))
        X, Y = self._h_arrows(length)
        if str_angles == 'xy':
            theta = angles
        elif str_angles == 'uv':
            theta = np.angle(uv)
        else:
            theta = ma.masked_invalid(np.deg2rad(angles)).filled(0)
        theta = theta.reshape((-1, 1))  # for broadcasting
        # Rotate the horizontal template arrows by theta via complex rotation.
        xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
        XY = np.stack((xy.real, xy.imag), axis=2)
        if self.Umask is not ma.nomask:
            XY = ma.array(XY)
            XY[self.Umask] = ma.masked
            # This might be handled more efficiently with nans, given
            # that nans will end up in the paths anyway.
        return XY

    def _h_arrows(self, length):
        """Length is in arrow width units."""
        # It might be possible to streamline the code
        # and speed it up a bit by using complex (x, y)
        # instead of separate arrays; but any gain would be slight.
        minsh = self.minshaft * self.headlength
        N = len(length)
        length = length.reshape(N, 1)
        # This number is chosen based on when pixel values overflow in Agg
        # causing rendering errors
        # length = np.minimum(length, 2 ** 16)
        np.clip(length, 0, 2 ** 16, out=length)
        # x, y: normal horizontal arrow
        x = np.array([0, -self.headaxislength,
                      -self.headlength, 0],
                     np.float64)
        x = x + np.array([0, 1, 1, 1]) * length
        y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
        y = np.repeat(y[np.newaxis, :], N, axis=0)
        # x0, y0: arrow without shaft, for short vectors
        x0 = np.array([0, minsh - self.headaxislength,
                       minsh - self.headlength, minsh], np.float64)
        y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
        # Index pattern tracing the outline: out along the top, back along
        # the (mirrored) bottom.
        ii = [0, 1, 2, 3, 2, 1, 0, 0]
        X = x[:, ii]
        Y = y[:, ii]
        Y[:, 3:-1] *= -1
        X0 = x0[ii]
        Y0 = y0[ii]
        Y0[3:-1] *= -1
        shrink = length / minsh if minsh != 0. else 0.
        X0 = shrink * X0[np.newaxis, :]
        Y0 = shrink * Y0[np.newaxis, :]
        short = np.repeat(length < minsh, 8, axis=1)
        # Now select X0, Y0 if short, otherwise X, Y
        np.copyto(X, X0, where=short)
        np.copyto(Y, Y0, where=short)
        if self.pivot == 'middle':
            X -= 0.5 * X[:, 3, np.newaxis]
        elif self.pivot == 'tip':
            # numpy bug? using -= does not work here unless we multiply by a
            # float first, as with 'mid'.
            X = X - X[:, 3, np.newaxis]
        elif self.pivot != 'tail':
            _api.check_in_list(["middle", "tip", "tail"], pivot=self.pivot)
        tooshort = length < self.minlength
        if tooshort.any():
            # Use a heptagonal dot:
            th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
            x1 = np.cos(th) * self.minlength * 0.5
            y1 = np.sin(th) * self.minlength * 0.5
            X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
            Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
            tooshort = np.repeat(tooshort, 8, 1)
            np.copyto(X, X1, where=tooshort)
            np.copyto(Y, Y1, where=tooshort)
        # Mask handling is deferred to the caller, _make_verts.
        return X, Y
_barbs_doc = r"""
Plot a 2D field of wind barbs.
Call signature::
barbs([X, Y], U, V, [C], /, **kwargs)
Where *X*, *Y* define the barb locations, *U*, *V* define the barb
directions, and *C* optionally sets the color.
The arguments *X*, *Y*, *U*, *V*, *C* are positional-only and may be
1D or 2D. *U*, *V*, *C* may be masked arrays, but masked *X*, *Y*
are not supported at present.
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as show
schematically below::
: /\ \
: / \ \
: / \ \ \
: / \ \ \
: ------------------------------
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
See also https://en.wikipedia.org/wiki/Wind_barb.
Parameters
----------
X, Y : 1D or 2D array-like, optional
The x and y coordinates of the barb locations. See *pivot* for how the
barbs are drawn to the x, y positions.
If not given, they will be generated as a uniform integer meshgrid based
on the dimensions of *U* and *V*.
If *X* and *Y* are 1D but *U*, *V* are 2D, *X*, *Y* are expanded to 2D
using ``X, Y = np.meshgrid(X, Y)``. In this case ``len(X)`` and ``len(Y)``
must match the column and row dimensions of *U* and *V*.
U, V : 1D or 2D array-like
The x and y components of the barb shaft.
C : 1D or 2D array-like, optional
Numeric data that defines the barb colors by colormapping via *norm* and
*cmap*.
This does not support explicit colors. If you want to set colors directly,
use *barbcolor* instead.
length : float, default: 7
Length of the barb in points; the other parts of the barb
are scaled against this.
pivot : {'tip', 'middle'} or float, default: 'tip'
The part of the arrow that is anchored to the *X*, *Y* grid. The barb
rotates about this point. This can also be a number, which shifts the
start of the barb that many points away from grid point.
barbcolor : :mpltype:`color` or color sequence
The color of all parts of the barb except for the flags. This parameter
is analogous to the *edgecolor* parameter for polygons, which can be used
instead. However this parameter will override facecolor.
flagcolor : :mpltype:`color` or color sequence
The color of any flags on the barb. This parameter is analogous to the
*facecolor* parameter for polygons, which can be used instead. However,
this parameter will override facecolor. If this is not set (and *C* has
not either) then *flagcolor* will be set to match *barbcolor* so that the
barb has a uniform color. If *C* has been set, *flagcolor* has no effect.
sizes : dict, optional
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
fill_empty : bool, default: False
Whether the empty barbs (circles) that are drawn should be filled with
the flag color. If they are not filled, the center is transparent.
rounding : bool, default: True
Whether the vector magnitude should be rounded when allocating barb
components. If True, the magnitude is rounded to the nearest multiple
of the half-barb increment. If False, the magnitude is simply truncated
to the next lowest multiple.
barb_increments : dict, optional
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
flip_barb : bool or array-like of bool, default: False
Whether the lines and flags should point opposite to normal.
Normal behavior is for the barbs and lines to point right (comes from wind
barbs having these features point towards low pressure in the Northern
Hemisphere).
A single value is applied to all barbs. Individual barbs can be flipped by
passing a bool array of the same size as *U* and *V*.
Returns
-------
barbs : `~matplotlib.quiver.Barbs`
Other Parameters
----------------
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
The barbs can further be customized using `.PolyCollection` keyword
arguments:
%(PolyCollection:kwdoc)s
""" % _docstring.interpd.params
_docstring.interpd.register(barbs_doc=_barbs_doc)
class Barbs(mcollections.PolyCollection):
    """
    Specialized PolyCollection for barbs.

    The only API method is :meth:`set_UVC`, which can be used to
    change the size, orientation, and color of the arrows.  Locations
    are changed using the :meth:`set_offsets` collection method.
    Possibly this method will be useful in animations.

    There is one internal function :meth:`_find_tails` which finds
    exactly what should be put on the barb given the vector magnitude.
    From there :meth:`_make_barbs` is used to find the vertices of the
    polygon to represent the barb based on this information.
    """

    # This may be an abuse of polygons here to render what is essentially maybe
    # 1 triangle and a series of lines.  It works fine as far as I can tell
    # however.

    @_docstring.interpd
    def __init__(self, ax, *args,
                 pivot='tip', length=7, barbcolor=None, flagcolor=None,
                 sizes=None, fill_empty=False, barb_increments=None,
                 rounding=True, flip_barb=False, **kwargs):
        """
        The constructor takes one required argument, an Axes
        instance, followed by the args and kwargs described
        by the following pyplot interface documentation:

        %(barbs_doc)s
        """
        self.sizes = sizes or dict()
        self.fill_empty = fill_empty
        self.barb_increments = barb_increments or dict()
        self.rounding = rounding
        self.flip = np.atleast_1d(flip_barb)
        transform = kwargs.pop('transform', ax.transData)
        self._pivot = pivot
        self._length = length

        # Flagcolor and barbcolor provide convenience parameters for
        # setting the facecolor and edgecolor, respectively, of the barb
        # polygon.  We also work here to make the flag the same color as the
        # rest of the barb by default
        if None in (barbcolor, flagcolor):
            kwargs['edgecolors'] = 'face'
            if flagcolor:
                kwargs['facecolors'] = flagcolor
            elif barbcolor:
                kwargs['facecolors'] = barbcolor
            else:
                # Set to facecolor passed in or default to black
                kwargs.setdefault('facecolors', 'k')
        else:
            kwargs['edgecolors'] = barbcolor
            kwargs['facecolors'] = flagcolor

        # Explicitly set a line width if we're not given one, otherwise
        # polygons are not outlined and we get no barbs
        if 'linewidth' not in kwargs and 'lw' not in kwargs:
            kwargs['linewidth'] = 1

        # Parse out the data arrays from the various configurations supported
        x, y, u, v, c = _parse_args(*args, caller_name='barbs')
        self.x = x
        self.y = y
        xy = np.column_stack((x, y))

        # Make a collection
        barb_size = self._length ** 2 / 4  # Empirically determined
        super().__init__(
            [], (barb_size,), offsets=xy, offset_transform=transform, **kwargs)
        self.set_transform(transforms.IdentityTransform())

        self.set_UVC(u, v, c)

    def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
        """
        Find how many of each of the tail pieces is necessary.

        Parameters
        ----------
        mag : `~numpy.ndarray`
            Vector magnitudes; must be non-negative (and an actual ndarray).
        rounding : bool, default: True
            Whether to round or to truncate to the nearest half-barb.
        half, full, flag : float, defaults: 5, 10, 50
            Increments for a half-barb, a barb, and a flag.

        Returns
        -------
        n_flags, n_barbs : int array
            For each entry in *mag*, the number of flags and barbs.
        half_flag : bool array
            For each entry in *mag*, whether a half-barb is needed.
        empty_flag : bool array
            For each entry in *mag*, whether nothing is drawn.
        """
        # If rounding, round to the nearest multiple of half, the smallest
        # increment
        if rounding:
            mag = half * np.around(mag / half)
        # Peel off flags first, then full barbs; the remainder decides the
        # half-barb.
        n_flags, mag = divmod(mag, flag)
        n_barb, mag = divmod(mag, full)
        half_flag = mag >= half
        empty_flag = ~(half_flag | (n_flags > 0) | (n_barb > 0))
        return n_flags.astype(int), n_barb.astype(int), half_flag, empty_flag

    def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
                    pivot, sizes, fill_empty, flip):
        """
        Create the wind barbs.

        Parameters
        ----------
        u, v
            Components of the vector in the x and y directions, respectively.
        nflags, nbarbs, half_barb, empty_flag
            Respectively, the number of flags, number of barbs, flag for
            half a barb, and flag for empty barb, ostensibly obtained from
            :meth:`_find_tails`.
        length
            The length of the barb staff in points.
        pivot : {"tip", "middle"} or number
            The point on the barb around which the entire barb should be
            rotated.  If a number, the start of the barb is shifted by that
            many points from the origin.
        sizes : dict
            Coefficients specifying the ratio of a given feature to the length
            of the barb. These features include:

            - *spacing*: space between features (flags, full/half barbs).
            - *height*: distance from shaft of top of a flag or full barb.
            - *width*: width of a flag, twice the width of a full barb.
            - *emptybarb*: radius of the circle used for low magnitudes.
        fill_empty : bool
            Whether the circle representing an empty barb should be filled or
            not (this changes the drawing of the polygon).
        flip : list of bool
            Whether the features should be flipped to the other side of the
            barb (useful for winds in the southern hemisphere).

        Returns
        -------
        list of arrays of vertices
            Polygon vertices for each of the wind barbs.  These polygons have
            been rotated to properly align with the vector direction.
        """
        # These control the spacing and size of barb elements relative to the
        # length of the shaft
        spacing = length * sizes.get('spacing', 0.125)
        full_height = length * sizes.get('height', 0.4)
        full_width = length * sizes.get('width', 0.25)
        empty_rad = length * sizes.get('emptybarb', 0.15)

        # Controls y point where to pivot the barb.
        pivot_points = dict(tip=0.0, middle=-length / 2.)

        endx = 0.0
        # A numeric pivot shifts the staff start; otherwise look up the name.
        try:
            endy = float(pivot)
        except ValueError:
            endy = pivot_points[pivot.lower()]

        # Get the appropriate angle for the vector components.  The offset is
        # due to the way the barb is initially drawn, going down the y-axis.
        # This makes sense in a meteorological mode of thinking since there 0
        # degrees corresponds to north (the y-axis traditionally)
        angles = -(ma.arctan2(v, u) + np.pi / 2)

        # Used for low magnitude.  We just get the vertices, so if we make it
        # out here, it can be reused.  The center set here should put the
        # center of the circle at the location(offset), rather than at the
        # same point as the barb pivot; this seems more sensible.
        circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
        if fill_empty:
            empty_barb = circ
        else:
            # If we don't want the empty one filled, we make a degenerate
            # polygon that wraps back over itself
            empty_barb = np.concatenate((circ, circ[::-1]))

        barb_list = []
        for index, angle in np.ndenumerate(angles):
            # If the vector magnitude is too weak to draw anything, plot an
            # empty circle instead
            if empty_flag[index]:
                # We can skip the transform since the circle has no preferred
                # orientation
                barb_list.append(empty_barb)
                continue

            poly_verts = [(endx, endy)]
            offset = length

            # Handle if this barb should be flipped
            barb_height = -full_height if flip[index] else full_height

            # Add vertices for each flag
            for i in range(nflags[index]):
                # The spacing that works for the barbs is a little to much for
                # the flags, but this only occurs when we have more than 1
                # flag.
                if offset != length:
                    offset += spacing / 2.
                poly_verts.extend(
                    [[endx, endy + offset],
                     [endx + barb_height, endy - full_width / 2 + offset],
                     [endx, endy - full_width + offset]])
                offset -= full_width + spacing

            # Add vertices for each barb.  These really are lines, but works
            # great adding 3 vertices that basically pull the polygon out and
            # back down the line
            for i in range(nbarbs[index]):
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + barb_height, endy + offset + full_width / 2),
                     (endx, endy + offset)])
                offset -= spacing

            # Add the vertices for half a barb, if needed
            if half_barb[index]:
                # If the half barb is the first on the staff, traditionally it
                # is offset from the end to make it easy to distinguish from a
                # barb with a full one
                if offset == length:
                    poly_verts.append((endx, endy + offset))
                    offset -= 1.5 * spacing
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + barb_height / 2, endy + offset + full_width / 4),
                     (endx, endy + offset)])

            # Rotate the barb according the angle.  Making the barb first and
            # then rotating it made the math for drawing the barb really easy.
            # Also, the transform framework makes doing the rotation simple.
            poly_verts = transforms.Affine2D().rotate(-angle).transform(
                poly_verts)
            barb_list.append(poly_verts)

        return barb_list

    def set_UVC(self, U, V, C=None):
        """Set the barb components *U*, *V* and optional color array *C*."""
        # We need to ensure we have a copy, not a reference to an array that
        # might change before draw().
        self.u = ma.masked_invalid(U, copy=True).ravel()
        self.v = ma.masked_invalid(V, copy=True).ravel()

        # Flip needs to have the same number of entries as everything else.
        # Use broadcast_to to avoid a bloated array of identical values.
        # (can't rely on actual broadcasting)
        if len(self.flip) == 1:
            flip = np.broadcast_to(self.flip, self.u.shape)
        else:
            flip = self.flip

        if C is not None:
            c = ma.masked_invalid(C, copy=True).ravel()
            # Drop any points where U, V or C is masked.
            x, y, u, v, c, flip = cbook.delete_masked_points(
                self.x.ravel(), self.y.ravel(), self.u, self.v, c,
                flip.ravel())
            _check_consistent_shapes(x, y, u, v, c, flip)
        else:
            x, y, u, v, flip = cbook.delete_masked_points(
                self.x.ravel(), self.y.ravel(), self.u, self.v, flip.ravel())
            _check_consistent_shapes(x, y, u, v, flip)

        magnitude = np.hypot(u, v)
        flags, barbs, halves, empty = self._find_tails(
            magnitude, self.rounding, **self.barb_increments)

        # Get the vertices for each of the barbs
        plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
                                      self._length, self._pivot, self.sizes,
                                      self.fill_empty, flip)
        self.set_verts(plot_barbs)

        # Set the color array
        if C is not None:
            self.set_array(c)

        # Update the offsets in case the masked data changed
        xy = np.column_stack((x, y))
        self._offsets = xy
        self.stale = True

    def set_offsets(self, xy):
        """
        Set the offsets for the barb polygons.  This saves the offsets passed
        in and masks them as appropriate for the existing U/V data.

        Parameters
        ----------
        xy : sequence of pairs of floats
        """
        self.x = xy[:, 0]
        self.y = xy[:, 1]
        x, y, u, v = cbook.delete_masked_points(
            self.x.ravel(), self.y.ravel(), self.u, self.v)
        _check_consistent_shapes(x, y, u, v)
        xy = np.column_stack((x, y))
        super().set_offsets(xy)
        self.stale = True
# ===== venv\Lib\site-packages\matplotlib\rcsetup.py =====
"""
The rcsetup module contains the validation code for customization using
Matplotlib's rc settings.
Each rc setting is assigned a function used to validate any attempted changes
to that setting. The validation functions are defined in the rcsetup module,
and are used to construct the rcParams global object which stores the settings
and is referenced throughout Matplotlib.
The default values of the rc settings are set in the default matplotlibrc file.
Any additions or deletions to the parameter set listed here should also be
propagated to the :file:`lib/matplotlib/mpl-data/matplotlibrc` in Matplotlib's
root source directory.
"""
import ast
from functools import lru_cache, reduce
from numbers import Real
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.backends import BackendFilter, backend_registry
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
# Don't let the original cycler collide with our validating cycler
from cycler import Cycler, cycler as ccycler
@_api.caching_module_getattr
class __getattr__:
    """Serve deprecated module-level attributes lazily via module __getattr__."""

    @_api.deprecated(
        "3.9",
        alternative="``matplotlib.backends.backend_registry.list_builtin"
        "(matplotlib.backends.BackendFilter.INTERACTIVE)``")
    @property
    def interactive_bk(self):
        return backend_registry.list_builtin(BackendFilter.INTERACTIVE)

    @_api.deprecated(
        "3.9",
        alternative="``matplotlib.backends.backend_registry.list_builtin"
        "(matplotlib.backends.BackendFilter.NON_INTERACTIVE)``")
    @property
    def non_interactive_bk(self):
        return backend_registry.list_builtin(BackendFilter.NON_INTERACTIVE)

    @_api.deprecated(
        "3.9",
        alternative="``matplotlib.backends.backend_registry.list_builtin()``")
    @property
    def all_backends(self):
        return backend_registry.list_builtin()
class ValidateInStrings:
    """Callable validator mapping an input onto one of a fixed set of strings."""

    def __init__(self, key, valid, ignorecase=False, *,
                 _deprecated_since=None):
        """*valid* is a list of legal strings."""
        self.key = key
        self.ignorecase = ignorecase
        self._deprecated_since = _deprecated_since

        def normalize(value):
            return value.lower() if ignorecase else value

        # Map normalized form -> canonical spelling.
        self.valid = {normalize(k): k for k in valid}

    def __call__(self, s):
        if self._deprecated_since:
            # Recover the module-level name this instance is bound to for the
            # deprecation message.
            name, = (k for k, v in globals().items() if v is self)
            _api.warn_deprecated(
                self._deprecated_since, name=name, obj_type="function")
        if self.ignorecase and isinstance(s, str):
            s = s.lower()
        if s in self.valid:
            return self.valid[s]
        msg = (f"{s!r} is not a valid value for {self.key}; supported values "
               f"are {[*self.valid.values()]}")
        is_quoted = (isinstance(s, str)
                     and (s.startswith('"') and s.endswith('"')
                          or s.startswith("'") and s.endswith("'")))
        if is_quoted and s[1:-1] in self.valid:
            msg += "; remove quotes surrounding your string"
        raise ValueError(msg)
@lru_cache
def _listify_validator(scalar_validator, allow_stringlist=False, *,
n=None, doc=None):
def f(s):
if isinstance(s, str):
try:
val = [scalar_validator(v.strip()) for v in s.split(',')
if v.strip()]
except Exception:
if allow_stringlist:
# Sometimes, a list of colors might be a single string
# of single-letter colornames. So give that a shot.
val = [scalar_validator(v.strip()) for v in s if v.strip()]
else:
raise
# Allow any ordered sequence type -- generators, np.ndarray, pd.Series
# -- but not sets, whose iteration order is non-deterministic.
elif np.iterable(s) and not isinstance(s, (set, frozenset)):
# The condition on this list comprehension will preserve the
# behavior of filtering out any empty strings (behavior was
# from the original validate_stringlist()), while allowing
# any non-string/text scalar values such as numbers and arrays.
val = [scalar_validator(v) for v in s
if not isinstance(v, str) or v]
else:
raise ValueError(
f"Expected str or other non-set iterable, but got {s}")
if n is not None and len(val) != n:
raise ValueError(
f"Expected {n} values, but there are {len(val)} values in {s}")
return val
try:
f.__name__ = f"{scalar_validator.__name__}list"
except AttributeError: # class instance.
f.__name__ = f"{type(scalar_validator).__name__}List"
f.__qualname__ = f.__qualname__.rsplit(".", 1)[0] + "." + f.__name__
f.__doc__ = doc if doc is not None else scalar_validator.__doc__
return f
def validate_any(s):
    """Accept any value unchanged (identity validator)."""
    return s


# List form: accepts a comma-separated string or any non-set iterable.
validate_anylist = _listify_validator(validate_any)
def _validate_date(s):
try:
np.datetime64(s)
return s
except ValueError:
raise ValueError(
f'{s!r} should be a string that can be parsed by numpy.datetime64')
def validate_bool(b):
    """Convert b to ``bool`` or raise."""
    if isinstance(b, str):
        b = b.lower()
    truthy = ('t', 'y', 'yes', 'on', 'true', '1', 1, True)
    falsy = ('f', 'n', 'no', 'off', 'false', '0', 0, False)
    if b in truthy:
        return True
    if b in falsy:
        return False
    raise ValueError(f'Cannot convert {b!r} to bool')
def validate_axisbelow(s):
    """Validate rcParams['axes.axisbelow']: a bool-like value or the string 'line'."""
    try:
        return validate_bool(s)
    except ValueError:
        if isinstance(s, str):
            if s == 'line':
                return 'line'
    # Neither bool-like nor 'line'.
    raise ValueError(f'{s!r} cannot be interpreted as'
                     ' True, False, or "line"')
def validate_dpi(s):
    """Confirm s is string 'figure' or convert s to float or raise."""
    if s == 'figure':
        return s
    try:
        value = float(s)
    except ValueError as exc:
        raise ValueError(f'{s!r} is not string "figure" and '
                         f'could not convert {s!r} to float') from exc
    return value
def _make_type_validator(cls, *, allow_none=False):
"""
Return a validator that converts inputs to *cls* or raises (and possibly
allows ``None`` as well).
"""
def validator(s):
if (allow_none and
(s is None or cbook._str_lower_equal(s, "none"))):
return None
if cls is str and not isinstance(s, str):
raise ValueError(f'Could not convert {s!r} to str')
try:
return cls(s)
except (TypeError, ValueError) as e:
raise ValueError(
f'Could not convert {s!r} to {cls.__name__}') from e
validator.__name__ = f"validate_{cls.__name__}"
if allow_none:
validator.__name__ += "_or_None"
validator.__qualname__ = (
validator.__qualname__.rsplit(".", 1)[0] + "." + validator.__name__)
return validator
# Scalar validators built from the generic factory, plus their list forms.
validate_string = _make_type_validator(str)
validate_string_or_None = _make_type_validator(str, allow_none=True)
validate_stringlist = _listify_validator(
    validate_string, doc='return a list of strings')
validate_int = _make_type_validator(int)
validate_int_or_None = _make_type_validator(int, allow_none=True)
validate_float = _make_type_validator(float)
validate_float_or_None = _make_type_validator(float, allow_none=True)
validate_floatlist = _listify_validator(
    validate_float, doc='return a list of floats')
def _validate_marker(s):
    """Validate a marker: an int, or any string (tried in that order).

    Raises
    ------
    ValueError
        If *s* is neither convertible to int nor a valid string.
    """
    # The previous version bound the outer exception as `e` too, but that
    # binding was immediately shadowed by the inner `as e` and never used.
    try:
        return validate_int(s)
    except ValueError:
        try:
            return validate_string(s)
        except ValueError as e:
            raise ValueError('Supported markers are [string, int]') from e
# Element-wise marker validation for prop_cycle entries.
_validate_markerlist = _listify_validator(
    _validate_marker, doc='return a list of markers')
def _validate_pathlike(s):
if isinstance(s, (str, os.PathLike)):
# Store value as str because savefig.directory needs to distinguish
# between "" (cwd) and "." (cwd, but gets updated by user selections).
return os.fsdecode(s)
else:
return validate_string(s)
def validate_fonttype(s):
    """
    Confirm that this is a Postscript or PDF font type that we know how to
    convert to.

    Accepts the numeric codes (3, 42) or the names 'type3'/'truetype'
    (case-insensitively); always returns the numeric code.
    """
    fonttypes = {'type3': 3, 'truetype': 42}
    try:
        fonttype = validate_int(s)
    except ValueError:
        # Not an int; try the symbolic names instead.
        try:
            return fonttypes[s.lower()]
        except KeyError as exc:
            raise ValueError('Supported Postscript/PDF font types are %s'
                             % list(fonttypes)) from exc
    if fonttype not in fonttypes.values():
        raise ValueError(
            'Supported Postscript/PDF font types are %s' %
            list(fonttypes.values()))
    return fonttype
# Sentinel meaning "resolve the backend automatically"; compared by identity
# in validate_backend below.
_auto_backend_sentinel = object()
def validate_backend(s):
    """Accept the auto-resolution sentinel or any registered backend name."""
    if s is not _auto_backend_sentinel \
            and not backend_registry.is_valid_backend(s):
        raise ValueError(
            f"'{s}' is not a valid value for backend; supported values are "
            f"{backend_registry.list_all()}")
    return s
def _validate_toolbar(s):
    """Validate the 'toolbar' rcParam; warn when 'toolmanager' is chosen."""
    choice = ValidateInStrings(
        'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)(s)
    if choice == 'toolmanager':
        _api.warn_external(
            "Treat the new Tool classes introduced in v1.5 as experimental "
            "for now; the API and rcParam may change in future versions.")
    return choice
def validate_color_or_inherit(s):
    """Return a valid color arg, or the literal string 'inherit'."""
    return s if cbook._str_equal(s, 'inherit') else validate_color(s)
def validate_color_or_auto(s):
    """Return a valid color arg, or the literal string 'auto'."""
    return s if cbook._str_equal(s, 'auto') else validate_color(s)
def validate_color_for_prop_cycle(s):
    """Validate a color for use inside the prop cycle itself."""
    # An N-th color-cycle reference ('C0'..'C9') inside the cycle would be
    # self-referential, so it is explicitly forbidden here.
    if isinstance(s, str) and re.match("^C[0-9]$", s):
        raise ValueError(f"Cannot put cycle reference ({s!r}) in prop_cycler")
    return validate_color(s)
def _validate_color_or_linecolor(s):
    """
    Validate a color, or one of the special strings 'linecolor',
    'markerfacecolor'/'mfc', 'markeredgecolor'/'mec' (aliases normalized
    to the long names), or None.
    """
    if cbook._str_equal(s, 'linecolor'):
        return s
    elif cbook._str_equal(s, 'mfc') or cbook._str_equal(s, 'markerfacecolor'):
        return 'markerfacecolor'
    elif cbook._str_equal(s, 'mec') or cbook._str_equal(s, 'markeredgecolor'):
        return 'markeredgecolor'
    elif s is None:
        return None
    elif isinstance(s, str) and len(s) in (6, 8):
        # Bug fix: the old condition `isinstance(s, str) and len(s) == 6 or
        # len(s) == 8` bound `and` before `or`, so a non-string of length 8
        # crashed on `'#' + s`, and 6/8-letter color *names* (e.g. 'orange')
        # were rejected because is_color_like(s) was never reached.
        stmp = '#' + s  # A hex color may be given without the leading '#'.
        if is_color_like(stmp):
            return stmp
        if s.lower() == 'none':
            return None
        if is_color_like(s):  # e.g. 6-letter named colors such as 'orange'
            return s
    elif is_color_like(s):
        return s
    raise ValueError(f'{s!r} does not look like a color arg')
def validate_color(s):
    """Return a valid color arg."""
    if isinstance(s, str):
        if s.lower() == 'none':
            return 'none'
        # A hex color may be given without the leading '#' (6 = RGB, 8 = RGBA).
        if len(s) in (6, 8):
            stmp = '#' + s
            if is_color_like(stmp):
                return stmp
    if is_color_like(s):
        return s
    # If it is still valid, it must be a tuple (as a string from matplotlibrc).
    try:
        color = ast.literal_eval(s)
    except (SyntaxError, ValueError):
        pass
    else:
        if is_color_like(color):
            return color
    raise ValueError(f'{s!r} does not look like a color arg')
# Element-wise color validation; allow_stringlist lets a bare string like
# 'rgbk' be interpreted as a list of single-letter colors.
validate_colorlist = _listify_validator(
    validate_color, allow_stringlist=True, doc='return a list of colorspecs')
def _validate_cmap(s):
    """Accept a colormap name (str) or a Colormap instance, unchanged."""
    _api.check_isinstance((str, Colormap), cmap=s)
    return s
def validate_aspect(s):
    """Return 'auto' or 'equal' unchanged, else *s* coerced to float."""
    if s == 'auto' or s == 'equal':
        return s
    try:
        return float(s)
    except ValueError as err:
        raise ValueError('not a valid aspect specification') from err
def validate_fontsize_None(s):
    """Like validate_fontsize, but additionally map None/'None' to None."""
    if s is None or s == 'None':
        return None
    return validate_fontsize(s)
def validate_fontsize(s):
    """Return a named font size (lowercased) or *s* coerced to float."""
    named = ['xx-small', 'x-small', 'small', 'medium', 'large',
             'x-large', 'xx-large', 'smaller', 'larger']
    if isinstance(s, str):
        s = s.lower()
        if s in named:
            return s
    try:
        return float(s)
    except ValueError as exc:
        raise ValueError("%s is not a valid font size. Valid font sizes "
                         "are %s." % (s, ", ".join(named))) from exc
# Element-wise font-size validation for sequence-valued rcParams.
validate_fontsizelist = _listify_validator(validate_fontsize)
def validate_fontweight(s):
    """Return *s* if it is a known named weight, else *s* coerced to int.

    Note: Historically, weights have been case-sensitive in Matplotlib.
    """
    named_weights = (
        'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
        'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black')
    if s in named_weights:
        return s
    try:
        return int(s)
    except (ValueError, TypeError) as exc:
        raise ValueError(f'{s} is not a valid font weight.') from exc
def validate_fontstretch(s):
    """Return *s* if it is a known named stretch, else *s* coerced to int.

    Note: Historically, stretch values have been case-sensitive in Matplotlib.
    """
    named_stretches = (
        'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed',
        'normal', 'semi-expanded', 'expanded', 'extra-expanded',
        'ultra-expanded')
    if s in named_stretches:
        return s
    try:
        return int(s)
    except (ValueError, TypeError) as exc:
        raise ValueError(f'{s} is not a valid font stretch.') from exc
def validate_font_properties(s):
    """Validate *s* as a fontconfig pattern; return it unchanged if parseable."""
    # parse_fontconfig_pattern raises if the pattern is malformed; the parsed
    # result itself is discarded here.
    parse_fontconfig_pattern(s)
    return s
def _validate_mathtext_fallback(s):
_fallback_fonts = ['cm', 'stix', 'stixsans']
if isinstance(s, str):
s = s.lower()
if s is None or s == 'none':
return None
elif s.lower() in _fallback_fonts:
return s
else:
raise ValueError(
f"{s} is not a valid fallback font name. Valid fallback font "
f"names are {','.join(_fallback_fonts)}. Passing 'None' will turn "
"fallback off.")
def validate_whiskers(s):
    """Accept a pair of floats, or a single float, for boxplot whiskers."""
    try:
        # Preferred form: a 2-sequence of floats (low, high percentiles).
        return _listify_validator(validate_float, n=2)(s)
    except (TypeError, ValueError):
        # Fall back to a single scalar whisker length.
        try:
            return float(s)
        except ValueError as exc:
            raise ValueError("Not a valid whisker value [float, "
                             "(float, float)]") from exc
def validate_ps_distiller(s):
    """Map 'off' values to None, else validate as 'ghostscript' or 'xpdf'."""
    if isinstance(s, str):
        s = s.lower()
    if s in ('none', None, 'false', False):
        return None
    return ValidateInStrings('ps.usedistiller', ['ghostscript', 'xpdf'])(s)
# A validator dedicated to the named line styles, based on the items in
# ls_mapper, and a list of possible strings read from Line2D.set_linestyle
# ('None'/'none'/' '/'' all mean "draw nothing").
_validate_named_linestyle = ValidateInStrings(
    'linestyle',
    [*ls_mapper.keys(), *ls_mapper.values(), 'None', 'none', ' ', ''],
    ignorecase=True)
def _validate_linestyle(ls):
"""
A validator for all possible line styles, the named ones *and*
the on-off ink sequences.
"""
if isinstance(ls, str):
try: # Look first for a valid named line style, like '--' or 'solid'.
return _validate_named_linestyle(ls)
except ValueError:
pass
try:
ls = ast.literal_eval(ls) # Parsing matplotlibrc.
except (SyntaxError, ValueError):
pass # Will error with the ValueError at the end.
def _is_iterable_not_string_like(x):
# Explicitly exclude bytes/bytearrays so that they are not
# nonsensically interpreted as sequences of numbers (codepoints).
return np.iterable(x) and not isinstance(x, (str, bytes, bytearray))
if _is_iterable_not_string_like(ls):
if len(ls) == 2 and _is_iterable_not_string_like(ls[1]):
# (offset, (on, off, on, off, ...))
offset, onoff = ls
else:
# For backcompat: (on, off, on, off, ...); the offset is implicit.
offset = 0
onoff = ls
if (isinstance(offset, Real)
and len(onoff) % 2 == 0
and all(isinstance(elem, Real) for elem in onoff)):
return (offset, onoff)
raise ValueError(f"linestyle {ls!r} is not a valid on-off ink sequence.")
# Marker fill styles: how much of the marker face is filled.
validate_fillstyle = ValidateInStrings(
    'markers.fillstyle', ['full', 'left', 'right', 'bottom', 'top', 'none'])
# Element-wise variant for prop_cycle entries.
validate_fillstylelist = _listify_validator(validate_fillstyle)
def validate_markevery(s):
    """
    Validate the markevery property of a Line2D object.

    Parameters
    ----------
    s : None, int, (int, int), slice, float, (float, float), or list[int]

    Returns
    -------
    None, int, (int, int), slice, float, (float, float), or list[int]
    """
    # Scalars, slices and None pass through unchanged.
    if isinstance(s, (slice, float, int, type(None))):
        return s
    if isinstance(s, tuple):
        # A tuple must be a homogeneous pair: both ints or both floats.
        homogeneous_pair = (len(s) == 2
                            and (all(isinstance(e, int) for e in s)
                                 or all(isinstance(e, float) for e in s)))
        if not homogeneous_pair:
            raise TypeError(
                "'markevery' tuple must be pair of ints or of floats")
        return s
    if isinstance(s, list):
        if not all(isinstance(e, int) for e in s):
            raise TypeError(
                "'markevery' list must have all elements of type int")
        return s
    raise TypeError("'markevery' is of an invalid type")
# Element-wise markevery validation for prop_cycle entries.
validate_markeverylist = _listify_validator(validate_markevery)
def validate_bbox(s):
    """Return 'tight', or None (the 'standard' behavior); reject the rest."""
    if s is None:
        # Backwards compatibility. None is equivalent to 'standard'.
        return s
    if isinstance(s, str):
        s = s.lower()
        if s == 'tight':
            return s
        if s == 'standard':
            return None
    raise ValueError("bbox should be 'tight' or 'standard'")
def validate_sketch(s):
    """Return None, or a (scale, length, randomness) triple of floats."""
    if isinstance(s, str):
        s = s.lower().strip()
        # Allow the triple to be written with surrounding parentheses.
        if s.startswith("(") and s.endswith(")"):
            s = s[1:-1]
    if s is None or s == 'none':
        return None
    try:
        return tuple(_listify_validator(validate_float, n=3)(s))
    except ValueError as exc:
        raise ValueError("Expected a (scale, length, randomness) tuple") from exc
def _validate_greaterthan_minushalf(s):
    """Validate a float strictly greater than -0.5."""
    s = validate_float(s)
    if not s > -0.5:
        raise RuntimeError(f'Value must be >-0.5; got {s}')
    return s
def _validate_greaterequal0_lessequal1(s):
    """Validate a float in the closed interval [0, 1]."""
    s = validate_float(s)
    if not 0 <= s <= 1:
        raise RuntimeError(f'Value must be >=0 and <=1; got {s}')
    return s
def _validate_int_greaterequal0(s):
    """Validate a non-negative int."""
    s = validate_int(s)
    if s < 0:
        raise RuntimeError(f'Value must be >=0; got {s}')
    return s
def validate_hatch(s):
    r"""
    Validate a hatch pattern.

    A hatch pattern string can have any sequence of the following
    characters: ``\ / | - + * . x o O``.

    Raises
    ------
    ValueError
        If *s* is not a string, or contains characters outside the set above.
    """
    if not isinstance(s, str):
        raise ValueError("Hatch pattern must be a string")
    # The former additional `_api.check_isinstance(str, hatch_pattern=s)`
    # call was removed: it could never fire, since the check above already
    # raises for every non-str input.
    unknown = set(s) - {'\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O'}
    if unknown:
        raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
    return s
# Element-wise variants for prop_cycle entries: a list of hatch patterns,
# and a list of dash sequences (each itself a list of floats).
validate_hatchlist = _listify_validator(validate_hatch)
validate_dashlist = _listify_validator(validate_floatlist)
def _validate_minor_tick_ndivs(n):
    """
    Validate ndiv parameter related to the minor ticks.

    It controls the number of minor ticks to be placed between
    two major ticks; 'auto' lets the locator decide.
    """
    if cbook._str_lower_equal(n, 'auto'):
        return n
    try:
        return _validate_int_greaterequal0(n)
    except (RuntimeError, ValueError):
        pass
    raise ValueError("'tick.minor.ndivs' must be 'auto' or non-negative int")
# Per-property validators used for prop_cycle entries and by cycler():
# each canonical artist property name maps to a list-validator for its values.
_prop_validators = {
    'color': _listify_validator(validate_color_for_prop_cycle,
                                allow_stringlist=True),
    'linewidth': validate_floatlist,
    'linestyle': _listify_validator(_validate_linestyle),
    'facecolor': validate_colorlist,
    'edgecolor': validate_colorlist,
    'joinstyle': _listify_validator(JoinStyle),
    'capstyle': _listify_validator(CapStyle),
    'fillstyle': validate_fillstylelist,
    'markerfacecolor': validate_colorlist,
    'markersize': validate_floatlist,
    'markeredgewidth': validate_floatlist,
    'markeredgecolor': validate_colorlist,
    'markevery': validate_markeverylist,
    'alpha': validate_floatlist,
    'marker': _validate_markerlist,
    'hatch': validate_hatchlist,
    'dashes': validate_dashlist,
}
# Short aliases accepted for the property names above; cycler() and
# validate_cycler normalize these to the long names.
_prop_aliases = {
    'c': 'color',
    'lw': 'linewidth',
    'ls': 'linestyle',
    'fc': 'facecolor',
    'ec': 'edgecolor',
    'mfc': 'markerfacecolor',
    'mec': 'markeredgecolor',
    'mew': 'markeredgewidth',
    'ms': 'markersize',
}
def cycler(*args, **kwargs):
    """
    Create a `~cycler.Cycler` object much like :func:`cycler.cycler`,
    but includes input validation.

    Call signatures::

        cycler(cycler)
        cycler(label=values, label2=values2, ...)
        cycler(label, values)

    Form 1 copies a given `~cycler.Cycler` object.

    Form 2 creates a `~cycler.Cycler` which cycles over one or more
    properties simultaneously. If multiple properties are given, their
    value lists must have the same length.

    Form 3 creates a `~cycler.Cycler` for a single property. This form
    exists for compatibility with the original cycler. Its use is
    discouraged in favor of the kwarg form, i.e. ``cycler(label=values)``.

    Parameters
    ----------
    cycler : Cycler
        Copy constructor for Cycler.
    label : str
        The property key. Must be a valid `.Artist` property.
        For example, 'color' or 'linestyle'. Aliases are allowed,
        such as 'c' for 'color' and 'lw' for 'linewidth'.
    values : iterable
        Finite-length iterable of the property values. These values
        are validated and will raise a ValueError if invalid.

    Returns
    -------
    Cycler
        A new :class:`~cycler.Cycler` for the given properties.

    Examples
    --------
    Creating a cycler for a single property:

    >>> c = cycler(color=['red', 'green', 'blue'])

    Creating a cycler for simultaneously cycling over multiple properties
    (e.g. red circle, green plus, blue cross):

    >>> c = cycler(color=['red', 'green', 'blue'],
    ...            marker=['o', '+', 'x'])
    """
    if args and kwargs:
        raise TypeError("cycler() can only accept positional OR keyword "
                        "arguments -- not both.")
    elif not args and not kwargs:
        raise TypeError("cycler() must have positional OR keyword arguments")
    if len(args) == 1:
        # Form 1: copy-validate an existing Cycler.
        if not isinstance(args[0], Cycler):
            raise TypeError("If only one positional argument given, it must "
                            "be a Cycler instance.")
        return validate_cycler(args[0])
    if len(args) > 2:
        raise _api.nargs_error('cycler', '0-2', len(args))
    # Form 3 gives a single (label, values) pair; form 2 gives kwargs.
    pairs = [(args[0], args[1])] if len(args) == 2 else kwargs.items()
    validated = []
    for prop, vals in pairs:
        # Normalize aliases ('c' -> 'color', ...) so downstream code only
        # ever sees canonical property names.
        canonical = _prop_aliases.get(prop, prop)
        validator = _prop_validators.get(canonical)
        if validator is None:
            raise TypeError("Unknown artist property: %s" % prop)
        validated.append((canonical, validator(vals)))
    return reduce(operator.add, (ccycler(k, v) for k, v in validated))
class _DunderChecker(ast.NodeVisitor):
def visit_Attribute(self, node):
if node.attr.startswith("__") and node.attr.endswith("__"):
raise ValueError("cycler strings with dunders are forbidden")
self.generic_visit(node)
# A validator dedicated to the named legend loc strings; numeric codes and
# coordinate tuples are handled by _validate_legend_loc below.
_validate_named_legend_loc = ValidateInStrings(
    'legend.loc',
    [
        "best",
        "upper right", "upper left", "lower left", "lower right", "right",
        "center left", "center right", "lower center", "upper center",
        "center"],
    ignorecase=True)
def _validate_legend_loc(loc):
"""
Confirm that loc is a type which rc.Params["legend.loc"] supports.
.. versionadded:: 3.8
Parameters
----------
loc : str | int | (float, float) | str((float, float))
The location of the legend.
Returns
-------
loc : str | int | (float, float) or raise ValueError exception
The location of the legend.
"""
if isinstance(loc, str):
try:
return _validate_named_legend_loc(loc)
except ValueError:
pass
try:
loc = ast.literal_eval(loc)
except (SyntaxError, ValueError):
pass
if isinstance(loc, int):
if 0 <= loc <= 10:
return loc
if isinstance(loc, tuple):
if len(loc) == 2 and all(isinstance(e, Real) for e in loc):
return loc
raise ValueError(f"{loc} is not a valid legend location.")
def validate_cycler(s):
    """Return a Cycler object from a string repr or the object itself.

    String inputs are evaluated in a restricted namespace (only ``cycler``
    available, no builtins) after rejecting dunder attribute access; then
    property aliases are normalized in place and every value list is run
    through the matching entry of ``_prop_validators``.
    """
    if isinstance(s, str):
        # TODO: We might want to rethink this...
        # While I think I have it quite locked down, it is execution of
        # arbitrary code without sanitation.
        # Combine this with the possibility that rcparams might come from the
        # internet (future plans), this could be downright dangerous.
        # I locked it down by only having the 'cycler()' function available.
        # UPDATE: Partly plugging a security hole.
        # I really should have read this:
        # https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
        # We should replace this eval with a combo of PyParsing and
        # ast.literal_eval()
        try:
            _DunderChecker().visit(ast.parse(s))
            s = eval(s, {'cycler': cycler, '__builtins__': {}})
        except BaseException as e:
            raise ValueError(f"{s!r} is not a valid cycler construction: {e}"
                             ) from e
    # Should make sure what comes from the above eval()
    # is a Cycler object.
    if isinstance(s, Cycler):
        cycler_inst = s
    else:
        raise ValueError(f"Object is not a string or Cycler instance: {s!r}")
    unknowns = cycler_inst.keys - (set(_prop_validators) | set(_prop_aliases))
    if unknowns:
        raise ValueError("Unknown artist properties: %s" % unknowns)
    # Not a full validation, but it'll at least normalize property names
    # A fuller validation would require v0.10 of cycler.
    checker = set()
    for prop in cycler_inst.keys:
        norm_prop = _prop_aliases.get(prop, prop)
        # Reject both "an alias together with its long name" and "two
        # aliases resolving to the same long name".
        if norm_prop != prop and norm_prop in cycler_inst.keys:
            raise ValueError(f"Cannot specify both {norm_prop!r} and alias "
                             f"{prop!r} in the same prop_cycle")
        if norm_prop in checker:
            raise ValueError(f"Another property was already aliased to "
                             f"{norm_prop!r}. Collision normalizing {prop!r}.")
        checker.update([norm_prop])
    # This is just an extra-careful check, just in case there is some
    # edge-case I haven't thought of.
    assert len(checker) == len(cycler_inst.keys)
    # Now, it should be safe to mutate this cycler
    for prop in cycler_inst.keys:
        norm_prop = _prop_aliases.get(prop, prop)
        cycler_inst.change_key(prop, norm_prop)
    for key, vals in cycler_inst.by_key().items():
        _prop_validators[key](vals)
    return cycler_inst
def validate_hist_bins(s):
    """Validate 'hist.bins': a named method, an int, or a float sequence."""
    named_methods = ["auto", "sturges", "fd", "doane", "scott", "rice", "sqrt"]
    if isinstance(s, str) and s in named_methods:
        return s
    # A fixed number of bins...
    try:
        return int(s)
    except (TypeError, ValueError):
        pass
    # ...or explicit bin edges.
    try:
        return validate_floatlist(s)
    except ValueError:
        pass
    raise ValueError(f"'hist.bins' must be one of {named_methods}, an int or"
                     " a sequence of floats")
class _ignorecase(list):
    """A marker class indicating that a list-of-str is case-insensitive.

    Used in the validator table: plain lists are converted to case-sensitive
    ``ValidateInStrings`` instances, ``_ignorecase`` lists to case-insensitive
    ones (see ``_convert_validator_spec``).
    """
def _convert_validator_spec(key, conv):
if isinstance(conv, list):
ignorecase = isinstance(conv, _ignorecase)
return ValidateInStrings(key, conv, ignorecase=ignorecase)
else:
return conv
# Mapping of rcParams to validators.
# Converters given as lists or _ignorecase are converted to ValidateInStrings
# immediately below.
# The rcParams defaults are defined in lib/matplotlib/mpl-data/matplotlibrc, which
# gets copied to matplotlib/mpl-data/matplotlibrc by the setup script.
_validators = {
"backend": validate_backend,
"backend_fallback": validate_bool,
"figure.hooks": validate_stringlist,
"toolbar": _validate_toolbar,
"interactive": validate_bool,
"timezone": validate_string,
"webagg.port": validate_int,
"webagg.address": validate_string,
"webagg.open_in_browser": validate_bool,
"webagg.port_retries": validate_int,
# line props
"lines.linewidth": validate_float, # line width in points
"lines.linestyle": _validate_linestyle, # solid line
"lines.color": validate_color, # first color in color cycle
"lines.marker": _validate_marker, # marker name
"lines.markerfacecolor": validate_color_or_auto, # default color
"lines.markeredgecolor": validate_color_or_auto, # default color
"lines.markeredgewidth": validate_float,
"lines.markersize": validate_float, # markersize, in points
"lines.antialiased": validate_bool, # antialiased (no jaggies)
"lines.dash_joinstyle": JoinStyle,
"lines.solid_joinstyle": JoinStyle,
"lines.dash_capstyle": CapStyle,
"lines.solid_capstyle": CapStyle,
"lines.dashed_pattern": validate_floatlist,
"lines.dashdot_pattern": validate_floatlist,
"lines.dotted_pattern": validate_floatlist,
"lines.scale_dashes": validate_bool,
# marker props
"markers.fillstyle": validate_fillstyle,
## pcolor(mesh) props:
"pcolor.shading": ["auto", "flat", "nearest", "gouraud"],
"pcolormesh.snap": validate_bool,
## patch props
"patch.linewidth": validate_float, # line width in points
"patch.edgecolor": validate_color,
"patch.force_edgecolor": validate_bool,
"patch.facecolor": validate_color, # first color in cycle
"patch.antialiased": validate_bool, # antialiased (no jaggies)
## hatch props
"hatch.color": validate_color,
"hatch.linewidth": validate_float,
## Histogram properties
"hist.bins": validate_hist_bins,
## Boxplot properties
"boxplot.notch": validate_bool,
"boxplot.vertical": validate_bool,
"boxplot.whiskers": validate_whiskers,
"boxplot.bootstrap": validate_int_or_None,
"boxplot.patchartist": validate_bool,
"boxplot.showmeans": validate_bool,
"boxplot.showcaps": validate_bool,
"boxplot.showbox": validate_bool,
"boxplot.showfliers": validate_bool,
"boxplot.meanline": validate_bool,
"boxplot.flierprops.color": validate_color,
"boxplot.flierprops.marker": _validate_marker,
"boxplot.flierprops.markerfacecolor": validate_color_or_auto,
"boxplot.flierprops.markeredgecolor": validate_color,
"boxplot.flierprops.markeredgewidth": validate_float,
"boxplot.flierprops.markersize": validate_float,
"boxplot.flierprops.linestyle": _validate_linestyle,
"boxplot.flierprops.linewidth": validate_float,
"boxplot.boxprops.color": validate_color,
"boxplot.boxprops.linewidth": validate_float,
"boxplot.boxprops.linestyle": _validate_linestyle,
"boxplot.whiskerprops.color": validate_color,
"boxplot.whiskerprops.linewidth": validate_float,
"boxplot.whiskerprops.linestyle": _validate_linestyle,
"boxplot.capprops.color": validate_color,
"boxplot.capprops.linewidth": validate_float,
"boxplot.capprops.linestyle": _validate_linestyle,
"boxplot.medianprops.color": validate_color,
"boxplot.medianprops.linewidth": validate_float,
"boxplot.medianprops.linestyle": _validate_linestyle,
"boxplot.meanprops.color": validate_color,
"boxplot.meanprops.marker": _validate_marker,
"boxplot.meanprops.markerfacecolor": validate_color,
"boxplot.meanprops.markeredgecolor": validate_color,
"boxplot.meanprops.markersize": validate_float,
"boxplot.meanprops.linestyle": _validate_linestyle,
"boxplot.meanprops.linewidth": validate_float,
## font props
"font.family": validate_stringlist, # used by text object
"font.style": validate_string,
"font.variant": validate_string,
"font.stretch": validate_fontstretch,
"font.weight": validate_fontweight,
"font.size": validate_float, # Base font size in points
"font.serif": validate_stringlist,
"font.sans-serif": validate_stringlist,
"font.cursive": validate_stringlist,
"font.fantasy": validate_stringlist,
"font.monospace": validate_stringlist,
# text props
"text.color": validate_color,
"text.usetex": validate_bool,
"text.latex.preamble": validate_string,
"text.hinting": ["default", "no_autohint", "force_autohint",
"no_hinting", "auto", "native", "either", "none"],
"text.hinting_factor": validate_int,
"text.kerning_factor": validate_int,
"text.antialiased": validate_bool,
"text.parse_math": validate_bool,
"mathtext.cal": validate_font_properties,
"mathtext.rm": validate_font_properties,
"mathtext.tt": validate_font_properties,
"mathtext.it": validate_font_properties,
"mathtext.bf": validate_font_properties,
"mathtext.bfit": validate_font_properties,
"mathtext.sf": validate_font_properties,
"mathtext.fontset": ["dejavusans", "dejavuserif", "cm", "stix",
"stixsans", "custom"],
"mathtext.default": ["rm", "cal", "bfit", "it", "tt", "sf", "bf", "default",
"bb", "frak", "scr", "regular"],
"mathtext.fallback": _validate_mathtext_fallback,
"image.aspect": validate_aspect, # equal, auto, a number
"image.interpolation": validate_string,
"image.interpolation_stage": ["auto", "data", "rgba"],
"image.cmap": _validate_cmap, # gray, jet, etc.
"image.lut": validate_int, # lookup table
"image.origin": ["upper", "lower"],
"image.resample": validate_bool,
# Specify whether vector graphics backends will combine all images on a
# set of Axes into a single composite image
"image.composite_image": validate_bool,
# contour props
"contour.negative_linestyle": _validate_linestyle,
"contour.corner_mask": validate_bool,
"contour.linewidth": validate_float_or_None,
"contour.algorithm": ["mpl2005", "mpl2014", "serial", "threaded"],
# errorbar props
"errorbar.capsize": validate_float,
# axis props
# alignment of x/y axis title
"xaxis.labellocation": ["left", "center", "right"],
"yaxis.labellocation": ["bottom", "center", "top"],
# Axes props
"axes.axisbelow": validate_axisbelow,
"axes.facecolor": validate_color, # background color
"axes.edgecolor": validate_color, # edge color
"axes.linewidth": validate_float, # edge linewidth
"axes.spines.left": validate_bool, # Set visibility of axes spines,
"axes.spines.right": validate_bool, # i.e., the lines around the chart
"axes.spines.bottom": validate_bool, # denoting data boundary.
"axes.spines.top": validate_bool,
"axes.titlesize": validate_fontsize, # Axes title fontsize
"axes.titlelocation": ["left", "center", "right"], # Axes title alignment
"axes.titleweight": validate_fontweight, # Axes title font weight
"axes.titlecolor": validate_color_or_auto, # Axes title font color
# title location, axes units, None means auto
"axes.titley": validate_float_or_None,
# pad from Axes top decoration to title in points
"axes.titlepad": validate_float,
"axes.grid": validate_bool, # display grid or not
"axes.grid.which": ["minor", "both", "major"], # which grids are drawn
"axes.grid.axis": ["x", "y", "both"], # grid type
"axes.labelsize": validate_fontsize, # fontsize of x & y labels
"axes.labelpad": validate_float, # space between label and axis
"axes.labelweight": validate_fontweight, # fontsize of x & y labels
"axes.labelcolor": validate_color, # color of axis label
# use scientific notation if log10 of the axis range is smaller than the
# first or larger than the second
"axes.formatter.limits": _listify_validator(validate_int, n=2),
# use current locale to format ticks
"axes.formatter.use_locale": validate_bool,
"axes.formatter.use_mathtext": validate_bool,
# minimum exponent to format in scientific notation
"axes.formatter.min_exponent": validate_int,
"axes.formatter.useoffset": validate_bool,
"axes.formatter.offset_threshold": validate_int,
"axes.unicode_minus": validate_bool,
# This entry can be either a cycler object or a string repr of a
# cycler-object, which gets eval()'ed to create the object.
"axes.prop_cycle": validate_cycler,
# If "data", axes limits are set close to the data.
# If "round_numbers" axes limits are set to the nearest round numbers.
"axes.autolimit_mode": ["data", "round_numbers"],
"axes.xmargin": _validate_greaterthan_minushalf, # margin added to xaxis
"axes.ymargin": _validate_greaterthan_minushalf, # margin added to yaxis
"axes.zmargin": _validate_greaterthan_minushalf, # margin added to zaxis
"polaraxes.grid": validate_bool, # display polar grid or not
"axes3d.grid": validate_bool, # display 3d grid
"axes3d.automargin": validate_bool, # automatically add margin when
# manually setting 3D axis limits
"axes3d.xaxis.panecolor": validate_color, # 3d background pane
"axes3d.yaxis.panecolor": validate_color, # 3d background pane
"axes3d.zaxis.panecolor": validate_color, # 3d background pane
"axes3d.mouserotationstyle": ["azel", "trackball", "sphere", "arcball"],
"axes3d.trackballsize": validate_float,
"axes3d.trackballborder": validate_float,
# scatter props
"scatter.marker": _validate_marker,
"scatter.edgecolors": validate_string,
"date.epoch": _validate_date,
"date.autoformatter.year": validate_string,
"date.autoformatter.month": validate_string,
"date.autoformatter.day": validate_string,
"date.autoformatter.hour": validate_string,
"date.autoformatter.minute": validate_string,
"date.autoformatter.second": validate_string,
"date.autoformatter.microsecond": validate_string,
'date.converter': ['auto', 'concise'],
# for auto date locator, choose interval_multiples
'date.interval_multiples': validate_bool,
# legend properties
"legend.fancybox": validate_bool,
"legend.loc": _validate_legend_loc,
# the number of points in the legend line
"legend.numpoints": validate_int,
# the number of points in the legend line for scatter
"legend.scatterpoints": validate_int,
"legend.fontsize": validate_fontsize,
"legend.title_fontsize": validate_fontsize_None,
# color of the legend
"legend.labelcolor": _validate_color_or_linecolor,
# the relative size of legend markers vs. original
"legend.markerscale": validate_float,
# using dict in rcParams not yet supported, so make sure it is bool
"legend.shadow": validate_bool,
# whether or not to draw a frame around legend
"legend.frameon": validate_bool,
# alpha value of the legend frame
"legend.framealpha": validate_float_or_None,
## the following dimensions are in fraction of the font size
"legend.borderpad": validate_float, # units are fontsize
# the vertical space between the legend entries
"legend.labelspacing": validate_float,
# the length of the legend lines
"legend.handlelength": validate_float,
# the length of the legend lines
"legend.handleheight": validate_float,
# the space between the legend line and legend text
"legend.handletextpad": validate_float,
# the border between the Axes and legend edge
"legend.borderaxespad": validate_float,
# the border between the Axes and legend edge
"legend.columnspacing": validate_float,
"legend.facecolor": validate_color_or_inherit,
"legend.edgecolor": validate_color_or_inherit,
# tick properties
"xtick.top": validate_bool, # draw ticks on top side
"xtick.bottom": validate_bool, # draw ticks on bottom side
"xtick.labeltop": validate_bool, # draw label on top
"xtick.labelbottom": validate_bool, # draw label on bottom
"xtick.major.size": validate_float, # major xtick size in points
"xtick.minor.size": validate_float, # minor xtick size in points
"xtick.major.width": validate_float, # major xtick width in points
"xtick.minor.width": validate_float, # minor xtick width in points
"xtick.major.pad": validate_float, # distance to label in points
"xtick.minor.pad": validate_float, # distance to label in points
"xtick.color": validate_color, # color of xticks
"xtick.labelcolor": validate_color_or_inherit, # color of xtick labels
"xtick.minor.visible": validate_bool, # visibility of minor xticks
"xtick.minor.top": validate_bool, # draw top minor xticks
"xtick.minor.bottom": validate_bool, # draw bottom minor xticks
"xtick.major.top": validate_bool, # draw top major xticks
"xtick.major.bottom": validate_bool, # draw bottom major xticks
# number of minor xticks
"xtick.minor.ndivs": _validate_minor_tick_ndivs,
"xtick.labelsize": validate_fontsize, # fontsize of xtick labels
"xtick.direction": ["out", "in", "inout"], # direction of xticks
"xtick.alignment": ["center", "right", "left"],
"ytick.left": validate_bool, # draw ticks on left side
"ytick.right": validate_bool, # draw ticks on right side
"ytick.labelleft": validate_bool, # draw tick labels on left side
"ytick.labelright": validate_bool, # draw tick labels on right side
"ytick.major.size": validate_float, # major ytick size in points
"ytick.minor.size": validate_float, # minor ytick size in points
"ytick.major.width": validate_float, # major ytick width in points
"ytick.minor.width": validate_float, # minor ytick width in points
"ytick.major.pad": validate_float, # distance to label in points
"ytick.minor.pad": validate_float, # distance to label in points
"ytick.color": validate_color, # color of yticks
"ytick.labelcolor": validate_color_or_inherit, # color of ytick labels
"ytick.minor.visible": validate_bool, # visibility of minor yticks
"ytick.minor.left": validate_bool, # draw left minor yticks
"ytick.minor.right": validate_bool, # draw right minor yticks
"ytick.major.left": validate_bool, # draw left major yticks
"ytick.major.right": validate_bool, # draw right major yticks
# number of minor yticks
"ytick.minor.ndivs": _validate_minor_tick_ndivs,
"ytick.labelsize": validate_fontsize, # fontsize of ytick labels
"ytick.direction": ["out", "in", "inout"], # direction of yticks
"ytick.alignment": [
"center", "top", "bottom", "baseline", "center_baseline"],
"grid.color": validate_color, # grid color
"grid.linestyle": _validate_linestyle, # solid
"grid.linewidth": validate_float, # in points
"grid.alpha": validate_float,
## figure props
# figure title
"figure.titlesize": validate_fontsize,
"figure.titleweight": validate_fontweight,
# figure labels
"figure.labelsize": validate_fontsize,
"figure.labelweight": validate_fontweight,
# figure size in inches: width by height
"figure.figsize": _listify_validator(validate_float, n=2),
"figure.dpi": validate_float,
"figure.facecolor": validate_color,
"figure.edgecolor": validate_color,
"figure.frameon": validate_bool,
"figure.autolayout": validate_bool,
"figure.max_open_warning": validate_int,
"figure.raise_window": validate_bool,
"macosx.window_mode": ["system", "tab", "window"],
"figure.subplot.left": validate_float,
"figure.subplot.right": validate_float,
"figure.subplot.bottom": validate_float,
"figure.subplot.top": validate_float,
"figure.subplot.wspace": validate_float,
"figure.subplot.hspace": validate_float,
"figure.constrained_layout.use": validate_bool, # run constrained_layout?
# wspace and hspace are fraction of adjacent subplots to use for space.
# Much smaller than above because we don't need room for the text.
"figure.constrained_layout.hspace": validate_float,
"figure.constrained_layout.wspace": validate_float,
# buffer around the Axes, in inches.
"figure.constrained_layout.h_pad": validate_float,
"figure.constrained_layout.w_pad": validate_float,
## Saving figure's properties
'savefig.dpi': validate_dpi,
'savefig.facecolor': validate_color_or_auto,
'savefig.edgecolor': validate_color_or_auto,
'savefig.orientation': ['landscape', 'portrait'],
"savefig.format": validate_string,
"savefig.bbox": validate_bbox, # "tight", or "standard" (= None)
"savefig.pad_inches": validate_float,
# default directory in savefig dialog box
"savefig.directory": _validate_pathlike,
"savefig.transparent": validate_bool,
"tk.window_focus": validate_bool, # Maintain shell focus for TkAgg
# Set the papersize/type
"ps.papersize": _ignorecase(
["figure", "letter", "legal", "ledger",
*[f"{ab}{i}" for ab in "ab" for i in range(11)]]),
"ps.useafm": validate_bool,
# use ghostscript or xpdf to distill ps output
"ps.usedistiller": validate_ps_distiller,
"ps.distiller.res": validate_int, # dpi
"ps.fonttype": validate_fonttype, # 3 (Type3) or 42 (Truetype)
"pdf.compression": validate_int, # 0-9 compression level; 0 to disable
"pdf.inheritcolor": validate_bool, # skip color setting commands
# use only the 14 PDF core fonts embedded in every PDF viewing application
"pdf.use14corefonts": validate_bool,
"pdf.fonttype": validate_fonttype, # 3 (Type3) or 42 (Truetype)
"pgf.texsystem": ["xelatex", "lualatex", "pdflatex"], # latex variant used
"pgf.rcfonts": validate_bool, # use mpl's rc settings for font config
"pgf.preamble": validate_string, # custom LaTeX preamble
# write raster image data into the svg file
"svg.image_inline": validate_bool,
"svg.fonttype": ["none", "path"], # save text as text ("none") or "paths"
"svg.hashsalt": validate_string_or_None,
"svg.id": validate_string_or_None,
# set this when you want to generate hardcopy docstring
"docstring.hardcopy": validate_bool,
"path.simplify": validate_bool,
"path.simplify_threshold": _validate_greaterequal0_lessequal1,
"path.snap": validate_bool,
"path.sketch": validate_sketch,
"path.effects": validate_anylist,
"agg.path.chunksize": validate_int, # 0 to disable chunking
# key-mappings (multi-character mappings should be a list/tuple)
"keymap.fullscreen": validate_stringlist,
"keymap.home": validate_stringlist,
"keymap.back": validate_stringlist,
"keymap.forward": validate_stringlist,
"keymap.pan": validate_stringlist,
"keymap.zoom": validate_stringlist,
"keymap.save": validate_stringlist,
"keymap.quit": validate_stringlist,
"keymap.quit_all": validate_stringlist, # e.g.: "W", "cmd+W", "Q"
"keymap.grid": validate_stringlist,
"keymap.grid_minor": validate_stringlist,
"keymap.yscale": validate_stringlist,
"keymap.xscale": validate_stringlist,
"keymap.help": validate_stringlist,
"keymap.copy": validate_stringlist,
# Animation settings
"animation.html": ["html5", "jshtml", "none"],
# Limit, in MB, of size of base64 encoded animation in HTML
# (i.e. IPython notebook)
"animation.embed_limit": validate_float,
"animation.writer": validate_string,
"animation.codec": validate_string,
"animation.bitrate": validate_int,
# Controls image format when frames are written to disk
"animation.frame_format": ["png", "jpeg", "tiff", "raw", "rgba", "ppm",
"sgi", "bmp", "pbm", "svg"],
# Path to ffmpeg binary. If just binary name, subprocess uses $PATH.
"animation.ffmpeg_path": _validate_pathlike,
# Additional arguments for ffmpeg movie writer (using pipes)
"animation.ffmpeg_args": validate_stringlist,
# Path to convert binary. If just binary name, subprocess uses $PATH.
"animation.convert_path": _validate_pathlike,
# Additional arguments for convert movie writer (using pipes)
"animation.convert_args": validate_stringlist,
# Classic (pre 2.0) compatibility mode
# This is used for things that are hard to make backward compatible
# with a sane rcParam alone. This does *not* turn on classic mode
# altogether. For that use `matplotlib.style.use("classic")`.
"_internal.classic_mode": validate_bool
}
# Defaults that cannot be read back out of the bundled matplotlibrc template,
# keyed by rcParam name (reasons noted inline per entry).
_hardcoded_defaults = { # Defaults not inferred from
    # lib/matplotlib/mpl-data/matplotlibrc...
    # ... because they are private:
    "_internal.classic_mode": False,
    # ... because they are deprecated:
    # No current deprecations.
    # backend is handled separately when constructing rcParamsDefault.
}
# Normalize the validator table: specs given as shorthand (e.g. a plain list
# of allowed values) are converted into uniform callable validators.
_validators = {k: _convert_validator_spec(k, conv)
               for k, conv in _validators.items()}
venv\Lib\site-packages\matplotlib\sankey.py
"""
Module for creating Sankey diagrams using Matplotlib.
"""
import logging
from types import SimpleNamespace
import numpy as np
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import _docstring
_log = logging.getLogger(__name__)
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey:
    """
    Sankey diagram.

    Sankey diagrams are a specific type of flow diagram, in which
    the width of the arrows is shown proportionally to the flow
    quantity. They are typically used to visualize energy or
    material or cost transfers between processes.
    `Wikipedia (6/1/2011) <https://en.wikipedia.org/wiki/Sankey_diagram>`_
    """
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
             radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
             margin=0.4, tolerance=1e-6, **kwargs):
    """
    Create a new Sankey instance.

    The optional arguments listed below are applied to all subdiagrams so
    that there is consistent alignment and formatting.

    In order to draw a complex Sankey diagram, create an instance of
    `Sankey` by calling it without any kwargs::

        sankey = Sankey()

    Then add simple Sankey sub-diagrams::

        sankey.add() # 1
        sankey.add() # 2
        #...
        sankey.add() # n

    Finally, create the full diagram::

        sankey.finish()

    Or, instead, simply daisy-chain those calls::

        Sankey().add().add... .add().finish()

    Other Parameters
    ----------------
    ax : `~matplotlib.axes.Axes`
        Axes onto which the data should be plotted. If *ax* isn't
        provided, new Axes will be created.
    scale : float
        Scaling factor for the flows. *scale* sizes the width of the paths
        in order to maintain proper layout. The same scale is applied to
        all subdiagrams. The value should be chosen such that the product
        of the scale and the sum of the inputs is approximately 1.0 (and
        the product of the scale and the sum of the outputs is
        approximately -1.0).
    unit : str
        The physical unit associated with the flow quantities. If *unit*
        is None, then none of the quantities are labeled.
    format : str or callable
        A Python number formatting string or callable used to label the
        flows with their quantities (i.e., a number times a unit, where the
        unit is given). If a format string is given, the label will be
        ``format % quantity``. If a callable is given, it will be called
        with ``quantity`` as an argument.
    gap : float
        Space between paths that break in/break away to/from the top or
        bottom.
    radius : float
        Inner radius of the vertical paths.
    shoulder : float
        Size of the shoulders of output arrows.
    offset : float
        Text offset (from the dip or tip of the arrow).
    head_angle : float
        Angle, in degrees, of the arrow heads (and negative of the angle of
        the tails).
    margin : float
        Minimum space between Sankey outlines and the edge of the plot
        area.
    tolerance : float
        Acceptable maximum of the magnitude of the sum of flows. The
        magnitude of the sum of connected flows cannot be greater than
        *tolerance*.
    **kwargs
        Any additional keyword arguments will be passed to `add`, which
        will create the first subdiagram.

    Raises
    ------
    ValueError
        If *gap*, *head_angle*, or *tolerance* is negative, or if *radius*
        exceeds *gap* (any of which would produce an invalid layout).

    See Also
    --------
    Sankey.add
    Sankey.finish

    Examples
    --------
    .. plot:: gallery/specialty_plots/sankey_basics.py
    """
    # Check the arguments.
    if gap < 0:
        raise ValueError(
            "'gap' is negative, which is not allowed because it would "
            "cause the paths to overlap")
    if radius > gap:
        raise ValueError(
            "'radius' is greater than 'gap', which is not allowed because "
            "it would cause the paths to overlap")
    if head_angle < 0:
        raise ValueError(
            "'head_angle' is negative, which is not allowed because it "
            "would cause inputs to look like outputs and vice versa")
    if tolerance < 0:
        raise ValueError(
            "'tolerance' is negative, but it must be a magnitude")
    # Create Axes if necessary.
    if ax is None:
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
    self.diagrams = []
    # Store the inputs.
    self.ax = ax
    self.unit = unit
    self.format = format
    self.scale = scale
    self.gap = gap
    self.radius = radius
    self.shoulder = shoulder
    self.offset = offset
    self.margin = margin
    # Slope of the arrow head/tail sides, derived from *head_angle* (deg).
    self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
    self.tolerance = tolerance
    # Initialize the vertices of tight box around the diagram(s):
    # (xmin, xmax, ymin, ymax), grown by each call to add().
    self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
    # If there are any kwargs, create the first subdiagram.
    if len(kwargs):
        self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
    """
    Return the codes and vertices for a rotated, scaled, and translated
    90 degree arc.

    Other Parameters
    ----------------
    quadrant : {0, 1, 2, 3}, default: 0
        Uses 0-based indexing (0, 1, 2, or 3).
    cw : bool, default: True
        If True, the arc vertices are produced clockwise; counter-clockwise
        otherwise.
    radius : float, default: 1
        The radius of the arc.
    center : (float, float), default: (0, 0)
        (x, y) tuple of the arc's center.

    Returns
    -------
    list of (code, vertex) pairs suitable for extending a path.
    """
    # Note: It would be possible to use matplotlib's transforms to rotate,
    # scale, and translate the arc, but since the angles are discrete,
    # it's just as easy and maybe more efficient to do it here.
    ARC_CODES = [Path.LINETO,
                 Path.CURVE4,
                 Path.CURVE4,
                 Path.CURVE4,
                 Path.CURVE4,
                 Path.CURVE4,
                 Path.CURVE4]
    # Vertices of a cubic Bezier curve approximating a 90 deg arc
    # These can be determined by Path.arc(0, 90).
    ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
                             [1.00000000e+00, 2.65114773e-01],
                             [8.94571235e-01, 5.19642327e-01],
                             [7.07106781e-01, 7.07106781e-01],
                             [5.19642327e-01, 8.94571235e-01],
                             [2.65114773e-01, 1.00000000e+00],
                             # Insignificant
                             # [6.12303177e-17, 1.00000000e+00]])
                             [0.00000000e+00, 1.00000000e+00]])
    if quadrant in (0, 2):
        if cw:
            vertices = ARC_VERTICES
        else:
            vertices = ARC_VERTICES[:, ::-1]  # Swap x and y.
    else:  # 1, 3
        # Negate x.
        if cw:
            # Swap x and y.
            vertices = np.column_stack((-ARC_VERTICES[:, 1],
                                        ARC_VERTICES[:, 0]))
        else:
            vertices = np.column_stack((-ARC_VERTICES[:, 0],
                                        ARC_VERTICES[:, 1]))
    if quadrant > 1:
        radius = -radius  # Rotate 180 deg.
    # Scale about the origin, then shift to *center*.
    return list(zip(ARC_CODES, radius * vertices +
                    np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
    """
    Add an input to a path and return its tip and label locations.

    *path* is mutated in place (LINETO/arc segments are appended).
    *angle* is one of the quarter-turn constants (RIGHT/UP/DOWN) or None
    for a skipped flow; *flow* is the (positive) scaled flow width and
    *length* the arrow length before break-in.
    """
    if angle is None:
        # Skipped flow (magnitude below tolerance): nothing is drawn.
        return [0, 0], [0, 0]
    else:
        x, y = path[-1][1]  # Use the last point as a reference.
        dipdepth = (flow / 2) * self.pitch
        if angle == RIGHT:
            x -= length
            dip = [x + dipdepth, y + flow / 2.0]
            path.extend([(Path.LINETO, [x, y]),
                         (Path.LINETO, dip),
                         (Path.LINETO, [x, y + flow]),
                         (Path.LINETO, [x + self.gap, y + flow])])
            label_location = [dip[0] - self.offset, dip[1]]
        else:  # Vertical
            x -= self.gap
            if angle == UP:
                sign = 1
            else:
                sign = -1
            dip = [x - flow / 2, y - sign * (length - dipdepth)]
            if angle == DOWN:
                quadrant = 2
            else:
                quadrant = 1
            # Inner arc isn't needed if inner radius is zero
            if self.radius:
                path.extend(self._arc(quadrant=quadrant,
                                      cw=angle == UP,
                                      radius=self.radius,
                                      center=(x + self.radius,
                                              y - sign * self.radius)))
            else:
                path.append((Path.LINETO, [x, y]))
            path.extend([(Path.LINETO, [x, y - sign * length]),
                         (Path.LINETO, dip),
                         (Path.LINETO, [x - flow, y - sign * length])])
            # Outer arc, offset outward by the flow width.
            path.extend(self._arc(quadrant=quadrant,
                                  cw=angle == DOWN,
                                  radius=flow + self.radius,
                                  center=(x + self.radius,
                                          y - sign * self.radius)))
            path.append((Path.LINETO, [x - flow, y + sign * flow]))
            label_location = [dip[0], dip[1] - sign * self.offset]
        return dip, label_location
def _add_output(self, path, angle, flow, length):
    """
    Append an output to a path and return its tip and label locations.

    *path* is mutated in place (LINETO/arc segments are appended).

    .. note:: *flow* is negative for an output.
    """
    if angle is None:
        # Skipped flow (magnitude below tolerance): nothing is drawn.
        return [0, 0], [0, 0]
    else:
        x, y = path[-1][1]  # Use the last point as a reference.
        # Arrow-head height; flow is negative here, so this is positive.
        tipheight = (self.shoulder - flow / 2) * self.pitch
        if angle == RIGHT:
            x += length
            tip = [x + tipheight, y + flow / 2.0]
            path.extend([(Path.LINETO, [x, y]),
                         (Path.LINETO, [x, y + self.shoulder]),
                         (Path.LINETO, tip),
                         (Path.LINETO, [x, y - self.shoulder + flow]),
                         (Path.LINETO, [x, y + flow]),
                         (Path.LINETO, [x - self.gap, y + flow])])
            label_location = [tip[0] + self.offset, tip[1]]
        else:  # Vertical
            x += self.gap
            if angle == UP:
                sign, quadrant = 1, 3
            else:
                sign, quadrant = -1, 0
            tip = [x - flow / 2.0, y + sign * (length + tipheight)]
            # Inner arc isn't needed if inner radius is zero
            if self.radius:
                path.extend(self._arc(quadrant=quadrant,
                                      cw=angle == UP,
                                      radius=self.radius,
                                      center=(x - self.radius,
                                              y + sign * self.radius)))
            else:
                path.append((Path.LINETO, [x, y]))
            path.extend([(Path.LINETO, [x, y + sign * length]),
                         (Path.LINETO, [x - self.shoulder,
                                        y + sign * length]),
                         (Path.LINETO, tip),
                         (Path.LINETO, [x + self.shoulder - flow,
                                        y + sign * length]),
                         (Path.LINETO, [x - flow, y + sign * length])])
            # Outer arc; radius - flow is larger than radius (flow < 0).
            path.extend(self._arc(quadrant=quadrant,
                                  cw=angle == DOWN,
                                  radius=self.radius - flow,
                                  center=(x - self.radius,
                                          y + sign * self.radius)))
            path.append((Path.LINETO, [x - flow, y + sign * flow]))
            label_location = [tip[0], tip[1] + sign * self.offset]
        return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
    """
    Return *path* reversed, with each drawing code shifted onto the
    preceding vertex.

    A path cannot simply be reversed with ``path[::-1]`` because each
    code describes the action taken *from the previous point*; reversing
    therefore requires offsetting the codes by one relative to the
    reversed vertices, with *first_action* taking the lead position.
    """
    rev = path[::-1]
    # Codes for the reversed path: *first_action*, then every original
    # code except the first one (which belonged to the starting move).
    shifted_codes = [first_action] + [code for code, _ in rev[:-1]]
    return [(code, pos) for code, (_, pos) in zip(shifted_codes, rev)]
@_docstring.interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
        trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
        rotation=0, **kwargs):
    """
    Add a simple Sankey diagram with flows at the same hierarchical level.

    Parameters
    ----------
    patchlabel : str
        Label to be placed at the center of the diagram.
        Note that *label* (not *patchlabel*) can be passed as keyword
        argument to create an entry in the legend.
    flows : list of float
        Array of flow values. By convention, inputs are positive and
        outputs are negative.

        Flows are placed along the top of the diagram from the inside out
        in order of their index within *flows*. They are placed along the
        sides of the diagram from the top down and along the bottom from
        the outside in.

        If the sum of the inputs and outputs is
        nonzero, the discrepancy will appear as a cubic Bézier curve along
        the top and bottom edges of the trunk.
    orientations : list of {-1, 0, 1}
        List of orientations of the flows (or a single orientation to be
        used for all flows). Valid values are 0 (inputs from
        the left, outputs to the right), 1 (from and to the top) or -1
        (from and to the bottom).
    labels : list of (str or None)
        List of labels for the flows (or a single label to be used for all
        flows). Each label may be *None* (no label), or a labeling string.
        If an entry is a (possibly empty) string, then the quantity for the
        corresponding flow will be shown below the string. However, if
        the *unit* of the main diagram is None, then quantities are never
        shown, regardless of the value of this argument.
    trunklength : float
        Length between the bases of the input and output groups (in
        data-space units).
    pathlengths : list of float
        List of lengths of the vertical arrows before break-in or after
        break-away. If a single value is given, then it will be applied to
        the first (inside) paths on the top and bottom, and the length of
        all other arrows will be justified accordingly. The *pathlengths*
        are not applied to the horizontal inputs and outputs.
    prior : int
        Index of the prior diagram to which this diagram should be
        connected.
    connect : (int, int)
        A (prior, this) tuple indexing the flow of the prior diagram and
        the flow of this diagram which should be connected. If this is the
        first diagram or *prior* is *None*, *connect* will be ignored.
    rotation : float
        Angle of rotation of the diagram in degrees. The interpretation of
        the *orientations* argument will be rotated accordingly (e.g., if
        *rotation* == 90, an *orientations* entry of 1 means to/from the
        left). *rotation* is ignored if this diagram is connected to an
        existing one (using *prior* and *connect*).

    Returns
    -------
    Sankey
        The current `.Sankey` instance.

    Other Parameters
    ----------------
    **kwargs
        Additional keyword arguments set `matplotlib.patches.PathPatch`
        properties, listed below. For example, one may want to use
        ``fill=False`` or ``label="A legend entry"``.

        %(Patch:kwdoc)s

    See Also
    --------
    Sankey.finish
    """
    # Check and preprocess the arguments.
    flows = np.array([1.0, -1.0]) if flows is None else np.array(flows)
    n = flows.shape[0]  # Number of flows
    if rotation is None:
        rotation = 0
    else:
        # In the code below, angles are expressed in deg/90.
        rotation /= 90.0
    if orientations is None:
        orientations = 0
    try:
        orientations = np.broadcast_to(orientations, n)
    except ValueError:
        raise ValueError(
            f"The shapes of 'flows' {np.shape(flows)} and 'orientations' "
            f"{np.shape(orientations)} are incompatible"
        ) from None
    try:
        labels = np.broadcast_to(labels, n)
    except ValueError:
        raise ValueError(
            f"The shapes of 'flows' {np.shape(flows)} and 'labels' "
            f"{np.shape(labels)} are incompatible"
        ) from None
    if trunklength < 0:
        raise ValueError(
            "'trunklength' is negative, which is not allowed because it "
            "would cause poor layout")
    if abs(np.sum(flows)) > self.tolerance:
        _log.info("The sum of the flows is nonzero (%f; patchlabel=%r); "
                  "is the system not at steady state?",
                  np.sum(flows), patchlabel)
    scaled_flows = self.scale * flows
    # Total scaled input (gain >= 0) and output (loss <= 0) widths.
    gain = sum(max(flow, 0) for flow in scaled_flows)
    loss = sum(min(flow, 0) for flow in scaled_flows)
    if prior is not None:
        if prior < 0:
            raise ValueError("The index of the prior diagram is negative")
        if min(connect) < 0:
            raise ValueError(
                "At least one of the connection indices is negative")
        if prior >= len(self.diagrams):
            raise ValueError(
                f"The index of the prior diagram is {prior}, but there "
                f"are only {len(self.diagrams)} other diagrams")
        if connect[0] >= len(self.diagrams[prior].flows):
            raise ValueError(
                "The connection index to the source diagram is {}, but "
                "that diagram has only {} flows".format(
                    connect[0], len(self.diagrams[prior].flows)))
        if connect[1] >= n:
            raise ValueError(
                f"The connection index to this diagram is {connect[1]}, "
                f"but this diagram has only {n} flows")
        if self.diagrams[prior].angles[connect[0]] is None:
            raise ValueError(
                f"The connection cannot be made, which may occur if the "
                f"magnitude of flow {connect[0]} of diagram {prior} is "
                f"less than the specified tolerance")
        flow_error = (self.diagrams[prior].flows[connect[0]] +
                      flows[connect[1]])
        if abs(flow_error) >= self.tolerance:
            raise ValueError(
                f"The scaled sum of the connected flows is {flow_error}, "
                f"which is not within the tolerance ({self.tolerance})")

    # Determine if the flows are inputs.
    # are_inputs[i] is True (input), False (output), or None (skipped).
    are_inputs = [None] * n
    for i, flow in enumerate(flows):
        if flow >= self.tolerance:
            are_inputs[i] = True
        elif flow <= -self.tolerance:
            are_inputs[i] = False
        else:
            _log.info(
                "The magnitude of flow %d (%f) is below the tolerance "
                "(%f).\nIt will not be shown, and it cannot be used in a "
                "connection.", i, flow, self.tolerance)

    # Determine the angles of the arrows (before rotation).
    angles = [None] * n
    for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
        if orient == 1:
            if is_input:
                angles[i] = DOWN
            elif is_input is False:
                # Be specific since is_input can be None.
                angles[i] = UP
        elif orient == 0:
            if is_input is not None:
                angles[i] = RIGHT
        else:
            if orient != -1:
                raise ValueError(
                    f"The value of orientations[{i}] is {orient}, "
                    f"but it must be -1, 0, or 1")
            if is_input:
                angles[i] = UP
            elif is_input is False:
                angles[i] = DOWN

    # Justify the lengths of the paths.
    if np.iterable(pathlengths):
        if len(pathlengths) != n:
            raise ValueError(
                f"The lengths of 'flows' ({n}) and 'pathlengths' "
                f"({len(pathlengths)}) are incompatible")
    else:  # Make pathlengths into a list.
        urlength = pathlengths
        ullength = pathlengths
        lrlength = pathlengths
        lllength = pathlengths
        # NOTE(review): dict(RIGHT=...) keys on the *string* "RIGHT",
        # while `angles` holds the integer constants, so this lookup
        # always yields the default 0 and horizontal arrows rely solely
        # on the justification loops below — confirm this is intended.
        d = dict(RIGHT=pathlengths)
        pathlengths = [d.get(angle, 0) for angle in angles]
        # Determine the lengths of the top-side arrows
        # from the middle outwards.
        for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
                                                        scaled_flows)):
            if angle == DOWN and is_input:
                pathlengths[i] = ullength
                ullength += flow
            elif angle == UP and is_input is False:
                pathlengths[i] = urlength
                urlength -= flow  # Flow is negative for outputs.
        # Determine the lengths of the bottom-side arrows
        # from the middle outwards.
        for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
                angles, are_inputs, scaled_flows)))):
            if angle == UP and is_input:
                pathlengths[n - i - 1] = lllength
                lllength += flow
            elif angle == DOWN and is_input is False:
                pathlengths[n - i - 1] = lrlength
                lrlength -= flow
        # Determine the lengths of the left-side arrows
        # from the bottom upwards.
        has_left_input = False
        for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
                angles, are_inputs, zip(scaled_flows, pathlengths))))):
            if angle == RIGHT:
                if is_input:
                    if has_left_input:
                        pathlengths[n - i - 1] = 0
                    else:
                        has_left_input = True
        # Determine the lengths of the right-side arrows
        # from the top downwards.
        has_right_output = False
        for i, (angle, is_input, spec) in enumerate(zip(
                angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
            if angle == RIGHT:
                if is_input is False:
                    if has_right_output:
                        pathlengths[i] = 0
                    else:
                        has_right_output = True

    # Begin the subpaths, and smooth the transition if the sum of the flows
    # is nonzero.
    urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0),  # Upper right
                             gain / 2.0]),
              (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
                             gain / 2.0]),
              (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
                             gain / 2.0]),
              (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
                             -loss / 2.0]),
              (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
                             -loss / 2.0]),
              (Path.LINETO, [(trunklength / 2.0 - self.gap),
                             -loss / 2.0])]
    llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap),  # Lower left
                             loss / 2.0]),
              (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
                             loss / 2.0]),
              (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
                             loss / 2.0]),
              (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
                             -gain / 2.0]),
              (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
                             -gain / 2.0]),
              (Path.LINETO, [(self.gap - trunklength / 2.0),
                             -gain / 2.0])]
    lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap),  # Lower right
                             loss / 2.0])]
    ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0,  # Upper left
                             gain / 2.0])]

    # Add the subpaths and assign the locations of the tips and labels.
    tips = np.zeros((n, 2))
    label_locations = np.zeros((n, 2))
    # Add the top-side inputs and outputs from the middle outwards.
    for i, (angle, is_input, spec) in enumerate(zip(
            angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
        if angle == DOWN and is_input:
            tips[i, :], label_locations[i, :] = self._add_input(
                ulpath, angle, *spec)
        elif angle == UP and is_input is False:
            tips[i, :], label_locations[i, :] = self._add_output(
                urpath, angle, *spec)
    # Add the bottom-side inputs and outputs from the middle outwards.
    for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
            angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
        if angle == UP and is_input:
            tip, label_location = self._add_input(llpath, angle, *spec)
            tips[n - i - 1, :] = tip
            label_locations[n - i - 1, :] = label_location
        elif angle == DOWN and is_input is False:
            tip, label_location = self._add_output(lrpath, angle, *spec)
            tips[n - i - 1, :] = tip
            label_locations[n - i - 1, :] = label_location
    # Add the left-side inputs from the bottom upwards.
    has_left_input = False
    for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
            angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
        if angle == RIGHT and is_input:
            if not has_left_input:
                # Make sure the lower path extends
                # at least as far as the upper one.
                if llpath[-1][1][0] > ulpath[-1][1][0]:
                    llpath.append((Path.LINETO, [ulpath[-1][1][0],
                                                 llpath[-1][1][1]]))
                has_left_input = True
            tip, label_location = self._add_input(llpath, angle, *spec)
            tips[n - i - 1, :] = tip
            label_locations[n - i - 1, :] = label_location
    # Add the right-side outputs from the top downwards.
    has_right_output = False
    for i, (angle, is_input, spec) in enumerate(zip(
            angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
        if angle == RIGHT and is_input is False:
            if not has_right_output:
                # Make sure the upper path extends
                # at least as far as the lower one.
                if urpath[-1][1][0] < lrpath[-1][1][0]:
                    urpath.append((Path.LINETO, [lrpath[-1][1][0],
                                                 urpath[-1][1][1]]))
                has_right_output = True
            tips[i, :], label_locations[i, :] = self._add_output(
                urpath, angle, *spec)
    # Trim any hanging vertices.
    if not has_left_input:
        ulpath.pop()
        llpath.pop()
    if not has_right_output:
        lrpath.pop()
        urpath.pop()

    # Concatenate the subpaths in the correct order (clockwise from top).
    path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
            [(Path.CLOSEPOLY, urpath[0][1])])

    # Create a patch with the Sankey outline.
    codes, vertices = zip(*path)
    vertices = np.array(vertices)

    def _get_angle(a, r):
        # Rotate angle *a* by *r* quarter turns, preserving None (skipped).
        if a is None:
            return None
        else:
            return a + r

    if prior is None:
        if rotation != 0:  # By default, none of this is needed.
            angles = [_get_angle(angle, rotation) for angle in angles]
            rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
            tips = rotate(tips)
            label_locations = rotate(label_locations)
            vertices = rotate(vertices)
        text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
    else:
        # Rotate and translate so flow connect[1] lines up with flow
        # connect[0] of the prior diagram.
        rotation = (self.diagrams[prior].angles[connect[0]] -
                    angles[connect[1]])
        angles = [_get_angle(angle, rotation) for angle in angles]
        rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
        tips = rotate(tips)
        offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
        translate = Affine2D().translate(*offset).transform_affine
        tips = translate(tips)
        label_locations = translate(rotate(label_locations))
        vertices = translate(rotate(vertices))
        kwds = dict(s=patchlabel, ha='center', va='center')
        text = self.ax.text(*offset, **kwds)
    if mpl.rcParams['_internal.classic_mode']:
        fc = kwargs.pop('fc', kwargs.pop('facecolor', '#bfd1d4'))
        lw = kwargs.pop('lw', kwargs.pop('linewidth', 0.5))
    else:
        fc = kwargs.pop('fc', kwargs.pop('facecolor', None))
        lw = kwargs.pop('lw', kwargs.pop('linewidth', None))
    if fc is None:
        fc = self.ax._get_patches_for_fill.get_next_color()
    patch = PathPatch(Path(vertices, codes), fc=fc, lw=lw, **kwargs)
    self.ax.add_patch(patch)

    # Add the path labels.
    texts = []
    for number, angle, label, location in zip(flows, angles, labels,
                                              label_locations):
        if label is None or angle is None:
            label = ''
        elif self.unit is not None:
            if isinstance(self.format, str):
                quantity = self.format % abs(number) + self.unit
            elif callable(self.format):
                quantity = self.format(number)
            else:
                raise TypeError(
                    'format must be callable or a format string')
            if label != '':
                label += "\n"
            label += quantity
        texts.append(self.ax.text(x=location[0], y=location[1],
                                  s=label,
                                  ha='center', va='center'))
    # Text objects are placed even if they are empty (as long as the
    # magnitude of the corresponding flow is larger than the tolerance) in
    # case the user wants to provide labels later.

    # Expand the size of the diagram if necessary.
    self.extent = (min(np.min(vertices[:, 0]),
                       np.min(label_locations[:, 0]),
                       self.extent[0]),
                   max(np.max(vertices[:, 0]),
                       np.max(label_locations[:, 0]),
                       self.extent[1]),
                   min(np.min(vertices[:, 1]),
                       np.min(label_locations[:, 1]),
                       self.extent[2]),
                   max(np.max(vertices[:, 1]),
                       np.max(label_locations[:, 1]),
                       self.extent[3]))
    # Include both vertices _and_ label locations in the extents; there are
    # cases where either could determine the margins (e.g., arrow
    # shoulders).

    # Add this diagram as a subdiagram.
    self.diagrams.append(
        SimpleNamespace(patch=patch, flows=flows, angles=angles, tips=tips,
                        text=text, texts=texts))
    # Allow a daisy-chained call structure (see docstring for the class).
    return self
def finish(self):
    """
    Adjust the Axes and return a list of information about the Sankey
    subdiagram(s).

    Each returned subdiagram carries the following fields:

    ======== =============================================================
    Field    Description
    ======== =============================================================
    *patch*  Sankey outline (a `~matplotlib.patches.PathPatch`).
    *flows*  Flow values (positive for input, negative for output).
    *angles* List of angles of the arrows [deg/90].
             For example, if the diagram has not been rotated,
             an input to the top side has an angle of 3 (DOWN),
             and an output from the top side has an angle of 1 (UP).
             If a flow has been skipped (because its magnitude is less
             than *tolerance*), then its angle will be *None*.
    *tips*   (N, 2)-array of the (x, y) positions of the tips (or "dips")
             of the flow paths.
             If the magnitude of a flow is less the *tolerance* of this
             `Sankey` instance, the flow is skipped and its tip will be at
             the center of the diagram.
    *text*   `.Text` instance for the diagram label.
    *texts*  List of `.Text` instances for the flow labels.
    ======== =============================================================

    See Also
    --------
    Sankey.add
    """
    # Frame the accumulated extent, padded by the configured margin.
    xmin, xmax, ymin, ymax = self.extent
    pad = self.margin
    self.ax.axis([xmin - pad, xmax + pad, ymin - pad, ymax + pad])
    self.ax.set_aspect('equal', adjustable='datalim')
    return self.diagrams
venv\Lib\site-packages\matplotlib\scale.py
"""
Scales define the distribution of data values on an axis, e.g. a log scaling.
The mapping is implemented through `.Transform` subclasses.
The following scales are built-in:
.. _builtin_scales:
============= ===================== ================================ =================================
Name Class Transform Inverted transform
============= ===================== ================================ =================================
"asinh" `AsinhScale` `AsinhTransform` `InvertedAsinhTransform`
"function" `FuncScale` `FuncTransform` `FuncTransform`
"functionlog" `FuncScaleLog` `FuncTransform` + `LogTransform` `InvertedLogTransform` + `FuncTransform`
"linear" `LinearScale` `.IdentityTransform` `.IdentityTransform`
"log" `LogScale` `LogTransform` `InvertedLogTransform`
"logit" `LogitScale` `LogitTransform` `LogisticTransform`
"symlog" `SymmetricalLogScale` `SymmetricalLogTransform` `InvertedSymmetricalLogTransform`
============= ===================== ================================ =================================
A user will often only use the scale name, e.g. when setting the scale through
`~.Axes.set_xscale`: ``ax.set_xscale("log")``.
See also the :ref:`scales examples ` in the documentation.
Custom scaling can be achieved through `FuncScale`, or by creating your own
`ScaleBase` subclass and corresponding transforms (see :doc:`/gallery/scales/custom_scale`).
Third parties can register their scales by name through `register_scale`.
""" # noqa: E501
import inspect
import textwrap
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.ticker import (
NullFormatter, ScalarFormatter, LogFormatterSciNotation, LogitFormatter,
NullLocator, LogLocator, AutoLocator, AutoMinorLocator,
SymmetricalLogLocator, AsinhLocator, LogitLocator)
from matplotlib.transforms import Transform, IdentityTransform
class ScaleBase:
    """
    The base class for all scales.

    Scales are separable transformations, working on a single dimension.

    Subclasses should override

    :attr:`name`
        The scale's name.
    :meth:`get_transform`
        A method returning a `.Transform`, which converts data coordinates to
        scaled coordinates. This transform should be invertible, so that e.g.
        mouse positions can be converted back to data coordinates.
    :meth:`set_default_locators_and_formatters`
        A method that sets default locators and formatters for an `~.axis.Axis`
        that uses this scale.
    :meth:`limit_range_for_scale`
        An optional method that "fixes" the axis range to acceptable values,
        e.g. restricting log-scaled axes to positive values.
    """

    def __init__(self, axis):
        r"""
        Construct a new scale.

        Notes
        -----
        The following note is for scale implementers.

        For back-compatibility reasons, scales take an `~matplotlib.axis.Axis`
        object as first argument. However, this argument should not
        be used: a single scale object should be usable by multiple
        `~matplotlib.axis.Axis`\es at the same time.
        """
        # The docstring is the entire body: the base constructor deliberately
        # stores nothing (see the note above about the *axis* argument).

    def get_transform(self):
        """
        Return the `.Transform` object associated with this scale.
        """
        # Abstract: every concrete scale must supply an invertible transform.
        raise NotImplementedError()

    def set_default_locators_and_formatters(self, axis):
        """
        Set the locators and formatters of *axis* to instances suitable for
        this scale.
        """
        # Abstract: subclasses choose tick locators/formatters appropriate
        # for their mapping.
        raise NotImplementedError()

    def limit_range_for_scale(self, vmin, vmax, minpos):
        """
        Return the range *vmin*, *vmax*, restricted to the
        domain supported by this scale (if any).

        *minpos* should be the minimum positive value in the data.
        This is used by log scales to determine a minimum value.
        """
        # Default: the full range is acceptable (no restriction).
        return vmin, vmax
class LinearScale(ScaleBase):
    """
    The default linear scale.
    """

    name = 'linear'

    def __init__(self, axis):
        # This method is present only to prevent inheritance of the base class'
        # constructor docstring, which would otherwise end up interpolated into
        # the docstring of Axis.set_scale.
        """
        """  # noqa: D419

    def set_default_locators_and_formatters(self, axis):
        # docstring inherited
        axis.set_major_locator(AutoLocator())
        axis.set_major_formatter(ScalarFormatter())
        axis.set_minor_formatter(NullFormatter())
        # Minor tick visibility is controlled per-axis by rcParams; axes
        # other than x/y (e.g. 3-D "z") never get auto minor ticks here.
        rc_key = {'x': 'xtick.minor.visible',
                  'y': 'ytick.minor.visible'}.get(axis.axis_name)
        if rc_key is not None and mpl.rcParams[rc_key]:
            axis.set_minor_locator(AutoMinorLocator())
        else:
            axis.set_minor_locator(NullLocator())

    def get_transform(self):
        """
        Return the transform for linear scaling, which is just the
        `~matplotlib.transforms.IdentityTransform`.
        """
        return IdentityTransform()
class FuncTransform(Transform):
    """
    A simple transform that takes an arbitrary function for the
    forward and inverse transform.
    """

    input_dims = output_dims = 1

    def __init__(self, forward, inverse):
        """
        Parameters
        ----------
        forward : callable
            The forward function for the transform. This function must have
            an inverse and, for best behavior, be monotonic.
            It must have the signature::

                def forward(values: array-like) -> array-like

        inverse : callable
            The inverse of the forward function. Signature as ``forward``.
        """
        super().__init__()
        # Validate up front so that a bad argument fails at construction
        # time rather than on first use.
        if not (callable(forward) and callable(inverse)):
            raise ValueError('arguments to FuncTransform must be functions')
        self._forward = forward
        self._inverse = inverse

    def transform_non_affine(self, values):
        return self._forward(values)

    def inverted(self):
        # Swapping the two functions yields the inverse transform.
        return FuncTransform(self._inverse, self._forward)
class FuncScale(ScaleBase):
    """
    Provide an arbitrary scale with user-supplied function for the axis.
    """

    name = 'function'

    def __init__(self, axis, functions):
        """
        Parameters
        ----------
        axis : `~matplotlib.axis.Axis`
            The axis for the scale.
        functions : (callable, callable)
            two-tuple of the forward and inverse functions for the scale.
            The forward function must be monotonic.
            Both functions must have the signature::

                def forward(values: array-like) -> array-like
        """
        fwd, inv = functions
        self._transform = FuncTransform(fwd, inv)

    def get_transform(self):
        """Return the `.FuncTransform` associated with this scale."""
        return self._transform

    def set_default_locators_and_formatters(self, axis):
        # docstring inherited
        axis.set_major_locator(AutoLocator())
        axis.set_major_formatter(ScalarFormatter())
        axis.set_minor_formatter(NullFormatter())
        # Minor ticks follow the x/y-specific rcParams; any other axis
        # name never gets auto minor ticks here.
        minor_visible = {
            'x': mpl.rcParams['xtick.minor.visible'],
            'y': mpl.rcParams['ytick.minor.visible'],
        }.get(axis.axis_name, False)
        axis.set_minor_locator(
            AutoMinorLocator() if minor_visible else NullLocator())
class LogTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, base, nonpositive='clip'):
        """
        Parameters
        ----------
        base : float
            The logarithm base; must be positive and not equal to 1.
        nonpositive : {'clip', 'mask'}, default: 'clip'
            Whether nonpositive inputs are clipped to a very negative
            output value or masked as invalid.
        """
        super().__init__()
        if base <= 0 or base == 1:
            raise ValueError('The log base cannot be <= 0 or == 1')
        self.base = base
        # check_getitem both validates *nonpositive* and maps it to a bool.
        self._clip = _api.check_getitem(
            {"clip": True, "mask": False}, nonpositive=nonpositive)

    def __str__(self):
        mode = "clip" if self._clip else "mask"
        return "{}(base={}, nonpositive={!r})".format(
            type(self).__name__, self.base, mode)

    def transform_non_affine(self, values):
        # Ignore invalid values due to nans being passed to the transform.
        with np.errstate(divide="ignore", invalid="ignore"):
            # Use the specialized numpy ufunc when the base has one, so that
            # everything happens in a single C-level call.
            fast_log = {np.e: np.log, 2: np.log2, 10: np.log10}.get(self.base)
            if fast_log:
                out = fast_log(values)
            else:
                out = np.log(values)
                out /= np.log(self.base)
            if self._clip:
                # SVG spec says that conforming viewers must support values up
                # to 3.4e38 (C float); however experiments suggest that
                # Inkscape (which uses cairo for rendering) runs into cairo's
                # 24-bit limit (which is apparently shared by Agg).
                # Ghostscript (used for pdf rendering) appears to overflow even
                # earlier, with the max value around 2 ** 15 for the tests to
                # pass. On the other hand, in practice, we want to clip beyond
                #     np.log10(np.nextafter(0, 1)) ~ -323
                # so 1000 seems safe.
                out[values <= 0] = -1000
        return out

    def inverted(self):
        return InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, base):
        super().__init__()
        self.base = base

    def __str__(self):
        return "{}(base={})".format(type(self).__name__, self.base)

    def transform_non_affine(self, values):
        # Exponentiation undoes the forward log transform.
        return np.power(self.base, values)

    def inverted(self):
        return LogTransform(self.base)
class LogScale(ScaleBase):
    """
    A standard logarithmic scale. Care is taken to only plot positive values.
    """

    name = 'log'

    def __init__(self, axis, *, base=10, subs=None, nonpositive="clip"):
        """
        Parameters
        ----------
        axis : `~matplotlib.axis.Axis`
            The axis for the scale.
        base : float, default: 10
            The base of the logarithm.
        nonpositive : {'clip', 'mask'}, default: 'clip'
            Determines the behavior for non-positive values. They can either
            be masked as invalid, or clipped to a very small positive number.
        subs : sequence of int, default: None
            Where to place the subticks between each major tick. For example,
            in a log10 scale, ``[2, 3, 4, 5, 6, 7, 8, 9]`` will place 8
            logarithmically spaced minor ticks between each major tick.
        """
        self._transform = LogTransform(base, nonpositive)
        self.subs = subs

    # Read-only view of the base stored on the underlying transform.
    base = property(lambda self: self._transform.base)

    def set_default_locators_and_formatters(self, axis):
        # docstring inherited
        axis.set_major_locator(LogLocator(self.base))
        axis.set_major_formatter(LogFormatterSciNotation(self.base))
        axis.set_minor_locator(LogLocator(self.base, self.subs))
        # Minor ticks are only labeled when explicit subs were requested.
        minor_fmt = LogFormatterSciNotation(
            self.base, labelOnlyBase=(self.subs is not None))
        axis.set_minor_formatter(minor_fmt)

    def get_transform(self):
        """Return the `.LogTransform` associated with this scale."""
        return self._transform

    def limit_range_for_scale(self, vmin, vmax, minpos):
        """Limit the domain to positive values."""
        if not np.isfinite(minpos):
            minpos = 1e-300  # Should rarely (if ever) have a visible effect.
        # Replace any nonpositive limit by the smallest positive data value.
        return (minpos if vmin <= 0 else vmin,
                minpos if vmax <= 0 else vmax)
class FuncScaleLog(LogScale):
    """
    Provide an arbitrary scale with user-supplied function for the axis and
    then put on a logarithmic axes.
    """

    name = 'functionlog'

    def __init__(self, axis, functions, base=10):
        """
        Parameters
        ----------
        axis : `~matplotlib.axis.Axis`
            The axis for the scale.
        functions : (callable, callable)
            two-tuple of the forward and inverse functions for the scale.
            The forward function must be monotonic.
            Both functions must have the signature::

                def forward(values: array-like) -> array-like

        base : float, default: 10
            Logarithmic base of the scale.
        """
        fwd, inv = functions
        self.subs = None
        # Composite transform: apply the user function first, then the log.
        self._transform = FuncTransform(fwd, inv) + LogTransform(base)

    @property
    def base(self):
        # The composite stores its second member as ``_b``.
        return self._transform._b.base  # Base of the LogTransform.

    def get_transform(self):
        """Return the `.Transform` associated with this scale."""
        return self._transform
class SymmetricalLogTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, base, linthresh, linscale):
        super().__init__()
        # Validate all three parameters before storing anything.
        if base <= 1.0:
            raise ValueError("'base' must be larger than 1")
        if linthresh <= 0.0:
            raise ValueError("'linthresh' must be positive")
        if linscale <= 0.0:
            raise ValueError("'linscale' must be positive")
        self.base = base
        self.linthresh = linthresh
        self.linscale = linscale
        # Slope of the linear segment, adjusted so the two branches meet
        # continuously at +/- linthresh.
        self._linscale_adj = linscale / (1.0 - base ** -1)
        self._log_base = np.log(base)

    def transform_non_affine(self, values):
        abs_vals = np.abs(values)
        with np.errstate(divide="ignore", invalid="ignore"):
            # Logarithmic branch, evaluated everywhere...
            out = np.sign(values) * self.linthresh * (
                self._linscale_adj
                + np.log(abs_vals / self.linthresh) / self._log_base)
            # ...then overwritten with the linear branch inside the
            # linear-threshold region.
            linear_region = abs_vals <= self.linthresh
            out[linear_region] = values[linear_region] * self._linscale_adj
        return out

    def inverted(self):
        return InvertedSymmetricalLogTransform(self.base, self.linthresh,
                                               self.linscale)
class InvertedSymmetricalLogTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, base, linthresh, linscale):
        super().__init__()
        # The linear-region threshold in *transformed* space is obtained by
        # running the forward transform on the data-space threshold.
        forward = SymmetricalLogTransform(base, linthresh, linscale)
        self.base = base
        self.linthresh = linthresh
        self.invlinthresh = forward.transform(linthresh)
        self.linscale = linscale
        self._linscale_adj = linscale / (1.0 - base ** -1)

    def transform_non_affine(self, values):
        abs_vals = np.abs(values)
        with np.errstate(divide="ignore", invalid="ignore"):
            # Exponential (inverse-log) branch, evaluated everywhere...
            out = np.sign(values) * self.linthresh * (
                np.power(self.base,
                         abs_vals / self.linthresh - self._linscale_adj))
            # ...then overwritten with the inverse-linear branch inside the
            # transformed linear-threshold region.
            linear_region = abs_vals <= self.invlinthresh
            out[linear_region] = values[linear_region] / self._linscale_adj
        return out

    def inverted(self):
        return SymmetricalLogTransform(self.base, self.linthresh,
                                       self.linscale)
class SymmetricalLogScale(ScaleBase):
    """
    The symmetrical logarithmic scale is logarithmic in both the
    positive and negative directions from the origin.

    Since the values close to zero tend toward infinity, there is a
    need to have a range around zero that is linear. The parameter
    *linthresh* allows the user to specify the size of this range
    (-*linthresh*, *linthresh*).

    See :doc:`/gallery/scales/symlog_demo` for a detailed description.

    Parameters
    ----------
    base : float, default: 10
        The base of the logarithm.
    linthresh : float, default: 2
        Defines the range ``(-x, x)``, within which the plot is linear.
        This avoids having the plot go to infinity around zero.
    subs : sequence of int
        Where to place the subticks between each major tick.
        For example, in a log10 scale: ``[2, 3, 4, 5, 6, 7, 8, 9]`` will place
        8 logarithmically spaced minor ticks between each major tick.
    linscale : float, optional
        This allows the linear range ``(-linthresh, linthresh)`` to be
        stretched relative to the logarithmic range. Its value is the number of
        decades to use for each half of the linear range. For example, when
        *linscale* == 1.0 (the default), the space used for the positive and
        negative halves of the linear range will be equal to one decade in
        the logarithmic range.
    """

    name = 'symlog'

    def __init__(self, axis, *, base=10, linthresh=2, subs=None, linscale=1):
        self._transform = SymmetricalLogTransform(base, linthresh, linscale)
        self.subs = subs

    @property
    def base(self):
        # Read-only: stored on the underlying transform.
        return self._transform.base

    @property
    def linthresh(self):
        # Read-only: stored on the underlying transform.
        return self._transform.linthresh

    @property
    def linscale(self):
        # Read-only: stored on the underlying transform.
        return self._transform.linscale

    def set_default_locators_and_formatters(self, axis):
        # docstring inherited
        transform = self.get_transform()
        axis.set_major_locator(SymmetricalLogLocator(transform))
        axis.set_major_formatter(LogFormatterSciNotation(self.base))
        axis.set_minor_locator(SymmetricalLogLocator(transform, self.subs))
        axis.set_minor_formatter(NullFormatter())

    def get_transform(self):
        """Return the `.SymmetricalLogTransform` associated with this scale."""
        return self._transform
class AsinhTransform(Transform):
    """Inverse hyperbolic-sine transformation used by `.AsinhScale`"""
    input_dims = output_dims = 1

    def __init__(self, linear_width):
        super().__init__()
        if linear_width <= 0.0:
            raise ValueError(
                "Scale parameter 'linear_width' must be strictly positive")
        self.linear_width = linear_width

    def transform_non_affine(self, values):
        a0 = self.linear_width
        return a0 * np.arcsinh(values / a0)

    def inverted(self):
        return InvertedAsinhTransform(self.linear_width)
class InvertedAsinhTransform(Transform):
    """Hyperbolic sine transformation used by `.AsinhScale`"""
    input_dims = output_dims = 1

    def __init__(self, linear_width):
        super().__init__()
        self.linear_width = linear_width

    def transform_non_affine(self, values):
        a0 = self.linear_width
        return a0 * np.sinh(values / a0)

    def inverted(self):
        return AsinhTransform(self.linear_width)
class AsinhScale(ScaleBase):
    """
    A quasi-logarithmic scale based on the inverse hyperbolic sine (asinh)

    For values close to zero, this is essentially a linear scale,
    but for large magnitude values (either positive or negative)
    it is asymptotically logarithmic. The transition between these
    linear and logarithmic regimes is smooth, and has no discontinuities
    in the function gradient in contrast to
    the `.SymmetricalLogScale` ("symlog") scale.

    Specifically, the transformation of an axis coordinate :math:`a` is
    :math:`a \\rightarrow a_0 \\sinh^{-1} (a / a_0)` where :math:`a_0`
    is the effective width of the linear region of the transformation.
    In that region, the transformation is
    :math:`a \\rightarrow a + \\mathcal{O}(a^3)`.
    For large values of :math:`a` the transformation behaves as
    :math:`a \\rightarrow a_0 \\, \\mathrm{sgn}(a) \\ln |a| + \\mathcal{O}(1)`.

    .. note::

       This API is provisional and may be revised in the future
       based on early user feedback.
    """

    name = 'asinh'

    # Preferred minor-tick multipliers for common integer bases; consulted
    # when ``subs='auto'`` is passed to the constructor.
    auto_tick_multipliers = {
        3: (2, ),
        4: (2, ),
        5: (2, ),
        8: (2, 4),
        10: (2, 5),
        16: (2, 4, 8),
        64: (4, 16),
        1024: (256, 512)
    }

    def __init__(self, axis, *, linear_width=1.0,
                 base=10, subs='auto', **kwargs):
        """
        Parameters
        ----------
        linear_width : float, default: 1
            The scale parameter (elsewhere referred to as :math:`a_0`)
            defining the extent of the quasi-linear region,
            and the coordinate values beyond which the transformation
            becomes asymptotically logarithmic.
        base : int, default: 10
            The number base used for rounding tick locations
            on a logarithmic scale. If this is less than one,
            then rounding is to the nearest integer multiple
            of powers of ten.
        subs : sequence of int
            Multiples of the number base used for minor ticks.
            If set to 'auto', this will use built-in defaults,
            e.g. (2, 5) for base=10.
        """
        super().__init__(axis)
        self._transform = AsinhTransform(linear_width)
        self._base = int(base)
        self._subs = (self.auto_tick_multipliers.get(self._base)
                      if subs == 'auto' else subs)

    @property
    def linear_width(self):
        # Read-only: stored on the underlying transform.
        return self._transform.linear_width

    def get_transform(self):
        return self._transform

    def set_default_locators_and_formatters(self, axis):
        axis.set_major_locator(AsinhLocator(self.linear_width,
                                            base=self._base))
        axis.set_minor_locator(AsinhLocator(self.linear_width,
                                            base=self._base,
                                            subs=self._subs))
        axis.set_minor_formatter(NullFormatter())
        # A base <= 1 disables log-style tick rounding, so plain decimal
        # formatting is used for the major ticks in that case.
        if self._base > 1:
            axis.set_major_formatter(LogFormatterSciNotation(self._base))
        else:
            axis.set_major_formatter('{x:.3g}')
class LogitTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, nonpositive='mask'):
        super().__init__()
        _api.check_in_list(['mask', 'clip'], nonpositive=nonpositive)
        self._nonpositive = nonpositive
        # Validated above, so a simple comparison suffices here.
        self._clip = nonpositive == 'clip'

    def transform_non_affine(self, values):
        """logit transform (base 10), masked or clipped"""
        with np.errstate(divide="ignore", invalid="ignore"):
            odds = values / (1 - values)
            out = np.log10(odds)
            if self._clip:  # See LogTransform for choice of clip value.
                out[values <= 0] = -1000
                out[1 <= values] = 1000
        return out

    def inverted(self):
        return LogisticTransform(self._nonpositive)

    def __str__(self):
        return f"{type(self).__name__}({self._nonpositive!r})"
class LogisticTransform(Transform):
    input_dims = output_dims = 1

    def __init__(self, nonpositive='mask'):
        super().__init__()
        # Kept only so that inverted() can round-trip the setting.
        self._nonpositive = nonpositive

    def transform_non_affine(self, values):
        """logistic transform (base 10)"""
        return 1.0 / (1 + 10 ** (-values))

    def inverted(self):
        return LogitTransform(self._nonpositive)

    def __str__(self):
        return f"{type(self).__name__}({self._nonpositive!r})"
class LogitScale(ScaleBase):
    """
    Logit scale for data between zero and one, both excluded.

    This scale is similar to a log scale close to zero and to one, and almost
    linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.
    """

    name = 'logit'

    def __init__(self, axis, nonpositive='mask', *,
                 one_half=r"\frac{1}{2}", use_overline=False):
        r"""
        Parameters
        ----------
        axis : `~matplotlib.axis.Axis`
            Currently unused.
        nonpositive : {'mask', 'clip'}
            Determines the behavior for values beyond the open interval ]0, 1[.
            They can either be masked as invalid, or clipped to a number very
            close to 0 or 1.
        use_overline : bool, default: False
            Indicate the usage of survival notation (\overline{x}) in place of
            standard notation (1-x) for probability close to one.
        one_half : str, default: r"\frac{1}{2}"
            The string used for ticks formatter to represent 1/2.
        """
        self._transform = LogitTransform(nonpositive)
        self._use_overline = use_overline
        self._one_half = one_half

    def get_transform(self):
        """Return the `.LogitTransform` associated with this scale."""
        return self._transform

    def set_default_locators_and_formatters(self, axis):
        # docstring inherited
        # Major ticks at ..., 0.01, 0.1, 0.5, 0.9, 0.99, ...
        fmt_kwargs = dict(one_half=self._one_half,
                          use_overline=self._use_overline)
        axis.set_major_locator(LogitLocator())
        axis.set_major_formatter(LogitFormatter(**fmt_kwargs))
        axis.set_minor_locator(LogitLocator(minor=True))
        axis.set_minor_formatter(LogitFormatter(minor=True, **fmt_kwargs))

    def limit_range_for_scale(self, vmin, vmax, minpos):
        """
        Limit the domain to values between 0 and 1 (excluded).
        """
        if not np.isfinite(minpos):
            minpos = 1e-7  # Should rarely (if ever) have a visible effect.
        # Pull out-of-domain limits just inside the open interval ]0, 1[.
        return (minpos if vmin <= 0 else vmin,
                1 - minpos if vmax >= 1 else vmax)
# Registry of scale names to their classes; pre-populated with the built-in
# scales and extended at runtime by `register_scale`.
_scale_mapping = {
    'linear': LinearScale,
    'log': LogScale,
    'symlog': SymmetricalLogScale,
    'asinh': AsinhScale,
    'logit': LogitScale,
    'function': FuncScale,
    'functionlog': FuncScaleLog,
    }
def get_scale_names():
    """Return the names of the available scales."""
    return sorted(_scale_mapping.keys())
def scale_factory(scale, axis, **kwargs):
    """
    Return a scale instance of the class registered under the name *scale*.

    Extra keyword arguments are forwarded to the scale class' constructor.

    Parameters
    ----------
    scale : {%(names)s}
    axis : `~matplotlib.axis.Axis`
    """
    # check_getitem raises a helpful ValueError listing the valid names if
    # *scale* is unknown.
    scale_cls = _api.check_getitem(_scale_mapping, scale=scale)
    return scale_cls(axis, **kwargs)
# Substitute the registered scale names into scale_factory's docstring.
# (Guarded because docstrings are stripped under ``python -OO``.)
if scale_factory.__doc__:
    scale_factory.__doc__ = scale_factory.__doc__ % {
        "names": ", ".join(map(repr, get_scale_names()))}
def register_scale(scale_class):
    """
    Register a new kind of scale.

    Parameters
    ----------
    scale_class : subclass of `ScaleBase`
        The scale to register.
    """
    # A later registration under an existing name silently replaces it.
    _scale_mapping[scale_class.name] = scale_class
def _get_scale_docs():
    """
    Helper function for generating docstrings related to scales.
    """
    chunks = []
    for name, cls in _scale_mapping.items():
        # Each entry: the quoted scale name, then its constructor docstring
        # indented one extra level, separated by blank lines.
        init_doc = inspect.getdoc(cls.__init__) or ""
        chunks.append(f"    {name!r}")
        chunks.append("")
        chunks.append(textwrap.indent(init_doc, " " * 8))
        chunks.append("")
    return "\n".join(chunks)
# Make the scale-name list and the per-scale docs available for %(...)s
# interpolation in other matplotlib docstrings.
_docstring.interpd.register(
    scale_type='{%s}' % ', '.join([repr(x) for x in get_scale_names()]),
    scale_docs=_get_scale_docs().rstrip(),
    )
venv\Lib\site-packages\matplotlib\spines.py
from collections.abc import MutableMapping
import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.artist import allow_rasterization
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatches
import matplotlib.path as mpath
class Spine(mpatches.Patch):
"""
An axis spine -- the line noting the data area boundaries.
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See `~.Spine.set_position` for more information.
The default position is ``('outward', 0)``.
Spines are subclasses of `.Patch`, and inherit much of their behavior.
Spines draw a line, a circle, or an arc depending on if
`~.Spine.set_patch_line`, `~.Spine.set_patch_circle`, or
`~.Spine.set_patch_arc` has been called. Line-like is the default.
For examples see :ref:`spines_examples`.
"""
def __str__(self):
return "Spine"
@_docstring.interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
Parameters
----------
axes : `~matplotlib.axes.Axes`
The `~.axes.Axes` instance containing the spine.
spine_type : str
The spine type.
path : `~matplotlib.path.Path`
The `.Path` instance used to draw the spine.
Other Parameters
----------------
**kwargs
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.get_figure(root=False))
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(mpl.rcParams['axes.edgecolor'])
self.set_linewidth(mpl.rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
_api.check_isinstance(mpath.Path, path=path)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior three ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance. If self._patch_type == 'arc', behave like
# a mpatches.Arc instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_patch_arc(self, center, radius, theta1, theta2):
"""Set the spine to be arc-like."""
self._patch_type = 'arc'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._theta1 = theta1
self._theta2 = theta2
self._path = mpath.Path.arc(theta1, theta2)
# arc drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_circle(self, center, radius):
"""Set the spine to be circular."""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_line(self):
"""Set the spine to be linear."""
self._patch_type = 'line'
self.stale = True
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""
Notes
-----
This cannot be called until after this has been added to an Axes,
otherwise unit conversion will fail. This makes it very important to
call the accessor method and not directly access the transformation
member variable.
"""
assert self._patch_type in ('arc', 'circle')
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type in ('arc', 'circle'):
self._recompute_transform()
return self._patch_transform
else:
return super().get_patch_transform()
def get_window_extent(self, renderer=None):
"""
Return the window extent of the spines in display space, including
padding for ticks (but not their labels)
See Also
--------
matplotlib.axes.Axes.get_tightbbox
matplotlib.axes.Axes.get_window_extent
"""
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes = [bb]
drawn_ticks = self.axis._update_ticks()
major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)
minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)
for tick in [major_tick, minor_tick]:
if tick is None:
continue
bb0 = bb.frozen()
tickl = tick._size
tickdir = tick._tickdir
if tickdir == 'out':
padout = 1
padin = 0
elif tickdir == 'in':
padout = 0
padin = 1
else:
padout = 0.5
padin = 0.5
dpi = self.get_figure(root=True).dpi
padout = padout * tickl / 72 * dpi
padin = padin * tickl / 72 * dpi
if tick.tick1line.get_visible():
if self.spine_type == 'left':
bb0.x0 = bb0.x0 - padout
bb0.x1 = bb0.x1 + padin
elif self.spine_type == 'bottom':
bb0.y0 = bb0.y0 - padout
bb0.y1 = bb0.y1 + padin
if tick.tick2line.get_visible():
if self.spine_type == 'right':
bb0.x1 = bb0.x1 + padout
bb0.x0 = bb0.x0 - padin
elif self.spine_type == 'top':
bb0.y1 = bb0.y1 + padout
bb0.y0 = bb0.y0 - padout
bboxes.append(bb0)
return mtransforms.Bbox.union(bboxes)
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""
Register an axis.
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
self.stale = True
def clear(self):
"""Clear the current spine."""
self._clear()
if self.axis is not None:
self.axis.clear()
def _clear(self):
"""
Clear things directly related to the spine.
In this way it is possible to avoid clearing the Axis as well when calling
from library code where it is known that the Axis is cleared separately.
"""
self._position = None # clear position
def _adjust_location(self):
"""Automatically set spine bounds to the view interval."""
if self.spine_type == 'circle':
return
if self._bounds is not None:
low, high = self._bounds
elif self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError(f'unknown spine spine_type: {self.spine_type}')
if self._patch_type == 'arc':
if self.spine_type in ('bottom', 'top'):
try:
direction = self.axes.get_theta_direction()
except AttributeError:
direction = 1
try:
offset = self.axes.get_theta_offset()
except AttributeError:
offset = 0
low = low * direction + offset
high = high * direction + offset
if low > high:
low, high = high, low
self._path = mpath.Path.arc(np.rad2deg(low), np.rad2deg(high))
if self.spine_type == 'bottom':
if self.axis is None:
tr = mtransforms.IdentityTransform()
else:
tr = self.axis.get_transform()
rmin, rmax = tr.transform(self.axes.viewLim.intervaly)
try:
rorigin = self.axes.get_rorigin()
except AttributeError:
rorigin = rmin
else:
rorigin = tr.transform(rorigin)
scaled_diameter = (rmin - rorigin) / (rmax - rorigin)
self._height = scaled_diameter
self._width = scaled_diameter
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
else:
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
ret = super().draw(renderer)
self.stale = False
return ret
def set_position(self, position):
"""
Set the position of the spine.
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward': place the spine out from the data area by the specified
number of points. (Negative values place the spine inwards.)
* 'axes': place the spine at the specified Axes coordinate (0 to 1).
* 'data': place the spine at the specified data coordinate.
Additionally, shorthand notations define a special positions:
* 'center' -> ``('axes', 0.5)``
* 'zero' -> ``('data', 0.0)``
Examples
--------
:doc:`/gallery/spines/spine_placement_demo`
"""
if position in ('center', 'zero'): # special positions
pass
else:
if len(position) != 2:
raise ValueError("position should be 'center' or 2-tuple")
if position[0] not in ['outward', 'axes', 'data']:
raise ValueError("position[0] should be one of 'outward', "
"'axes', or 'data' ")
self._position = position
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
self.stale = True
def get_position(self):
"""Return the spine position."""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""Return the spine transform."""
self._ensure_position_is_set()
position = self._position
if isinstance(position, str):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, 'position should be 2-tuple'
position_type, amount = position
_api.check_in_list(['axes', 'outward', 'data'],
position_type=position_type)
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError(f'unknown spine spine_type: {self.spine_type!r}')
if position_type == 'outward':
if amount == 0: # short circuit commonest case
return base_transform
else:
offset_vec = {'left': (-1, 0), 'right': (1, 0),
'bottom': (0, -1), 'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_dots = amount * np.array(offset_vec) / 72
return (base_transform
+ mtransforms.ScaledTranslation(
*offset_dots, self.get_figure(root=False).dpi_scale_trans))
elif position_type == 'axes':
if self.spine_type in ['left', 'right']:
# keep y unchanged, fix x at amount
return (mtransforms.Affine2D.from_values(0, 0, 0, 1, amount, 0)
+ base_transform)
elif self.spine_type in ['bottom', 'top']:
# keep x unchanged, fix y at amount
return (mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, amount)
+ base_transform)
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
return mtransforms.blended_transform_factory(
mtransforms.Affine2D().translate(amount, 0)
+ self.axes.transData,
self.axes.transData)
elif self.spine_type in ('bottom', 'top'):
return mtransforms.blended_transform_factory(
self.axes.transData,
mtransforms.Affine2D().translate(0, amount)
+ self.axes.transData)
def set_bounds(self, low=None, high=None):
"""
Set the spine bounds.
Parameters
----------
low : float or None, optional
The lower spine bound. Passing *None* leaves the limit unchanged.
The bounds may also be passed as the tuple (*low*, *high*) as the
first positional argument.
.. ACCEPTS: (low: float, high: float)
high : float or None, optional
The higher spine bound. Passing *None* leaves the limit unchanged.
"""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
if high is None and np.iterable(low):
low, high = low
old_low, old_high = self.get_bounds() or (None, None)
if low is None:
low = old_low
if high is None:
high = old_high
self._bounds = (low, high)
self.stale = True
    def get_bounds(self):
        """Get the bounds of the spine."""
        # Returns the (low, high) tuple stored by set_bounds(); may be None
        # if no bounds have been set yet (set_bounds() treats that case via
        # the `or (None, None)` fallback).
        return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""Create and return a linear `Spine`."""
# all values of 0.999 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 0.999), (0.0, 0.999)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 0.999), (1.0, 0.999)])
elif spine_type == 'bottom':
path = mpath.Path([(0.999, 0.0), (0.999, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(0.999, 1.0), (0.999, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
result.set_visible(mpl.rcParams[f'axes.spines.{spine_type}'])
return result
@classmethod
def arc_spine(cls, axes, spine_type, center, radius, theta1, theta2,
**kwargs):
"""Create and return an arc `Spine`."""
path = mpath.Path.arc(theta1, theta2)
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_arc(center, radius, theta1, theta2)
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""Create and return a circular `Spine`."""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
Parameters
----------
c : :mpltype:`color`
Notes
-----
This method does not modify the facecolor (which defaults to "none"),
unlike the `.Patch.set_color` method defined in the parent class. Use
`.Patch.set_facecolor` to set the facecolor.
"""
self.set_edgecolor(c)
self.stale = True
class SpinesProxy:
    """
    A proxy to broadcast ``set_*()`` and ``set()`` method calls to contained `.Spines`.

    The proxy cannot be used for any other operations on its members.

    The supported methods are determined dynamically based on the contained
    spines. If not all spines support a given method, it's executed only on
    the subset of spines that support it.
    """
    def __init__(self, spine_dict):
        self._spine_dict = spine_dict

    def __getattr__(self, name):
        # Only 'set' and 'set_*' calls are broadcast, and only to the
        # spines that actually implement the requested method.
        broadcast_targets = [spine for spine in self._spine_dict.values()
                             if hasattr(spine, name)]
        if (name != 'set' and not name.startswith('set_')) or not broadcast_targets:
            raise AttributeError(
                f"'SpinesProxy' object has no attribute '{name}'")

        def x(_targets, _funcname, *args, **kwargs):
            for spine in _targets:
                getattr(spine, _funcname)(*args, **kwargs)

        x = functools.partial(x, broadcast_targets, name)
        x.__doc__ = broadcast_targets[0].__doc__
        return x

    def __dir__(self):
        # sorted() already returns a list, so the previous list(sorted(...))
        # wrapper was redundant; a set comprehension also deduplicates in
        # one pass.
        return sorted({name
                       for spine in self._spine_dict.values()
                       for name in dir(spine) if name.startswith('set_')})
class Spines(MutableMapping):
    r"""
    The container of all `.Spine`\s in an Axes.

    The interface is dict-like mapping names (e.g. 'left') to `.Spine` objects.
    Additionally, it implements some pandas.Series-like features like accessing
    elements by attribute::

        spines['top'].set_visible(False)
        spines.top.set_visible(False)

    Multiple spines can be addressed simultaneously by passing a list::

        spines[['top', 'right']].set_visible(False)

    Use an open slice to address all spines::

        spines[:].set_visible(False)

    The latter two indexing methods will return a `SpinesProxy` that broadcasts all
    ``set_*()`` and ``set()`` calls to its members, but cannot be used for any other
    operation.
    """
    def __init__(self, **kwargs):
        self._dict = kwargs

    @classmethod
    def from_dict(cls, d):
        """Alternate constructor: build a `Spines` from a plain dict."""
        return cls(**d)

    def __getstate__(self):
        return self._dict

    def __setstate__(self, state):
        self.__init__(**state)

    def __getattr__(self, name):
        try:
            return self._dict[name]
        except KeyError:
            raise AttributeError(
                f"'Spines' object does not contain a '{name}' spine")

    def __getitem__(self, key):
        if isinstance(key, list):
            missing = [k for k in key if k not in self._dict]
            if missing:
                raise KeyError(', '.join(missing))
            return SpinesProxy(
                {k: v for k, v in self._dict.items() if k in key})
        if isinstance(key, tuple):
            raise ValueError('Multiple spines must be passed as a single list')
        if isinstance(key, slice):
            # Only the fully open slice [:] is meaningful here.
            if (key.start, key.stop, key.step) != (None, None, None):
                raise ValueError(
                    'Spines does not support slicing except for the fully '
                    'open slice [:] to access all spines.')
            return SpinesProxy(self._dict)
        return self._dict[key]

    def __setitem__(self, key, value):
        # TODO: Do we want to deprecate adding spines?
        self._dict[key] = value

    def __delitem__(self, key):
        # TODO: Do we want to deprecate deleting spines?
        del self._dict[key]

    def __iter__(self):
        yield from self._dict

    def __len__(self):
        return len(self._dict)
venv\Lib\site-packages\matplotlib\stackplot.py
"""
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
https://stackoverflow.com/q/2225995/
(https://stackoverflow.com/users/66549/doug)
"""
import itertools
import numpy as np
from matplotlib import _api
__all__ = ['stackplot']
def stackplot(axes, x, *args,
              labels=(), colors=None, hatch=None, baseline='zero',
              **kwargs):
    """
    Draw a stacked area plot or a streamgraph.

    Parameters
    ----------
    x : (N,) array-like

    y : (M, N) array-like
        The data can be either stacked or unstacked. Each of the following
        calls is legal::

            stackplot(x, y)  # where y has shape (M, N) e.g. y = [y1, y2, y3, y4]
            stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4 have length N

    baseline : {'zero', 'sym', 'wiggle', 'weighted_wiggle'}
        Method used to calculate the baseline:

        - ``'zero'``: Constant zero baseline, i.e. a simple stacked plot.
        - ``'sym'``: Symmetric around zero and is sometimes called
          'ThemeRiver'.
        - ``'wiggle'``: Minimizes the sum of the squared slopes.
        - ``'weighted_wiggle'``: Does the same but weights to account for
          size of each layer. It is also called 'Streamgraph'-layout. More
          details can be found at http://leebyron.com/streamgraph/.

    labels : list of str, optional
        A sequence of labels to assign to each data series. If unspecified,
        then no labels will be applied to artists.

    colors : list of :mpltype:`color`, optional
        A sequence of colors to be cycled through and used to color the stacked
        areas. The sequence need not be exactly the same length as the number
        of provided *y*, in which case the colors will repeat from the
        beginning.

        If not specified, the colors from the Axes property cycle will be used.

    hatch : list of str, default: None
        A sequence of hatching styles. See
        :doc:`/gallery/shapes_and_collections/hatch_style_reference`.
        The sequence will be cycled through for filling the
        stacked areas from bottom to top.
        It need not be exactly the same length as the number
        of provided *y*, in which case the styles will repeat from the
        beginning.

        .. versionadded:: 3.9
           Support for list input

    data : indexable object, optional
        DATA_PARAMETER_PLACEHOLDER

    **kwargs
        All other keyword arguments are passed to `.Axes.fill_between`.

    Returns
    -------
    list of `.PolyCollection`
        A list of `.PolyCollection` instances, one for each element in the
        stacked area plot.
    """
    y = np.vstack(args)

    # labels is consumed lazily below; series beyond len(labels) get
    # label=None via next(labels, None).
    labels = iter(labels)
    if colors is not None:
        colors = itertools.cycle(colors)
    else:
        colors = (axes._get_lines.get_next_color() for _ in y)

    # A single hatch (or None) applies to every layer; a list is cycled.
    if hatch is None or isinstance(hatch, str):
        hatch = itertools.cycle([hatch])
    else:
        hatch = itertools.cycle(hatch)

    # Assume data passed has not been 'stacked', so stack it here.
    # We'll need a float buffer for the upcoming calculations.
    stack = np.cumsum(y, axis=0, dtype=np.promote_types(y.dtype, np.float32))

    _api.check_in_list(['zero', 'sym', 'wiggle', 'weighted_wiggle'],
                       baseline=baseline)
    if baseline == 'zero':
        first_line = 0.

    elif baseline == 'sym':
        # Shift every layer down by half the total so the plot is symmetric
        # about y = 0.
        first_line = -np.sum(y, 0) * 0.5
        stack += first_line[None, :]

    elif baseline == 'wiggle':
        m = y.shape[0]
        first_line = (y * (m - 0.5 - np.arange(m)[:, None])).sum(0)
        first_line /= -m
        stack += first_line

    elif baseline == 'weighted_wiggle':
        total = np.sum(y, 0)
        # multiply by 1/total (or zero) to avoid infinities in the division:
        inv_total = np.zeros_like(total)
        mask = total > 0
        inv_total[mask] = 1.0 / total[mask]
        increase = np.hstack((y[:, 0:1], np.diff(y)))
        below_size = total - stack
        below_size += 0.5 * y
        move_up = below_size * inv_total
        move_up[:, 0] = 0.5
        center = (move_up - 0.5) * increase
        center = np.cumsum(center.sum(0))
        first_line = center - 0.5 * total
        stack += first_line

    # Color between x = 0 and the first array.
    coll = axes.fill_between(x, first_line, stack[0, :],
                             facecolor=next(colors),
                             hatch=next(hatch),
                             label=next(labels, None),
                             **kwargs)
    coll.sticky_edges.y[:] = [0]
    r = [coll]

    # Color between array i-1 and array i
    for i in range(len(y) - 1):
        r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
                                   facecolor=next(colors),
                                   hatch=next(hatch),
                                   label=next(labels, None),
                                   **kwargs))
    return r
venv\Lib\site-packages\matplotlib\streamplot.py
"""
Streamline plotting for 2D vector fields.
"""
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cm, patches
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.lines as mlines
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
               cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
               minlength=0.1, transform=None, zorder=None, start_points=None,
               maxlength=4.0, integration_direction='both',
               broken_streamlines=True):
    """
    Draw streamlines of a vector flow.

    Parameters
    ----------
    x, y : 1D/2D arrays
        Evenly spaced strictly increasing arrays to make a grid. If 2D, all
        rows of *x* must be equal and all columns of *y* must be equal; i.e.,
        they must be as if generated by ``np.meshgrid(x_1d, y_1d)``.
    u, v : 2D arrays
        *x* and *y*-velocities. The number of rows and columns must match
        the length of *y* and *x*, respectively.
    density : float or (float, float)
        Controls the closeness of streamlines. When ``density = 1``, the domain
        is divided into a 30x30 grid. *density* linearly scales this grid.
        Each cell in the grid can have, at most, one traversing streamline.
        For different densities in each direction, use a tuple
        (density_x, density_y).
    linewidth : float or 2D array
        The width of the streamlines. With a 2D array the line width can be
        varied across the grid. The array must have the same shape as *u*
        and *v*.
    color : :mpltype:`color` or 2D array
        The streamline color. If given an array, its values are converted to
        colors using *cmap* and *norm*. The array must have the same shape
        as *u* and *v*.
    cmap, norm
        Data normalization and colormapping parameters for *color*; only used
        if *color* is an array of floats. See `~.Axes.imshow` for a detailed
        description.
    arrowsize : float
        Scaling factor for the arrow size.
    arrowstyle : str
        Arrow style specification.
        See `~matplotlib.patches.FancyArrowPatch`.
    minlength : float
        Minimum length of streamline in axes coordinates.
    start_points : (N, 2) array
        Coordinates of starting points for the streamlines in data coordinates
        (the same coordinates as the *x* and *y* arrays).
    zorder : float
        The zorder of the streamlines and arrows.
        Artists with lower zorder values are drawn first.
    maxlength : float
        Maximum length of streamline in axes coordinates.
    integration_direction : {'forward', 'backward', 'both'}, default: 'both'
        Integrate the streamline in forward, backward or both directions.
    data : indexable object, optional
        DATA_PARAMETER_PLACEHOLDER
    broken_streamlines : boolean, default: True
        If False, forces streamlines to continue until they
        leave the plot domain. If True, they may be terminated if they
        come too close to another streamline.

    Returns
    -------
    StreamplotSet
        Container object with attributes

        - ``lines``: `.LineCollection` of streamlines

        - ``arrows``: `.PatchCollection` containing `.FancyArrowPatch`
          objects representing the arrows half-way along streamlines.

        This container will probably change in the future to allow changes
        to the colormap, alpha, etc. for both lines and arrows, but these
        changes should be backward compatible.
    """
    grid = Grid(x, y)
    mask = StreamMask(density)
    dmap = DomainMap(grid, mask)

    if zorder is None:
        zorder = mlines.Line2D.zorder

    # default to data coordinates
    if transform is None:
        transform = axes.transData

    if color is None:
        color = axes._get_lines.get_next_color()

    if linewidth is None:
        linewidth = mpl.rcParams['lines.linewidth']

    line_kw = {}
    arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)

    _api.check_in_list(['both', 'forward', 'backward'],
                       integration_direction=integration_direction)

    # With 'both', each half (forward + backward) gets half of the budget.
    if integration_direction == 'both':
        maxlength /= 2.

    use_multicolor_lines = isinstance(color, np.ndarray)
    if use_multicolor_lines:
        if color.shape != grid.shape:
            raise ValueError("If 'color' is given, it must match the shape of "
                             "the (x, y) grid")
        line_colors = [[]]  # Empty entry allows concatenation of zero arrays.
        color = np.ma.masked_invalid(color)
    else:
        line_kw['color'] = color
        arrow_kw['color'] = color

    if isinstance(linewidth, np.ndarray):
        if linewidth.shape != grid.shape:
            raise ValueError("If 'linewidth' is given, it must match the "
                             "shape of the (x, y) grid")
        line_kw['linewidth'] = []
    else:
        line_kw['linewidth'] = linewidth
        arrow_kw['linewidth'] = linewidth

    line_kw['zorder'] = zorder
    arrow_kw['zorder'] = zorder

    # Sanity checks.
    if u.shape != grid.shape or v.shape != grid.shape:
        raise ValueError("'u' and 'v' must match the shape of the (x, y) grid")

    # Mask NaN/inf velocities so the integrator terminates trajectories
    # that hit invalid data instead of propagating them.
    u = np.ma.masked_invalid(u)
    v = np.ma.masked_invalid(v)

    integrate = _get_integrator(u, v, dmap, minlength, maxlength,
                                integration_direction)

    trajectories = []
    if start_points is None:
        for xm, ym in _gen_starting_points(mask.shape):
            if mask[ym, xm] == 0:
                xg, yg = dmap.mask2grid(xm, ym)
                t = integrate(xg, yg, broken_streamlines)
                if t is not None:
                    trajectories.append(t)
    else:
        sp2 = np.asanyarray(start_points, dtype=float).copy()

        # Check if start_points are outside the data boundaries
        for xs, ys in sp2:
            if not (grid.x_origin <= xs <= grid.x_origin + grid.width and
                    grid.y_origin <= ys <= grid.y_origin + grid.height):
                raise ValueError(f"Starting point ({xs}, {ys}) outside of "
                                 "data boundaries")

        # Convert start_points from data to array coords
        # Shift the seed points from the bottom left of the data so that
        # data2grid works properly.
        sp2[:, 0] -= grid.x_origin
        sp2[:, 1] -= grid.y_origin

        for xs, ys in sp2:
            xg, yg = dmap.data2grid(xs, ys)

            # Floating point issues can cause xg, yg to be slightly out of
            # bounds for xs, ys on the upper boundaries. Because we have
            # already checked that the starting points are within the original
            # grid, clip the xg, yg to the grid to work around this issue
            xg = np.clip(xg, 0, grid.nx - 1)
            yg = np.clip(yg, 0, grid.ny - 1)

            t = integrate(xg, yg, broken_streamlines)
            if t is not None:
                trajectories.append(t)

    if use_multicolor_lines:
        if norm is None:
            norm = mcolors.Normalize(color.min(), color.max())
        cmap = cm._ensure_cmap(cmap)

    streamlines = []
    arrows = []
    for t in trajectories:
        tgx, tgy = t.T
        # Rescale from grid-coordinates to data-coordinates.
        tx, ty = dmap.grid2data(tgx, tgy)
        tx += grid.x_origin
        ty += grid.y_origin

        # Create multiple tiny segments if varying width or color is given
        if isinstance(linewidth, np.ndarray) or use_multicolor_lines:
            points = np.transpose([tx, ty]).reshape(-1, 1, 2)
            streamlines.extend(np.hstack([points[:-1], points[1:]]))
        else:
            points = np.transpose([tx, ty])
            streamlines.append(points)

        # Add arrows halfway along each trajectory.
        s = np.cumsum(np.hypot(np.diff(tx), np.diff(ty)))
        n = np.searchsorted(s, s[-1] / 2.)
        arrow_tail = (tx[n], ty[n])
        arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))

        if isinstance(linewidth, np.ndarray):
            line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
            line_kw['linewidth'].extend(line_widths)
            arrow_kw['linewidth'] = line_widths[n]

        if use_multicolor_lines:
            color_values = interpgrid(color, tgx, tgy)[:-1]
            line_colors.append(color_values)
            arrow_kw['color'] = cmap(norm(color_values[n]))

        p = patches.FancyArrowPatch(
            arrow_tail, arrow_head, transform=transform, **arrow_kw)
        arrows.append(p)

    lc = mcollections.LineCollection(
        streamlines, transform=transform, **line_kw)
    lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width]
    lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height]
    if use_multicolor_lines:
        lc.set_array(np.ma.hstack(line_colors))
        lc.set_cmap(cmap)
        lc.set_norm(norm)
    axes.add_collection(lc)

    ac = mcollections.PatchCollection(arrows)
    # Adding the collection itself is broken; see #2341.
    for p in arrows:
        axes.add_patch(p)

    axes.autoscale_view()
    stream_container = StreamplotSet(lc, ac)
    return stream_container
class StreamplotSet:
    """Container bundling the artists produced by `streamplot`."""

    def __init__(self, lines, arrows):
        # LineCollection of the streamline segments.
        self.lines = lines
        # PatchCollection holding the mid-streamline arrow patches.
        self.arrows = arrows
# Coordinate definitions
# ========================
class DomainMap:
    """
    Map representing different coordinate systems.

    Coordinate definitions:

    * axes-coordinates goes from 0 to 1 in the domain.
    * data-coordinates are specified by the input x-y coordinates.
    * grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
      where N and M match the shape of the input data.
    * mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
      where N and M are user-specified to control the density of streamlines.

    This class also has methods for adding trajectories to the StreamMask.
    Before adding a trajectory, run `start_trajectory` to keep track of regions
    crossed by a given trajectory. Later, if you decide the trajectory is bad
    (e.g., if the trajectory is very short) just call `undo_trajectory`.
    """

    def __init__(self, grid, mask):
        self.grid = grid
        self.mask = mask
        # Scale factors converting between grid- and mask-coordinates
        # (and their inverses), plus data->grid scaling from the grid spacing.
        self.x_grid2mask = (mask.nx - 1) / (grid.nx - 1)
        self.y_grid2mask = (mask.ny - 1) / (grid.ny - 1)
        self.x_mask2grid = 1. / self.x_grid2mask
        self.y_mask2grid = 1. / self.y_grid2mask
        self.x_data2grid = 1. / grid.dx
        self.y_data2grid = 1. / grid.dy

    def grid2mask(self, xi, yi):
        """Return nearest space in mask-coords from given grid-coords."""
        xm = round(xi * self.x_grid2mask)
        ym = round(yi * self.y_grid2mask)
        return xm, ym

    def mask2grid(self, xm, ym):
        """Convert mask-coordinates to grid-coordinates."""
        return xm * self.x_mask2grid, ym * self.y_mask2grid

    def data2grid(self, xd, yd):
        """Convert data-coordinates (relative to the origin) to grid-coordinates."""
        return xd * self.x_data2grid, yd * self.y_data2grid

    def grid2data(self, xg, yg):
        """Convert grid-coordinates back to data-coordinates."""
        return xg / self.x_data2grid, yg / self.y_data2grid

    def start_trajectory(self, xg, yg, broken_streamlines=True):
        """Begin recording a trajectory at the given grid position."""
        xm, ym = self.grid2mask(xg, yg)
        self.mask._start_trajectory(xm, ym, broken_streamlines)

    def reset_start_point(self, xg, yg):
        """Re-anchor the mask's current position at the given grid position."""
        self.mask._current_xy = self.grid2mask(xg, yg)

    def update_trajectory(self, xg, yg, broken_streamlines=True):
        """Advance the recorded trajectory; raise InvalidIndexError off-grid."""
        if not self.grid.within_grid(xg, yg):
            raise InvalidIndexError
        xm, ym = self.grid2mask(xg, yg)
        self.mask._update_trajectory(xm, ym, broken_streamlines)

    def undo_trajectory(self):
        """Discard the trajectory currently recorded in the mask."""
        self.mask._undo_trajectory()
class Grid:
    """Grid of data."""

    def __init__(self, x, y):
        # Collapse a 2D meshgrid-style *x* to its (identical) first row.
        if np.ndim(x) == 2:
            x_row = x[0]
            if not np.allclose(x_row, x):
                raise ValueError("The rows of 'x' must be equal")
            x = x_row
        elif np.ndim(x) != 1:
            raise ValueError("'x' can have at maximum 2 dimensions")

        # Collapse a 2D meshgrid-style *y* to its (identical) first column.
        if np.ndim(y) == 2:
            yt = np.transpose(y)  # Also works for nested lists.
            y_col = yt[0]
            if not np.allclose(y_col, yt):
                raise ValueError("The columns of 'y' must be equal")
            y = y_col
        elif np.ndim(y) != 1:
            raise ValueError("'y' can have at maximum 2 dimensions")

        if not (np.diff(x) > 0).all():
            raise ValueError("'x' must be strictly increasing")
        if not (np.diff(y) > 0).all():
            raise ValueError("'y' must be strictly increasing")

        self.nx, self.ny = len(x), len(y)
        self.dx, self.dy = x[1] - x[0], y[1] - y[0]
        self.x_origin, self.y_origin = x[0], y[0]
        self.width, self.height = x[-1] - x[0], y[-1] - y[0]

        if not np.allclose(np.diff(x), self.width / (self.nx - 1)):
            raise ValueError("'x' values must be equally spaced")
        if not np.allclose(np.diff(y), self.height / (self.ny - 1)):
            raise ValueError("'y' values must be equally spaced")

    @property
    def shape(self):
        return self.ny, self.nx

    def within_grid(self, xi, yi):
        """Return whether (*xi*, *yi*) is a valid index of the grid."""
        # Note that xi/yi can be floats; so, for example, we can't simply check
        # `xi < self.nx` since *xi* can be `self.nx - 1 < xi < self.nx`
        return 0 <= xi <= self.nx - 1 and 0 <= yi <= self.ny - 1
class StreamMask:
    """
    Mask to keep track of discrete regions crossed by streamlines.

    The resolution of this grid determines the approximate spacing between
    trajectories. Streamlines are only allowed to pass through zeroed cells:
    When a streamline enters a cell, that cell is set to 1, and no new
    streamlines are allowed to enter.
    """

    def __init__(self, density):
        try:
            # density scales the default 30x30 mask; a scalar broadcasts to
            # both axes, a pair gives separate x/y densities.
            self.nx, self.ny = (30 * np.broadcast_to(density, 2)).astype(int)
        except ValueError as err:
            raise ValueError("'density' must be a scalar or be of length "
                             "2") from err
        if self.nx < 0 or self.ny < 0:
            raise ValueError("'density' must be positive")
        self._mask = np.zeros((self.ny, self.nx))
        self.shape = self._mask.shape
        self._current_xy = None

    def __getitem__(self, args):
        return self._mask[args]

    def _start_trajectory(self, xm, ym, broken_streamlines=True):
        """Start recording streamline trajectory"""
        self._traj = []
        self._update_trajectory(xm, ym, broken_streamlines)

    def _undo_trajectory(self):
        """Remove current trajectory from mask"""
        for cell in self._traj:
            self._mask[cell] = 0

    def _update_trajectory(self, xm, ym, broken_streamlines=True):
        """
        Update current trajectory position in mask.

        If the new position has already been filled, raise `InvalidIndexError`.
        """
        if self._current_xy == (xm, ym):
            return  # Still in the same cell; nothing to record.
        if self[ym, xm] == 0:
            self._traj.append((ym, xm))
            self._mask[ym, xm] = 1
            self._current_xy = (xm, ym)
        elif broken_streamlines:
            raise InvalidIndexError
class InvalidIndexError(Exception):
    """Raised when a trajectory enters an occupied or invalid mask cell."""
class TerminateTrajectory(Exception):
    """Raised to stop integrating a streamline (zero speed or masked data)."""
# Integrator definitions
# =======================
def _get_integrator(u, v, dmap, minlength, maxlength, integration_direction):
    # rescale velocity onto grid-coordinates for integrations.
    u, v = dmap.data2grid(u, v)

    # speed (path length) will be in axes-coordinates
    u_ax = u / (dmap.grid.nx - 1)
    v_ax = v / (dmap.grid.ny - 1)
    speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)

    def forward_time(xi, yi):
        # Velocity (in grid coords per unit of axes path length) at (xi, yi).
        if not dmap.grid.within_grid(xi, yi):
            raise OutOfBounds
        ds_dt = interpgrid(speed, xi, yi)
        if ds_dt == 0:
            # Stagnation point: the trajectory cannot advance.
            raise TerminateTrajectory()
        dt_ds = 1. / ds_dt
        ui = interpgrid(u, xi, yi)
        vi = interpgrid(v, xi, yi)
        return ui * dt_ds, vi * dt_ds

    def backward_time(xi, yi):
        # Backward integration is just the negated forward velocity.
        dxi, dyi = forward_time(xi, yi)
        return -dxi, -dyi

    def integrate(x0, y0, broken_streamlines=True):
        """
        Return x, y grid-coordinates of trajectory based on starting point.

        Integrate both forward and backward in time from starting point in
        grid coordinates.

        Integration is terminated when a trajectory reaches a domain boundary
        or when it crosses into an already occupied cell in the StreamMask. The
        resulting trajectory is None if it is shorter than `minlength`.
        """
        stotal, xy_traj = 0., []

        try:
            dmap.start_trajectory(x0, y0, broken_streamlines)
        except InvalidIndexError:
            return None
        if integration_direction in ['both', 'backward']:
            s, xyt = _integrate_rk12(x0, y0, dmap, backward_time, maxlength,
                                     broken_streamlines)
            stotal += s
            # Reverse so the combined trajectory runs backward -> forward.
            xy_traj += xyt[::-1]

        if integration_direction in ['both', 'forward']:
            dmap.reset_start_point(x0, y0)
            s, xyt = _integrate_rk12(x0, y0, dmap, forward_time, maxlength,
                                     broken_streamlines)
            stotal += s
            # Skip the first point: the seed is already in the backward half.
            xy_traj += xyt[1:]

        if stotal > minlength:
            return np.broadcast_arrays(xy_traj, np.empty((1, 2)))[0]
        else:  # reject short trajectories
            dmap.undo_trajectory()
            return None

    return integrate
class OutOfBounds(IndexError):
    """Raised when an integration step leaves the data grid."""
def _integrate_rk12(x0, y0, dmap, f, maxlength, broken_streamlines=True):
    """
    2nd-order Runge-Kutta algorithm with adaptive step size.

    This method is also referred to as the improved Euler's method, or Heun's
    method. This method is favored over higher-order methods because:

    1. To get decent looking trajectories and to sample every mask cell
       on the trajectory we need a small timestep, so a lower order
       solver doesn't hurt us unless the data is *very* high resolution.
       In fact, for cases where the user inputs
       data smaller or of similar grid size to the mask grid, the higher
       order corrections are negligible because of the very fast linear
       interpolation used in `interpgrid`.

    2. For high resolution input data (i.e. beyond the mask
       resolution), we must reduce the timestep. Therefore, an adaptive
       timestep is more suited to the problem as this would be very hard
       to judge automatically otherwise.

    This integrator is about 1.5 - 2x as fast as RK4 and RK45 solvers (using
    similar Python implementations) in most setups.
    """
    # This error is below that needed to match the RK4 integrator. It
    # is set for visual reasons -- too low and corners start
    # appearing ugly and jagged. Can be tuned.
    maxerror = 0.003

    # This limit is important (for all integrators) to avoid the
    # trajectory skipping some mask cells. We could relax this
    # condition if we use the code which is commented out below to
    # increment the location gradually. However, due to the efficient
    # nature of the interpolation, this doesn't boost speed by much
    # for quite a bit of complexity.
    maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)

    ds = maxds
    stotal = 0
    xi = x0
    yi = y0
    xyf_traj = []

    while True:
        try:
            if dmap.grid.within_grid(xi, yi):
                xyf_traj.append((xi, yi))
            else:
                raise OutOfBounds

            # Compute the two intermediate gradients.
            # f should raise OutOfBounds if the locations given are
            # outside the grid.
            k1x, k1y = f(xi, yi)
            k2x, k2y = f(xi + ds * k1x, yi + ds * k1y)

        except OutOfBounds:
            # Out of the domain during this step.
            # Take an Euler step to the boundary to improve neatness
            # unless the trajectory is currently empty.
            if xyf_traj:
                ds, xyf_traj = _euler_step(xyf_traj, dmap, f)
                stotal += ds
            break
        except TerminateTrajectory:
            break

        # Heun step: (dx1, dy1) is the plain Euler estimate, (dx2, dy2) the
        # trapezoidal (2nd-order) correction; their difference estimates the
        # local error.
        dx1 = ds * k1x
        dy1 = ds * k1y
        dx2 = ds * 0.5 * (k1x + k2x)
        dy2 = ds * 0.5 * (k1y + k2y)

        ny, nx = dmap.grid.shape
        # Error is normalized to the axes coordinates
        error = np.hypot((dx2 - dx1) / (nx - 1), (dy2 - dy1) / (ny - 1))

        # Only save step if within error tolerance
        if error < maxerror:
            xi += dx2
            yi += dy2
            try:
                dmap.update_trajectory(xi, yi, broken_streamlines)
            except InvalidIndexError:
                break
            if stotal + ds > maxlength:
                break
            stotal += ds

        # recalculate stepsize based on step error
        if error == 0:
            ds = maxds
        else:
            ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)

    return stotal, xyf_traj
def _euler_step(xyf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi, yi = xyf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xyf_traj.append((xi + cx * ds, yi + cy * ds))
return ds, xyf_traj
# Utility functions
# ========================
def interpgrid(a, xi, yi):
    """Fast 2D, linear interpolation on an integer grid"""
    Ny, Nx = np.shape(a)
    if isinstance(xi, np.ndarray):
        x = xi.astype(int)
        y = yi.astype(int)
        # Clip the neighbour indices so they never leave the grid.
        xn = np.clip(x + 1, 0, Nx - 1)
        yn = np.clip(y + 1, 0, Ny - 1)
    else:
        x = int(xi)
        y = int(yi)
        # conditional is faster than clipping for integers
        xn = x if x == (Nx - 1) else x + 1
        yn = y if y == (Ny - 1) else y + 1

    # Bilinear interpolation: blend along x on both rows, then along y.
    xt = xi - x
    yt = yi - y
    row_lo = a[y, x] * (1 - xt) + a[y, xn] * xt
    row_hi = a[yn, x] * (1 - xt) + a[yn, xn] * xt
    ai = row_lo * (1 - yt) + row_hi * yt

    if not isinstance(xi, np.ndarray):
        if np.ma.is_masked(ai):
            raise TerminateTrajectory

    return ai
def _gen_starting_points(shape):
"""
Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
direction = 'right'
for i in range(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
venv\Lib\site-packages\matplotlib\table.py
# Original code by:
# John Gill
# Copyright 2004 John Gill and John Hunter
#
# Subsequent changes:
# The Matplotlib development team
# Copyright The Matplotlib development team
"""
Tables drawing.
.. note::
The table implementation in Matplotlib is lightly maintained. For a more
featureful table implementation, you may wish to try `blume
<https://github.com/swfiua/blume>`_.
Use the factory function `~matplotlib.table.table` to create a ready-made
table from texts. If you need more control, use the `.Table` class and its
methods.
The table consists of a grid of cells, which are indexed by (row, column).
The cell (0, 0) is positioned at the top left.
Thanks to John Gill for providing the class and table.
"""
import numpy as np
from . import _api, _docstring
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .text import Text
from .transforms import Bbox
from .path import Path
from .cbook import _is_pandas_dataframe
class Cell(Rectangle):
    """
    A cell is a `.Rectangle` with some associated `.Text`.

    As a user, you'll most likely not creates cells yourself. Instead, you
    should use either the `~matplotlib.table.table` factory function or
    `.Table.add_cell`.
    """

    PAD = 0.1
    """Padding between text and rectangle."""

    # Edge identifiers: Bottom, Right, Top, Left.
    _edges = 'BRTL'
    # Named shorthands mapping onto substrings of _edges.
    _edge_aliases = {'open': '',
                     'closed': _edges,  # default
                     'horizontal': 'BT',
                     'vertical': 'RL'
                     }

    def __init__(self, xy, width, height, *,
                 edgecolor='k', facecolor='w',
                 fill=True,
                 text='',
                 loc='right',
                 fontproperties=None,
                 visible_edges='closed',
                 ):
        """
        Parameters
        ----------
        xy : 2-tuple
            The position of the bottom left corner of the cell.
        width : float
            The cell width.
        height : float
            The cell height.
        edgecolor : :mpltype:`color`, default: 'k'
            The color of the cell border.
        facecolor : :mpltype:`color`, default: 'w'
            The cell facecolor.
        fill : bool, default: True
            Whether the cell background is filled.
        text : str, optional
            The cell text.
        loc : {'right', 'center', 'left'}
            The alignment of the text within the cell.
        fontproperties : dict, optional
            A dict defining the font properties of the text. Supported keys and
            values are the keyword arguments accepted by `.FontProperties`.
        visible_edges : {'closed', 'open', 'horizontal', 'vertical'} or \
substring of 'BRTL'
            The cell edges to be drawn with a line: a substring of 'BRTL'
            (bottom, right, top, left), or one of 'open' (no edges drawn),
            'closed' (all edges drawn), 'horizontal' (bottom and top),
            'vertical' (right and left).
        """
        # Call base
        super().__init__(xy, width=width, height=height, fill=fill,
                         edgecolor=edgecolor, facecolor=facecolor)
        # Cells are drawn unclipped (tables may extend beyond the axes area).
        self.set_clip_on(False)
        # Goes through the visible_edges property setter, which validates
        # and normalizes the value.
        self.visible_edges = visible_edges

        # Create text object
        self._loc = loc
        # The text is placed at the cell corner initially; its final position
        # is computed at draw time by _set_text_position().
        self._text = Text(x=xy[0], y=xy[1], clip_on=False,
                          text=text, fontproperties=fontproperties,
                          horizontalalignment=loc, verticalalignment='center')
    def set_transform(self, t):
        # Only the rectangle receives the new transform; the contained Text
        # is re-positioned at draw time via _set_text_position() instead.
        super().set_transform(t)
        # the text does not get the transform!
        self.stale = True
    def set_figure(self, fig):
        # Keep the contained Text attached to the same figure as the cell.
        super().set_figure(fig)
        self._text.set_figure(fig)
    def get_text(self):
        """Return the cell `.Text` instance."""
        return self._text
    def set_fontsize(self, size):
        """Set the text fontsize."""
        # Delegates to the contained Text and marks the cell for redraw.
        self._text.set_fontsize(size)
        self.stale = True
    def get_fontsize(self):
        """Return the cell fontsize."""
        return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
"""Shrink font size until the text fits into the cell width."""
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
super().draw(renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
self.stale = False
def _set_text_position(self, renderer):
"""Set text up so it is drawn in the right place."""
bbox = self.get_window_extent(renderer)
# center vertically
y = bbox.y0 + bbox.height / 2
# position horizontally
loc = self._text.get_horizontalalignment()
if loc == 'center':
x = bbox.x0 + bbox.width / 2
elif loc == 'left':
x = bbox.x0 + bbox.width * self.PAD
else: # right.
x = bbox.x0 + bbox.width * (1 - self.PAD)
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
    """
    Return the text bounds as *(x, y, width, height)* in table coordinates.
    """
    # Measure in display space, then map back through the inverse of the
    # cell's data transform.
    extent = self._text.get_window_extent(renderer)
    to_table = self.get_data_transform().inverted()
    return extent.transformed(to_table).bounds
def get_required_width(self, renderer):
    """Return the minimal width needed to show the text plus padding."""
    _, _, width, _ = self.get_text_bounds(renderer)
    # Text width plus a PAD-sized margin on each side.
    return width * (1.0 + (2.0 * self.PAD))
@_docstring.interpd
def set_text_props(self, **kwargs):
    """
    Update the text properties.

    Valid keyword arguments are:

    %(Text:kwdoc)s
    """
    # Delegate to the child Text artist; mark the cell stale so the
    # change is picked up on the next draw.
    self._text._internal_update(kwargs)
    self.stale = True
@property
def visible_edges(self):
    """
    The cell edges to be drawn with a line.

    Reading this property returns a substring of 'BRTL' (bottom, right,
    top, left').

    When setting this property, you can use a substring of 'BRTL' or one
    of {'open', 'closed', 'horizontal', 'vertical'}.
    """
    return self._visible_edges

@visible_edges.setter
def visible_edges(self, value):
    if value is None:
        # None means: fall back to the default edge set.
        edges = self._edges
    elif value in self._edge_aliases:
        # Named alias ('open', 'closed', ...) -> its 'BRTL' substring.
        edges = self._edge_aliases[value]
    else:
        if not all(edge in self._edges for edge in value):
            raise ValueError('Invalid edge param {}, must only be one of '
                             '{} or string of {}'.format(
                                 value,
                                 ", ".join(self._edge_aliases),
                                 ", ".join(self._edges)))
        edges = value
    self._visible_edges = edges
    self.stale = True
def get_path(self):
    """Return a `.Path` for the `.visible_edges`."""
    # Walk the unit square B, R, T, L: a LINETO draws the edge, a MOVETO
    # skips it.  The initial MOVETO positions the pen at the origin.
    codes = [Path.MOVETO] + [
        Path.LINETO if edge in self._visible_edges else Path.MOVETO
        for edge in self._edges]
    if Path.MOVETO not in codes[1:]:
        # All four edges drawn: close the polygon for clean corner joins.
        codes[-1] = Path.CLOSEPOLY
    vertices = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]
    return Path(vertices, codes, readonly=True)
CustomCell = Cell # Backcompat. alias.
class Table(Artist):
    """
    A table of cells.

    The table consists of a grid of cells, which are indexed by (row, column).

    For a simple table, you'll have a full grid of cells with indices from
    (0, 0) to (num_rows-1, num_cols-1), in which the cell (0, 0) is positioned
    at the top left. However, you can also add cells with negative indices.
    You don't have to add a cell to every grid position, so you can create
    tables that have holes.

    *Note*: You'll usually not create an empty table from scratch. Instead use
    `~matplotlib.table.table` to create a table from data.
    """

    # Integer location codes; string keys passed to __init__ are translated
    # through this mapping.
    codes = {'best': 0,
             'upper right': 1,  # default
             'upper left': 2,
             'lower left': 3,
             'lower right': 4,
             'center left': 5,
             'center right': 6,
             'lower center': 7,
             'upper center': 8,
             'center': 9,
             'top right': 10,
             'top left': 11,
             'bottom left': 12,
             'bottom right': 13,
             'right': 14,
             'left': 15,
             'top': 16,
             'bottom': 17,
             }
    """Possible values where to place the table relative to the Axes."""

    # Font size (points) used by _approx_text_height.
    FONTSIZE = 10

    AXESPAD = 0.02
    """The border between the Axes and the table edge in Axes units."""

    def __init__(self, ax, loc=None, bbox=None, **kwargs):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The `~.axes.Axes` to plot the table into.
        loc : str, optional
            The position of the cell with respect to *ax*. This must be one of
            the `~.Table.codes`.
        bbox : `.Bbox` or [xmin, ymin, width, height], optional
            A bounding box to draw the table into. If this is not *None*, this
            overrides *loc*.

        Other Parameters
        ----------------
        **kwargs
            `.Artist` properties.
        """
        super().__init__()

        if isinstance(loc, str):
            if loc not in self.codes:
                raise ValueError(
                    "Unrecognized location {!r}. Valid locations are\n\t{}"
                    .format(loc, '\n\t'.join(self.codes)))
            loc = self.codes[loc]
        self.set_figure(ax.get_figure(root=False))
        self._axes = ax
        self._loc = loc
        self._bbox = bbox

        # use axes coords
        ax._unstale_viewLim()
        self.set_transform(ax.transAxes)

        self._cells = {}  # maps (row, col) -> Cell
        self._edges = None
        self._autoColumns = []  # column indices to auto-size at draw time
        self._autoFontsize = True
        self._internal_update(kwargs)

        self.set_clip_on(False)

    def add_cell(self, row, col, *args, **kwargs):
        """
        Create a cell and add it to the table.

        Parameters
        ----------
        row : int
            Row index.
        col : int
            Column index.
        *args, **kwargs
            All other parameters are passed on to `Cell`.

        Returns
        -------
        `.Cell`
            The created cell.
        """
        # Cells are created at the origin; real positions are computed in
        # _do_cell_alignment during draw.
        xy = (0, 0)
        cell = Cell(xy, visible_edges=self.edges, *args, **kwargs)
        self[row, col] = cell
        return cell

    def __setitem__(self, position, cell):
        """
        Set a custom cell in a given position.
        """
        _api.check_isinstance(Cell, cell=cell)
        try:
            row, col = position[0], position[1]
        except Exception as err:
            raise KeyError('Only tuples length 2 are accepted as '
                           'coordinates') from err
        # Keep the cell consistent with the table's figure/transform state.
        cell.set_figure(self.get_figure(root=False))
        cell.set_transform(self.get_transform())
        cell.set_clip_on(False)
        self._cells[row, col] = cell
        self.stale = True

    def __getitem__(self, position):
        """Retrieve a custom cell from a given position."""
        return self._cells[position]

    @property
    def edges(self):
        """
        The default value of `~.Cell.visible_edges` for newly added
        cells using `.add_cell`.

        Notes
        -----
        This setting does currently only affect newly created cells using
        `.add_cell`.

        To change existing cells, you have to set their edges explicitly::

            for c in tab.get_celld().values():
                c.visible_edges = 'horizontal'
        """
        return self._edges

    @edges.setter
    def edges(self, value):
        self._edges = value
        self.stale = True

    def _approx_text_height(self):
        # Rough height of one text row in axes units: FONTSIZE points
        # converted to pixels, relative to the axes height, padded by 20%.
        return (self.FONTSIZE / 72.0 * self.get_figure(root=True).dpi /
                self._axes.bbox.height * 1.2)

    @allow_rasterization
    def draw(self, renderer):
        # docstring inherited

        # Need a renderer to do hit tests on mouseevent; assume the last one
        # will do
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        if renderer is None:
            raise RuntimeError('No renderer defined')

        if not self.get_visible():
            return
        renderer.open_group('table', gid=self.get_gid())
        self._update_positions(renderer)

        # Sorted keys give a deterministic draw order over the grid.
        for key in sorted(self._cells):
            self._cells[key].draw(renderer)

        renderer.close_group('table')
        self.stale = False

    def _get_grid_bbox(self, renderer):
        """
        Get a bbox, in axes coordinates for the cells.

        Only include those in the range (0, 0) to (maxRow, maxCol).
        """
        # Negative indices (header cells) are excluded from the grid bbox.
        boxes = [cell.get_window_extent(renderer)
                 for (row, col), cell in self._cells.items()
                 if row >= 0 and col >= 0]
        bbox = Bbox.union(boxes)
        return bbox.transformed(self.get_transform().inverted())

    def contains(self, mouseevent):
        # docstring inherited
        if self._different_canvas(mouseevent):
            return False, {}
        # TODO: Return index of the cell containing the cursor so that the user
        # doesn't have to bind to each one individually.
        renderer = self.get_figure(root=True)._get_renderer()
        if renderer is not None:
            boxes = [cell.get_window_extent(renderer)
                     for (row, col), cell in self._cells.items()
                     if row >= 0 and col >= 0]
            bbox = Bbox.union(boxes)
            return bbox.contains(mouseevent.x, mouseevent.y), {}
        else:
            # Without a renderer we cannot hit-test.
            return False, {}

    def get_children(self):
        """Return the Artists contained by the table."""
        return list(self._cells.values())

    def get_window_extent(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        self._update_positions(renderer)
        boxes = [cell.get_window_extent(renderer)
                 for cell in self._cells.values()]
        return Bbox.union(boxes)

    def _do_cell_alignment(self):
        """
        Calculate row heights and column widths; position cells accordingly.
        """
        # Calculate row/column widths: each row/column takes the maximum
        # height/width of its cells.
        widths = {}
        heights = {}
        for (row, col), cell in self._cells.items():
            height = heights.setdefault(row, 0.0)
            heights[row] = max(height, cell.get_height())
            width = widths.setdefault(col, 0.0)
            widths[col] = max(width, cell.get_width())

        # work out left position for each column
        xpos = 0
        lefts = {}
        for col in sorted(widths):
            lefts[col] = xpos
            xpos += widths[col]

        # Rows are iterated in reverse so larger row indices get smaller y,
        # i.e. row 0 ends up at the top.
        ypos = 0
        bottoms = {}
        for row in sorted(heights, reverse=True):
            bottoms[row] = ypos
            ypos += heights[row]

        # set cell positions
        for (row, col), cell in self._cells.items():
            cell.set_x(lefts[col])
            cell.set_y(bottoms[row])

    def auto_set_column_width(self, col):
        """
        Automatically set the widths of given columns to optimal sizes.

        Parameters
        ----------
        col : int or sequence of ints
            The indices of the columns to auto-scale.
        """
        col1d = np.atleast_1d(col)
        if not np.issubdtype(col1d.dtype, np.integer):
            raise TypeError("col must be an int or sequence of ints.")
        # Actual width computation is deferred to draw time
        # (_auto_set_column_width), when a renderer is available.
        for cell in col1d:
            self._autoColumns.append(cell)

        self.stale = True

    def _auto_set_column_width(self, col, renderer):
        """Automatically set width for column."""
        cells = [cell for key, cell in self._cells.items() if key[1] == col]
        max_width = max((cell.get_required_width(renderer) for cell in cells),
                        default=0)
        for cell in cells:
            cell.set_width(max_width)

    def auto_set_font_size(self, value=True):
        """Automatically set font size."""
        self._autoFontsize = value
        self.stale = True

    def _auto_set_font_size(self, renderer):

        if len(self._cells) == 0:
            return
        fontsize = next(iter(self._cells.values())).get_fontsize()
        cells = []
        for key, cell in self._cells.items():
            # ignore auto-sized columns
            if key[1] in self._autoColumns:
                continue
            size = cell.auto_set_font_size(renderer)
            fontsize = min(fontsize, size)
            cells.append(cell)

        # now set all fontsizes equal
        for cell in self._cells.values():
            cell.set_fontsize(fontsize)

    def scale(self, xscale, yscale):
        """Scale column widths by *xscale* and row heights by *yscale*."""
        for c in self._cells.values():
            c.set_width(c.get_width() * xscale)
            c.set_height(c.get_height() * yscale)

    def set_fontsize(self, size):
        """
        Set the font size, in points, of the cell text.

        Parameters
        ----------
        size : float

        Notes
        -----
        As long as auto font size has not been disabled, the value will be
        clipped such that the text fits horizontally into the cell.

        You can disable this behavior using `.auto_set_font_size`.

        >>> the_table.auto_set_font_size(False)
        >>> the_table.set_fontsize(20)

        However, there is no automatic scaling of the row height so that the
        text may exceed the cell boundary.
        """
        for cell in self._cells.values():
            cell.set_fontsize(size)
        self.stale = True

    def _offset(self, ox, oy):
        """Move all the artists by ox, oy (axes coords)."""
        for c in self._cells.values():
            x, y = c.get_x(), c.get_y()
            c.set_x(x + ox)
            c.set_y(y + oy)

    def _update_positions(self, renderer):
        # called from renderer to allow more precise estimates of
        # widths and heights with get_window_extent

        # Do any auto width setting
        for col in self._autoColumns:
            self._auto_set_column_width(col, renderer)

        if self._autoFontsize:
            self._auto_set_font_size(renderer)

        # Align all the cells
        self._do_cell_alignment()

        bbox = self._get_grid_bbox(renderer)
        l, b, w, h = bbox.bounds

        if self._bbox is not None:
            # Position according to bbox
            if isinstance(self._bbox, Bbox):
                rl, rb, rw, rh = self._bbox.bounds
            else:
                rl, rb, rw, rh = self._bbox
            self.scale(rw / w, rh / h)
            ox = rl - l
            oy = rb - b
            # Re-align after scaling so cell positions match the new sizes.
            self._do_cell_alignment()
        else:
            # Position using loc
            # Symbolic names for the integer codes in Table.codes, in order.
            (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
             TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
            # defaults for center
            ox = (0.5 - w / 2) - l
            oy = (0.5 - h / 2) - b
            if self._loc in (UL, LL, CL):   # left
                ox = self.AXESPAD - l
            if self._loc in (BEST, UR, LR, R, CR):  # right
                ox = 1 - (l + w + self.AXESPAD)
            if self._loc in (BEST, UR, UL, UC):     # upper
                oy = 1 - (b + h + self.AXESPAD)
            if self._loc in (LL, LR, LC):           # lower
                oy = self.AXESPAD - b
            if self._loc in (LC, UC, C):            # center x
                ox = (0.5 - w / 2) - l
            if self._loc in (CL, CR, C):            # center y
                oy = (0.5 - h / 2) - b
            if self._loc in (TL, BL, L):            # out left
                ox = - (l + w)
            if self._loc in (TR, BR, R):            # out right
                ox = 1.0 - l
            if self._loc in (TR, TL, T):            # out top
                oy = 1.0 - b
            if self._loc in (BL, BR, B):            # out bottom
                oy = - (b + h)
        self._offset(ox, oy)

    def get_celld(self):
        r"""
        Return a dict of cells in the table mapping *(row, column)* to
        `.Cell`\s.

        Notes
        -----
        You can also directly index into the Table object to access individual
        cells::

            cell = table[row, col]
        """
        return self._cells
@_docstring.interpd
def table(ax,
          cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None, edges='closed',
          **kwargs):
    """
    Add a table to an `~.axes.Axes`.

    At least one of *cellText* or *cellColours* must be specified. These
    parameters must be 2D lists, in which the outer lists define the rows and
    the inner list define the column values per row. Each row must have the
    same number of elements.

    The table can optionally have row and column headers, which are configured
    using *rowLabels*, *rowColours*, *rowLoc* and *colLabels*, *colColours*,
    *colLoc* respectively.

    For finer grained control over tables, use the `.Table` class and add it to
    the Axes with `.Axes.add_table`.

    Parameters
    ----------
    cellText : 2D list of str or pandas.DataFrame, optional
        The texts to place into the table cells.

        *Note*: Line breaks in the strings are currently not accounted for and
        will result in the text exceeding the cell boundaries.
    cellColours : 2D list of :mpltype:`color`, optional
        The background colors of the cells.
    cellLoc : {'right', 'center', 'left'}
        The alignment of the text within the cells.
    colWidths : list of float, optional
        The column widths in units of the axes. If not given, all columns will
        have a width of *1 / ncols*.
    rowLabels : list of str, optional
        The text of the row header cells.
    rowColours : list of :mpltype:`color`, optional
        The colors of the row header cells.
    rowLoc : {'left', 'center', 'right'}
        The text alignment of the row header cells.
    colLabels : list of str, optional
        The text of the column header cells.
    colColours : list of :mpltype:`color`, optional
        The colors of the column header cells.
    colLoc : {'center', 'left', 'right'}
        The text alignment of the column header cells.
    loc : str, default: 'bottom'
        The position of the cell with respect to *ax*. This must be one of
        the `~.Table.codes`.
    bbox : `.Bbox` or [xmin, ymin, width, height], optional
        A bounding box to draw the table into. If this is not *None*, this
        overrides *loc*.
    edges : {'closed', 'open', 'horizontal', 'vertical'} or substring of 'BRTL'
        The cell edges to be drawn with a line. See also
        `~.Cell.visible_edges`.

    Returns
    -------
    `~matplotlib.table.Table`
        The created table.

    Other Parameters
    ----------------
    **kwargs
        `.Table` properties.

    %(Table:kwdoc)s
    """

    if cellColours is None and cellText is None:
        raise ValueError('At least one argument from "cellColours" or '
                         '"cellText" must be provided to create a table.')

    # Check we have some cellText
    if cellText is None:
        # assume just colours are needed
        rows = len(cellColours)
        cols = len(cellColours[0])
        # The shared inner list is safe here because the rows are never
        # mutated after creation.
        cellText = [[''] * cols] * rows

    # Check if we have a Pandas DataFrame
    if _is_pandas_dataframe(cellText):
        # if rowLabels/colLabels are empty, use DataFrame entries.
        # Otherwise, throw an error.
        if rowLabels is None:
            rowLabels = cellText.index
        else:
            raise ValueError("rowLabels cannot be used alongside Pandas DataFrame")
        if colLabels is None:
            colLabels = cellText.columns
        else:
            raise ValueError("colLabels cannot be used alongside Pandas DataFrame")
        # Update cellText with only values
        cellText = cellText.values

    rows = len(cellText)
    cols = len(cellText[0])
    for row in cellText:
        if len(row) != cols:
            raise ValueError(f"Each row in 'cellText' must have {cols} "
                             "columns")

    if cellColours is not None:
        if len(cellColours) != rows:
            raise ValueError(f"'cellColours' must have {rows} rows")
        for row in cellColours:
            if len(row) != cols:
                raise ValueError("Each row in 'cellColours' must have "
                                 f"{cols} columns")
    else:
        # 'w' * cols is a string; indexing a row below yields 'w' per cell.
        cellColours = ['w' * cols] * rows

    # Set colwidths if not given
    if colWidths is None:
        colWidths = [1.0 / cols] * cols

    # Fill in missing information for column
    # and row labels
    rowLabelWidth = 0
    if rowLabels is None:
        if rowColours is not None:
            rowLabels = [''] * rows
            rowLabelWidth = colWidths[0]
    elif rowColours is None:
        rowColours = 'w' * rows

    if rowLabels is not None:
        if len(rowLabels) != rows:
            raise ValueError(f"'rowLabels' must be of length {rows}")

    # If we have column labels, need to shift
    # the text and colour arrays down 1 row
    offset = 1
    if colLabels is None:
        if colColours is not None:
            colLabels = [''] * cols
        else:
            offset = 0
    elif colColours is None:
        colColours = 'w' * cols

    # Set up cell colours if not given
    # NOTE(review): cellColours can no longer be None at this point (it was
    # filled in above); this guard appears to be dead code -- confirm.
    if cellColours is None:
        cellColours = ['w' * cols] * rows

    # Now create the table
    table = Table(ax, loc, bbox, **kwargs)
    table.edges = edges
    height = table._approx_text_height()

    # Add the cells
    for row in range(rows):
        for col in range(cols):
            table.add_cell(row + offset, col,
                           width=colWidths[col], height=height,
                           text=cellText[row][col],
                           facecolor=cellColours[row][col],
                           loc=cellLoc)
    # Do column labels
    if colLabels is not None:
        for col in range(cols):
            table.add_cell(0, col,
                           width=colWidths[col], height=height,
                           text=colLabels[col], facecolor=colColours[col],
                           loc=colLoc)

    # Do row labels
    if rowLabels is not None:
        for row in range(rows):
            # A zero width would collapse the column; 1e-15 is a placeholder
            # until auto_set_column_width below computes the real width.
            table.add_cell(row + offset, -1,
                           width=rowLabelWidth or 1e-15, height=height,
                           text=rowLabels[row], facecolor=rowColours[row],
                           loc=rowLoc)
        if rowLabelWidth == 0:
            table.auto_set_column_width(-1)

    # set_fontsize is only effective after cells are added
    if "fontsize" in kwargs:
        table.set_fontsize(kwargs["fontsize"])

    ax.add_table(table)
    return table
venv\Lib\site-packages\matplotlib\texmanager.py
r"""
Support for embedded TeX expressions in Matplotlib.
Requirements:
* LaTeX.
* \*Agg backends: dvipng>=1.6.
* PS backend: PSfrag, dvips, and Ghostscript>=9.0.
* PDF and SVG backends: if LuaTeX is present, it will be used to speed up some
post-processing steps, but note that it is not used to parse the TeX string
itself (only LaTeX is supported).
To enable TeX rendering of all text in your Matplotlib figure, set
:rc:`text.usetex` to True.
TeX and dvipng/dvips processing results are cached
in ~/.matplotlib/tex.cache for reuse between sessions.
`TexManager.get_rgba` can also be used to directly obtain raster output as RGBA
NumPy arrays.
"""
import functools
import hashlib
import logging
import os
from pathlib import Path
import subprocess
from tempfile import TemporaryDirectory
import numpy as np
import matplotlib as mpl
from matplotlib import cbook, dviread
_log = logging.getLogger(__name__)
def _usepackage_if_not_loaded(package, *, option=None):
"""
Output LaTeX code that loads a package (possibly with an option) if it
hasn't been loaded yet.
LaTeX cannot load twice a package with different options, so this helper
can be used to protect against users loading arbitrary packages/options in
their custom preamble.
"""
option = f"[{option}]" if option is not None else ""
return (
r"\makeatletter"
r"\@ifpackageloaded{%(package)s}{}{\usepackage%(option)s{%(package)s}}"
r"\makeatother"
) % {"package": package, "option": option}
class TexManager:
    """
    Convert strings to dvi files using TeX, caching the results to a directory.

    The cache directory is called ``tex.cache`` and is located in the directory
    returned by `.get_cachedir`.

    Repeated calls to this constructor always return the same instance.
    """

    _texcache = os.path.join(mpl.get_cachedir(), 'tex.cache')
    # In-memory cache: (tex source, dpi) -> alpha array (see get_grey).
    _grey_arrayd = {}

    _font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
    # Maps a font name (or generic family) to the LaTeX preamble snippet
    # that selects it.
    _font_preambles = {
        'new century schoolbook': r'\renewcommand{\rmdefault}{pnc}',
        'bookman': r'\renewcommand{\rmdefault}{pbk}',
        'times': r'\usepackage{mathptmx}',
        'palatino': r'\usepackage{mathpazo}',
        'zapf chancery': r'\usepackage{chancery}',
        'cursive': r'\usepackage{chancery}',
        'charter': r'\usepackage{charter}',
        'serif': '',
        'sans-serif': '',
        'helvetica': r'\usepackage{helvet}',
        'avant garde': r'\usepackage{avant}',
        'courier': r'\usepackage{courier}',
        # Loading the type1ec package ensures that cm-super is installed, which
        # is necessary for Unicode computer modern.  (It also allows the use of
        # computer modern at arbitrary sizes, but that's just a side effect.)
        'monospace': r'\usepackage{type1ec}',
        'computer modern roman': r'\usepackage{type1ec}',
        'computer modern sans serif': r'\usepackage{type1ec}',
        'computer modern typewriter': r'\usepackage{type1ec}',
    }
    # Maps a specific font name to its generic family.
    _font_types = {
        'new century schoolbook': 'serif',
        'bookman': 'serif',
        'times': 'serif',
        'palatino': 'serif',
        'zapf chancery': 'cursive',
        'charter': 'serif',
        'helvetica': 'sans-serif',
        'avant garde': 'sans-serif',
        'courier': 'monospace',
        'computer modern roman': 'serif',
        'computer modern sans serif': 'sans-serif',
        'computer modern typewriter': 'monospace',
    }

    @functools.lru_cache  # Always return the same instance.
    def __new__(cls):
        # Ensure the on-disk cache directory exists before first use.
        Path(cls._texcache).mkdir(parents=True, exist_ok=True)
        return object.__new__(cls)

    @classmethod
    def _get_font_family_and_reduced(cls):
        """Return the font family name and whether the font is reduced."""
        ff = mpl.rcParams['font.family']
        ff_val = ff[0].lower() if len(ff) == 1 else None
        # "Reduced" means a single specific font name (e.g. 'times') was
        # given rather than a generic family.
        if len(ff) == 1 and ff_val in cls._font_families:
            return ff_val, False
        elif len(ff) == 1 and ff_val in cls._font_preambles:
            return cls._font_types[ff_val], True
        else:
            _log.info('font.family must be one of (%s) when text.usetex is '
                      'True. serif will be used by default.',
                      ', '.join(cls._font_families))
            return 'serif', False

    @classmethod
    def _get_font_preamble_and_command(cls):
        requested_family, is_reduced_font = cls._get_font_family_and_reduced()

        preambles = {}
        for font_family in cls._font_families:
            if is_reduced_font and font_family == requested_family:
                preambles[font_family] = cls._font_preambles[
                    mpl.rcParams['font.family'][0].lower()]
            else:
                rcfonts = mpl.rcParams[f"font.{font_family}"]
                # Pick the first rc-listed font that has a known preamble.
                for i, font in enumerate(map(str.lower, rcfonts)):
                    if font in cls._font_preambles:
                        preambles[font_family] = cls._font_preambles[font]
                        _log.debug(
                            'family: %s, package: %s, font: %s, skipped: %s',
                            font_family, cls._font_preambles[font], rcfonts[i],
                            ', '.join(rcfonts[:i]),
                        )
                        break
                else:
                    # NOTE(review): adjacent string literals join without a
                    # space ("...fontfamily in rcParams...") -- confirm the
                    # wording is intended.
                    _log.info('No LaTeX-compatible font found for the %s font'
                              'family in rcParams. Using default.',
                              font_family)
                    preambles[font_family] = cls._font_preambles[font_family]

        # The following packages and commands need to be included in the latex
        # file's preamble:
        cmd = {preambles[family]
               for family in ['serif', 'sans-serif', 'monospace']}
        if requested_family == 'cursive':
            cmd.add(preambles['cursive'])
        cmd.add(r'\usepackage{type1cm}')
        preamble = '\n'.join(sorted(cmd))
        fontcmd = (r'\sffamily' if requested_family == 'sans-serif' else
                   r'\ttfamily' if requested_family == 'monospace' else
                   r'\rmfamily')
        return preamble, fontcmd

    @classmethod
    def get_basefile(cls, tex, fontsize, dpi=None):
        """
        Return a filename based on a hash of the string, fontsize, and dpi.
        """
        src = cls._get_tex_source(tex, fontsize) + str(dpi)
        filehash = hashlib.sha256(
            src.encode('utf-8'),
            usedforsecurity=False
        ).hexdigest()
        filepath = Path(cls._texcache)

        # Shard the cache into two levels of two-letter subdirectories to
        # keep any single directory from growing too large.
        num_letters, num_levels = 2, 2
        for i in range(0, num_letters*num_levels, num_letters):
            filepath = filepath / Path(filehash[i:i+2])
            filepath.mkdir(parents=True, exist_ok=True)
        return os.path.join(filepath, filehash)

    @classmethod
    def get_font_preamble(cls):
        """
        Return a string containing font configuration for the tex preamble.
        """
        font_preamble, command = cls._get_font_preamble_and_command()
        return font_preamble

    @classmethod
    def get_custom_preamble(cls):
        """Return a string containing user additions to the tex preamble."""
        return mpl.rcParams['text.latex.preamble']

    @classmethod
    def _get_tex_source(cls, tex, fontsize):
        """Return the complete TeX source for processing a TeX string."""
        font_preamble, fontcmd = cls._get_font_preamble_and_command()
        baselineskip = 1.25 * fontsize
        return "\n".join([
            r"\documentclass{article}",
            r"% Pass-through \mathdefault, which is used in non-usetex mode",
            r"% to use the default text font but was historically suppressed",
            r"% in usetex mode.",
            r"\newcommand{\mathdefault}[1]{#1}",
            font_preamble,
            r"\usepackage[utf8]{inputenc}",
            r"\DeclareUnicodeCharacter{2212}{\ensuremath{-}}",
            r"% geometry is loaded before the custom preamble as ",
            r"% convert_psfrags relies on a custom preamble to change the ",
            r"% geometry.",
            r"\usepackage[papersize=72in, margin=1in]{geometry}",
            cls.get_custom_preamble(),
            r"% Use `underscore` package to take care of underscores in text.",
            r"% The [strings] option allows to use underscores in file names.",
            _usepackage_if_not_loaded("underscore", option="strings"),
            r"% Custom packages (e.g. newtxtext) may already have loaded ",
            r"% textcomp with different options.",
            _usepackage_if_not_loaded("textcomp"),
            r"\pagestyle{empty}",
            r"\begin{document}",
            r"% The empty hbox ensures that a page is printed even for empty",
            r"% inputs, except when using psfrag which gets confused by it.",
            r"% matplotlibbaselinemarker is used by dviread to detect the",
            r"% last line's baseline.",
            rf"\fontsize{{{fontsize}}}{{{baselineskip}}}%",
            r"\ifdefined\psfrag\else\hbox{}\fi%",
            rf"{{{fontcmd} {tex}}}%",
            r"\end{document}",
        ])

    @classmethod
    def make_tex(cls, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific font size.

        Return the file name.
        """
        texfile = cls.get_basefile(tex, fontsize) + ".tex"
        Path(texfile).write_text(cls._get_tex_source(tex, fontsize),
                                 encoding='utf-8')
        return texfile

    @classmethod
    def _run_checked_subprocess(cls, command, tex, *, cwd=None):
        # Run *command*, raising a RuntimeError with a helpful message if the
        # executable is missing or exits with a nonzero status.
        _log.debug(cbook._pformat_subprocess(command))
        try:
            report = subprocess.check_output(
                command, cwd=cwd if cwd is not None else cls._texcache,
                stderr=subprocess.STDOUT)
        except FileNotFoundError as exc:
            raise RuntimeError(
                f'Failed to process string with tex because {command[0]} '
                'could not be found') from exc
        except subprocess.CalledProcessError as exc:
            raise RuntimeError(
                '{prog} was not able to process the following string:\n'
                '{tex!r}\n\n'
                'Here is the full command invocation and its output:\n\n'
                '{format_command}\n\n'
                '{exc}\n\n'.format(
                    prog=command[0],
                    format_command=cbook._pformat_subprocess(command),
                    tex=tex.encode('unicode_escape'),
                    exc=exc.output.decode('utf-8', 'backslashreplace'))
                ) from None
        _log.debug(report)
        return report

    @classmethod
    def make_dvi(cls, tex, fontsize):
        """
        Generate a dvi file containing latex's layout of tex string.

        Return the file name.
        """
        basefile = cls.get_basefile(tex, fontsize)
        dvifile = '%s.dvi' % basefile
        if not os.path.exists(dvifile):
            texfile = Path(cls.make_tex(tex, fontsize))
            # Generate the dvi in a temporary directory to avoid race
            # conditions e.g. if multiple processes try to process the same tex
            # string at the same time.  Having tmpdir be a subdirectory of the
            # final output dir ensures that they are on the same filesystem,
            # and thus replace() works atomically.  It also allows referring to
            # the texfile with a relative path (for pathological MPLCONFIGDIRs,
            # the absolute path may contain characters (e.g. ~) that TeX does
            # not support; n.b. relative paths cannot traverse parents, or it
            # will be blocked when `openin_any = p` in texmf.cnf).
            cwd = Path(dvifile).parent
            with TemporaryDirectory(dir=cwd) as tmpdir:
                tmppath = Path(tmpdir)
                cls._run_checked_subprocess(
                    ["latex", "-interaction=nonstopmode", "--halt-on-error",
                     f"--output-directory={tmppath.name}",
                     f"{texfile.name}"], tex, cwd=cwd)
                (tmppath / Path(dvifile).name).replace(dvifile)
        return dvifile

    @classmethod
    def make_png(cls, tex, fontsize, dpi):
        """
        Generate a png file containing latex's rendering of tex string.

        Return the file name.
        """
        basefile = cls.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png' % basefile
        # see get_rgba for a discussion of the background
        if not os.path.exists(pngfile):
            dvifile = cls.make_dvi(tex, fontsize)
            cmd = ["dvipng", "-bg", "Transparent", "-D", str(dpi),
                   "-T", "tight", "-o", pngfile, dvifile]
            # When testing, disable FreeType rendering for reproducibility; but
            # dvipng 1.16 has a bug (fixed in f3ff241) that breaks --freetype0
            # mode, so for it we keep FreeType enabled; the image will be
            # slightly off.
            if (getattr(mpl, "_called_from_pytest", False) and
                    mpl._get_executable_info("dvipng").raw_version != "1.16"):
                cmd.insert(1, "--freetype0")
            cls._run_checked_subprocess(cmd, tex)
        return pngfile

    @classmethod
    def get_grey(cls, tex, fontsize=None, dpi=None):
        """Return the alpha channel."""
        if not fontsize:
            fontsize = mpl.rcParams['font.size']
        if not dpi:
            dpi = mpl.rcParams['savefig.dpi']
        key = cls._get_tex_source(tex, fontsize), dpi
        alpha = cls._grey_arrayd.get(key)
        if alpha is None:
            # Render (or load a cached render of) the png and keep only its
            # alpha channel.
            pngfile = cls.make_png(tex, fontsize, dpi)
            rgba = mpl.image.imread(os.path.join(cls._texcache, pngfile))
            cls._grey_arrayd[key] = alpha = rgba[:, :, -1]
        return alpha

    @classmethod
    def get_rgba(cls, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
        r"""
        Return latex's rendering of the tex string as an RGBA array.

        Examples
        --------
        >>> texmanager = TexManager()
        >>> s = r"\TeX\ is $\displaystyle\sum_n\frac{-e^{i\pi}}{2^n}$!"
        >>> Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1, 0, 0))
        """
        alpha = cls.get_grey(tex, fontsize, dpi)
        rgba = np.empty((*alpha.shape, 4))
        rgba[..., :3] = mpl.colors.to_rgb(rgb)
        rgba[..., -1] = alpha
        return rgba

    @classmethod
    def get_text_width_height_descent(cls, tex, fontsize, renderer=None):
        """Return width, height and descent of the text."""
        if tex.strip() == '':
            return 0, 0, 0
        dvifile = cls.make_dvi(tex, fontsize)
        dpi_fraction = renderer.points_to_pixels(1.) if renderer else 1
        with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:
            page, = dvi
        # A total height (including the descent) needs to be returned.
        return page.width, page.height + page.descent, page.descent
venv\Lib\site-packages\matplotlib\text.py
"""
Classes for including text in a figure.
"""
import functools
import logging
import math
from numbers import Real
import weakref
import numpy as np
import matplotlib as mpl
from . import _api, artist, cbook, _docstring
from .artist import Artist
from .font_manager import FontProperties
from .patches import FancyArrowPatch, FancyBboxPatch, Rectangle
from .textpath import TextPath, TextToPath # noqa # Logically located here
from .transforms import (
Affine2D, Bbox, BboxBase, BboxTransformTo, IdentityTransform, Transform)
_log = logging.getLogger(__name__)
def _get_textbox(text, renderer):
    """
    Calculate the bounding box of the text.

    The bbox position takes text rotation into account, but the width and
    height are those of the unrotated box (unlike `.Text.get_window_extent`).
    """
    # TODO : This function may move into the Text class as a method. As a
    # matter of fact, the information from the _get_textbox function
    # should be available during the Text._get_layout() call, which is
    # called within the _get_textbox. So, it would better to move this
    # function as a method with some refactoring of _get_layout method.
    # Strategy: un-rotate every line's corner points, take the axis-aligned
    # extremes there, then rotate the resulting anchor back.
    theta = np.deg2rad(text.get_rotation())
    unrotate = Affine2D().rotate(-theta)
    _, parts, descent = text._get_layout(renderer)

    xs = []
    ys = []
    for _, (w, h), x, y in parts:
        x0, y0 = unrotate.transform((x, y))
        y0 -= descent
        xs.extend([x0, x0 + w])
        ys.extend([y0, y0 + h])

    x_min, y_min = min(xs), min(ys)
    w_box = max(xs) - x_min
    h_box = max(ys) - y_min
    x_box, y_box = Affine2D().rotate(theta).transform((x_min, y_min))
    return x_box, y_box, w_box, h_box
def _get_text_metrics_with_cache(renderer, text, fontprop, ismath, dpi):
    """Call ``renderer.get_text_width_height_descent``, caching the results."""
    # Cached based on a copy of fontprop so that later in-place mutations of
    # the passed-in argument do not mess up the cache.  The renderer is held
    # via a weakref so the cache does not keep it alive.
    return _get_text_metrics_with_cache_impl(
        weakref.ref(renderer), text, fontprop.copy(), ismath, dpi)
@functools.lru_cache(4096)
def _get_text_metrics_with_cache_impl(
        renderer_ref, text, fontprop, ismath, dpi):
    # renderer_ref is a weakref created by _get_text_metrics_with_cache.
    # dpi is unused, but participates in cache invalidation (via the renderer).
    return renderer_ref().get_text_width_height_descent(text, fontprop, ismath)
@_docstring.interpd
@_api.define_aliases({
    "color": ["c"],
    "fontproperties": ["font", "font_properties"],
    "fontfamily": ["family"],
    "fontname": ["name"],
    "fontsize": ["size"],
    "fontstretch": ["stretch"],
    "fontstyle": ["style"],
    "fontvariant": ["variant"],
    "fontweight": ["weight"],
    "horizontalalignment": ["ha"],
    "verticalalignment": ["va"],
    "multialignment": ["ma"],
})
class Text(Artist):
    """Handle storing and drawing of text in window or data coordinates."""

    # Default draw order relative to other artists.
    zorder = 3
    # Class-level cache mapping str(FontProperties) -> {char: width in px},
    # shared by all instances; populated lazily by _char_index_at.
    _charsize_cache = dict()

    def __repr__(self):
        return f"Text({self._x}, {self._y}, {self._text!r})"
def __init__(self,
             x=0, y=0, text='', *,
             color=None,           # defaults to rc params
             verticalalignment='baseline',
             horizontalalignment='left',
             multialignment=None,
             fontproperties=None,  # defaults to FontProperties()
             rotation=None,
             linespacing=None,
             rotation_mode=None,
             usetex=None,          # defaults to rcParams['text.usetex']
             wrap=False,
             transform_rotates_text=False,
             parse_math=None,      # defaults to rcParams['text.parse_math']
             antialiased=None,     # defaults to rcParams['text.antialiased']
             **kwargs
             ):
    """
    Create a `.Text` instance at *x*, *y* with string *text*.

    The text is aligned relative to the anchor point (*x*, *y*) according
    to ``horizontalalignment`` (default: 'left') and ``verticalalignment``
    (default: 'baseline'). See also
    :doc:`/gallery/text_labels_and_annotations/text_alignment`.

    While Text accepts the 'label' keyword argument, by default it is not
    added to the handles of a legend.

    Valid keyword arguments are:

    %(Text:kwdoc)s
    """
    super().__init__()
    self._x, self._y = x, y
    # _text must exist before set_text (called from _reset_visual_defaults)
    # compares the new string against it.
    self._text = ''
    self._reset_visual_defaults(
        text=text,
        color=color,
        fontproperties=fontproperties,
        usetex=usetex,
        parse_math=parse_math,
        wrap=wrap,
        verticalalignment=verticalalignment,
        horizontalalignment=horizontalalignment,
        multialignment=multialignment,
        rotation=rotation,
        transform_rotates_text=transform_rotates_text,
        linespacing=linespacing,
        rotation_mode=rotation_mode,
        antialiased=antialiased
    )
    # Remaining keyword arguments (aliases already normalized by
    # define_aliases) are applied through the standard update machinery.
    self.update(kwargs)
def _reset_visual_defaults(
    self,
    text='',
    color=None,
    fontproperties=None,
    usetex=None,
    parse_math=None,
    wrap=False,
    verticalalignment='baseline',
    horizontalalignment='left',
    multialignment=None,
    rotation=None,
    transform_rotates_text=False,
    linespacing=None,
    rotation_mode=None,
    antialiased=None
):
    """
    (Re)initialize all visual properties.

    ``None`` values fall back to the corresponding rcParams where one
    exists (color, parse_math, usetex, antialiased).
    """
    self.set_text(text)
    self.set_color(mpl._val_or_rc(color, "text.color"))
    self.set_fontproperties(fontproperties)
    self.set_usetex(usetex)
    self.set_parse_math(mpl._val_or_rc(parse_math, 'text.parse_math'))
    self.set_wrap(wrap)
    self.set_verticalalignment(verticalalignment)
    self.set_horizontalalignment(horizontalalignment)
    self._multialignment = multialignment
    self.set_rotation(rotation)
    self._transform_rotates_text = transform_rotates_text
    self._bbox_patch = None  # a FancyBboxPatch instance
    self._renderer = None
    if linespacing is None:
        linespacing = 1.2  # Maybe use rcParam later.
    self.set_linespacing(linespacing)
    self.set_rotation_mode(rotation_mode)
    # Use _val_or_rc for consistency with the color/parse_math handling
    # above (previously an explicit ``is not None`` check with the same
    # semantics).
    self.set_antialiased(mpl._val_or_rc(antialiased, 'text.antialiased'))
def update(self, kwargs):
    # docstring inherited
    # Returns the list of values returned by each property setter, in the
    # order they were applied.
    ret = []
    kwargs = cbook.normalize_kwargs(kwargs, Text)
    sentinel = object()  # bbox can be None, so use another sentinel.
    # Update fontproperties first, as it has lowest priority.
    fontproperties = kwargs.pop("fontproperties", sentinel)
    if fontproperties is not sentinel:
        ret.append(self.set_fontproperties(fontproperties))
    # Update bbox last, as it depends on font properties.
    bbox = kwargs.pop("bbox", sentinel)
    ret.extend(super().update(kwargs))
    if bbox is not sentinel:
        ret.append(self.set_bbox(bbox))
    return ret
def __getstate__(self):
    # docstring inherited
    state = super().__getstate__()
    # The cached renderer cannot be pickled; drop the reference.
    state['_renderer'] = None
    return state
def contains(self, mouseevent):
    """
    Return whether the mouse event occurred inside the axis-aligned
    bounding-box of the text.
    """
    # Bail out when the event belongs to another canvas, the text is
    # hidden, or we have never been drawn (no renderer -> no extent).
    if (self._different_canvas(mouseevent)
            or not self.get_visible()
            or self._renderer is None):
        return False, {}
    # Explicitly use Text.get_window_extent(self) and not
    # self.get_window_extent() so that Annotation.contains does not
    # accidentally cover the entire annotation bounding box.
    extent = Text.get_window_extent(self)
    hit = (extent.x0 <= mouseevent.x <= extent.x1
           and extent.y0 <= mouseevent.y <= extent.y1)
    details = {}
    # A surrounding bbox patch extends the clickable region; merge its
    # containment result with ours.
    if self._bbox_patch:
        patch_hit, patch_details = self._bbox_patch.contains(mouseevent)
        hit = hit or patch_hit
        details["bbox_patch"] = patch_details
    return hit, details
def _get_xy_display(self):
    """
    Get the (possibly unit converted) transformed x, y in display coords.
    """
    # Strip units first, then map through the artist's transform.
    return self.get_transform().transform(self.get_unitless_position())
def _get_multialignment(self):
    """Return the alignment of lines within the box, falling back to ha."""
    malign = self._multialignment
    return malign if malign is not None else self._horizontalalignment
def _char_index_at(self, x):
    """
    Calculate the index closest to the coordinate x in display space.

    The position of text[index] is assumed to be the sum of the widths
    of all preceding characters text[:index].

    This works only on single line texts.
    """
    if not self._text:
        return 0

    text = self._text

    fontproperties = str(self._fontproperties)
    if fontproperties not in Text._charsize_cache:
        Text._charsize_cache[fontproperties] = dict()

    charsize_cache = Text._charsize_cache[fontproperties]
    try:
        # Measure each not-yet-cached character by temporarily making it
        # the whole text; the finally clause restores the original string
        # even if measuring fails (previously a failed get_window_extent
        # left self._text clobbered with a single character).
        for char in set(text):
            if char not in charsize_cache:
                self.set_text(char)
                bb = self.get_window_extent()
                charsize_cache[char] = bb.x1 - bb.x0
    finally:
        self.set_text(text)
    bb = self.get_window_extent()

    size_accum = np.cumsum([0] + [charsize_cache[x] for x in text])
    std_x = x - bb.x0
    return (np.abs(size_accum - std_x)).argmin()
def get_rotation(self):
    """Return the text angle in degrees between 0 and 360."""
    if not self.get_transform_rotates_text():
        return self._rotation
    # Project the stored angle through the transform at the text position.
    return self.get_transform().transform_angles(
        [self._rotation], [self.get_unitless_position()]).item(0)
def get_transform_rotates_text(self):
    """
    Return whether rotations of the transform affect the text direction.
    """
    return self._transform_rotates_text
def set_rotation_mode(self, m):
    """
    Set text rotation mode.

    Parameters
    ----------
    m : {None, 'default', 'anchor'}
        If ``"default"``, the text will be first rotated, then aligned according
        to their horizontal and vertical alignments.  If ``"anchor"``, then
        alignment occurs before rotation. Passing ``None`` will set the rotation
        mode to ``"default"``.
    """
    if m is not None:
        # Only explicit values are validated; None is the documented
        # shorthand for "default".
        _api.check_in_list(("anchor", "default"), rotation_mode=m)
        self._rotation_mode = m
    else:
        self._rotation_mode = "default"
    self.stale = True
def get_rotation_mode(self):
    """Return the text rotation mode."""
    return self._rotation_mode

def set_antialiased(self, antialiased):
    """
    Set whether to use antialiased rendering.

    Parameters
    ----------
    antialiased : bool

    Notes
    -----
    Antialiasing will be determined by :rc:`text.antialiased`
    and the parameter *antialiased* will have no effect if the text contains
    math expressions.
    """
    # Stored as-is; consumed by draw() via gc.set_antialiased.
    self._antialiased = antialiased
    self.stale = True

def get_antialiased(self):
    """Return whether antialiased rendering is used."""
    return self._antialiased
def update_from(self, other):
    # docstring inherited
    super().update_from(other)
    # Copy all text-specific visual state; fontproperties is copied so
    # that later mutation of *other*'s font does not affect *self*.
    self._color = other._color
    self._multialignment = other._multialignment
    self._verticalalignment = other._verticalalignment
    self._horizontalalignment = other._horizontalalignment
    self._fontproperties = other._fontproperties.copy()
    self._usetex = other._usetex
    self._rotation = other._rotation
    self._transform_rotates_text = other._transform_rotates_text
    self._picker = other._picker
    self._linespacing = other._linespacing
    self._antialiased = other._antialiased
    self.stale = True
def _get_layout(self, renderer):
    """
    Return the extent (bbox) of the text together with
    multiple-alignment information. Note that it returns an extent
    of a rotated text when necessary.

    Returns ``(bbox, info, descent)`` where *bbox* is the (alignment- and
    rotation-adjusted) `.Bbox` of the whole text, *info* is a list of
    ``(line, (width, height), x, y)`` per rendered line, and *descent* is
    the descent of the last line.
    """
    thisx, thisy = 0.0, 0.0
    lines = self._get_wrapped_text().split("\n")  # Ensures lines is not empty.

    ws = []
    hs = []
    xs = []
    ys = []

    # Full vertical extent of font, including ascenders and descenders:
    _, lp_h, lp_d = _get_text_metrics_with_cache(
        renderer, "lp", self._fontproperties,
        ismath="TeX" if self.get_usetex() else False,
        dpi=self.get_figure(root=True).dpi)
    min_dy = (lp_h - lp_d) * self._linespacing

    for i, line in enumerate(lines):
        clean_line, ismath = self._preprocess_math(line)
        if clean_line:
            w, h, d = _get_text_metrics_with_cache(
                renderer, clean_line, self._fontproperties,
                ismath=ismath, dpi=self.get_figure(root=True).dpi)
        else:
            # Empty lines still advance the baseline by the "lp" metrics
            # (applied just below via the max() clamps).
            w = h = d = 0

        # For multiline text, increase the line spacing when the text
        # net-height (excluding baseline) is larger than that of a "l"
        # (e.g., use of superscripts), which seems what TeX does.
        h = max(h, lp_h)
        d = max(d, lp_d)

        ws.append(w)
        hs.append(h)

        # Metrics of the last line that are needed later:
        baseline = (h - d) - thisy

        if i == 0:
            # position at baseline
            thisy = -(h - d)
        else:
            # put baseline a good distance from bottom of previous line
            thisy -= max(min_dy, (h - d) * self._linespacing)

        xs.append(thisx)  # == 0.
        ys.append(thisy)

        thisy -= d

    # Metrics of the last line that are needed later:
    descent = d

    # Bounding box definition:
    width = max(ws)
    xmin = 0
    xmax = width
    ymax = 0
    ymin = ys[-1] - descent  # baseline of last line minus its descent

    # get the rotation matrix
    M = Affine2D().rotate_deg(self.get_rotation())

    # now offset the individual text lines within the box
    malign = self._get_multialignment()
    if malign == 'left':
        offset_layout = [(x, y) for x, y in zip(xs, ys)]
    elif malign == 'center':
        offset_layout = [(x + width / 2 - w / 2, y)
                         for x, y, w in zip(xs, ys, ws)]
    elif malign == 'right':
        offset_layout = [(x + width - w, y)
                         for x, y, w in zip(xs, ys, ws)]

    # the corners of the unrotated bounding box
    corners_horiz = np.array(
        [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)])

    # now rotate the bbox
    corners_rotated = M.transform(corners_horiz)
    # compute the bounds of the rotated box
    xmin = corners_rotated[:, 0].min()
    xmax = corners_rotated[:, 0].max()
    ymin = corners_rotated[:, 1].min()
    ymax = corners_rotated[:, 1].max()
    width = xmax - xmin
    height = ymax - ymin

    # Now move the box to the target position offset the display
    # bbox by alignment
    halign = self._horizontalalignment
    valign = self._verticalalignment

    rotation_mode = self.get_rotation_mode()
    if rotation_mode != "anchor":
        # compute the text location in display coords and the offsets
        # necessary to align the bbox with that location
        if halign == 'center':
            offsetx = (xmin + xmax) / 2
        elif halign == 'right':
            offsetx = xmax
        else:
            offsetx = xmin

        if valign == 'center':
            offsety = (ymin + ymax) / 2
        elif valign == 'top':
            offsety = ymax
        elif valign == 'baseline':
            offsety = ymin + descent
        elif valign == 'center_baseline':
            offsety = ymin + height - baseline / 2.0
        else:
            offsety = ymin
    else:
        # "anchor" mode: align against the *unrotated* corners, then
        # rotate the resulting anchor point.
        xmin1, ymin1 = corners_horiz[0]
        xmax1, ymax1 = corners_horiz[2]

        if halign == 'center':
            offsetx = (xmin1 + xmax1) / 2.0
        elif halign == 'right':
            offsetx = xmax1
        else:
            offsetx = xmin1

        if valign == 'center':
            offsety = (ymin1 + ymax1) / 2.0
        elif valign == 'top':
            offsety = ymax1
        elif valign == 'baseline':
            offsety = ymax1 - baseline
        elif valign == 'center_baseline':
            offsety = ymax1 - baseline / 2.0
        else:
            offsety = ymin1

        offsetx, offsety = M.transform((offsetx, offsety))

    xmin -= offsetx
    ymin -= offsety

    bbox = Bbox.from_bounds(xmin, ymin, width, height)

    # now rotate the positions around the first (x, y) position
    xys = M.transform(offset_layout) - (offsetx, offsety)

    return bbox, list(zip(lines, zip(ws, hs), *xys.T)), descent
def set_bbox(self, rectprops):
    """
    Draw a bounding box around self.

    Parameters
    ----------
    rectprops : dict with properties for `.patches.FancyBboxPatch`
         The default boxstyle is 'square'. The mutation
         scale of the `.patches.FancyBboxPatch` is set to the fontsize.

    Examples
    --------
    ::

        t.set_bbox(dict(facecolor='red', alpha=0.5))
    """
    if rectprops is None:
        # Remove any existing box.
        self._bbox_patch = None
        self._update_clip_properties()
        return

    props = rectprops.copy()
    boxstyle = props.pop("boxstyle", None)
    pad = props.pop("pad", None)
    if boxstyle is None:
        boxstyle = "square"
        # Default pad is given in points; box styles expect a fraction of
        # the font size, so convert.
        pad = 4 if pad is None else pad  # points
        pad /= self.get_size()
    elif pad is None:
        pad = 0.3
    # boxstyle could be a callable or a string
    if isinstance(boxstyle, str) and "pad" not in boxstyle:
        boxstyle += ",pad=%0.2f" % pad
    self._bbox_patch = FancyBboxPatch(
        (0, 0), 1, 1,
        boxstyle=boxstyle, transform=IdentityTransform(), **props)
    self._update_clip_properties()
def get_bbox_patch(self):
    """
    Return the bbox Patch, or None if the `.patches.FancyBboxPatch`
    is not made.
    """
    return self._bbox_patch
def update_bbox_position_size(self, renderer):
    """
    Update the location and the size of the bbox.

    This method should be used when the position and size of the bbox needs
    to be updated before actually drawing the bbox.
    """
    if self._bbox_patch:
        # don't use self.get_unitless_position here, which refers to text
        # position in Text:
        posx = float(self.convert_xunits(self._x))
        posy = float(self.convert_yunits(self._y))
        posx, posy = self.get_transform().transform((posx, posy))

        x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
        # Size lives in the patch bounds; position and rotation live in
        # the patch transform (rotate about the origin, then translate).
        self._bbox_patch.set_bounds(0., 0., w_box, h_box)
        self._bbox_patch.set_transform(
            Affine2D()
            .rotate_deg(self.get_rotation())
            .translate(posx + x_box, posy + y_box))
        fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
        self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _update_clip_properties(self):
    """Propagate this artist's clipping state to the bbox patch, if any."""
    if not self._bbox_patch:
        return
    self._bbox_patch.update({
        "clip_box": self.clipbox,
        "clip_path": self._clippath,
        "clip_on": self._clipon,
    })
def set_clip_box(self, clipbox):
    # docstring inherited.
    super().set_clip_box(clipbox)
    # Keep the bbox patch's clipping in sync with ours.
    self._update_clip_properties()

def set_clip_path(self, path, transform=None):
    # docstring inherited.
    super().set_clip_path(path, transform)
    self._update_clip_properties()

def set_clip_on(self, b):
    # docstring inherited.
    super().set_clip_on(b)
    self._update_clip_properties()
def get_wrap(self):
    """Return whether the text can be wrapped."""
    return self._wrap

def set_wrap(self, wrap):
    """
    Set whether the text can be wrapped.

    Wrapping makes sure the text is confined to the (sub)figure box. It
    does not take into account any other artists.

    Parameters
    ----------
    wrap : bool

    Notes
    -----
    Wrapping does not work together with
    ``savefig(..., bbox_inches='tight')`` (which is also used internally
    by ``%matplotlib inline`` in IPython/Jupyter). The 'tight' setting
    rescales the canvas to accommodate all content and happens before
    wrapping.
    """
    # No stale flag needed: wrapping is recomputed at draw time
    # (see _get_wrapped_text, called from draw()).
    self._wrap = wrap
def _get_wrap_line_width(self):
    """
    Return the maximum line width for wrapping text based on the current
    orientation.
    """
    x0, y0 = self.get_transform().transform(self.get_position())
    figure_box = self.get_figure().get_window_extent()

    # Calculate available width based on text alignment
    alignment = self.get_horizontalalignment()
    # NOTE(review): this permanently switches the artist's rotation mode
    # as a side effect; presumably intended so wrapping measures from the
    # anchor point — confirm this is deliberate.
    self.set_rotation_mode('anchor')
    rotation = self.get_rotation()

    # Distance from the anchor to the figure edge along the text
    # direction, forward ("left") and backward ("right").
    left = self._get_dist_to_box(rotation, x0, y0, figure_box)
    right = self._get_dist_to_box(
        (180 + rotation) % 360, x0, y0, figure_box)

    if alignment == 'left':
        line_width = left
    elif alignment == 'right':
        line_width = right
    else:
        # Centered text may grow symmetrically by the smaller distance.
        line_width = 2 * min(left, right)

    return line_width
def _get_dist_to_box(self, rotation, x0, y0, figure_box):
    """
    Return the distance from the given points to the boundaries of a
    rotated box, in pixels.
    """
    # One branch per quadrant of the rotation angle: project the distance
    # to the two figure edges the text direction points toward, and keep
    # the nearer one.
    if rotation > 270:
        quad = rotation - 270
        h1 = (y0 - figure_box.y0) / math.cos(math.radians(quad))
        h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
    elif rotation > 180:
        quad = rotation - 180
        h1 = (x0 - figure_box.x0) / math.cos(math.radians(quad))
        h2 = (y0 - figure_box.y0) / math.cos(math.radians(90 - quad))
    elif rotation > 90:
        quad = rotation - 90
        h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
        h2 = (x0 - figure_box.x0) / math.cos(math.radians(90 - quad))
    else:
        h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
        h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))

    return min(h1, h2)
def _get_rendered_text_width(self, text):
    """
    Return the width of a given text string, in pixels.
    """
    # Only the width is needed; height and descent are discarded.
    width, _, _ = _get_text_metrics_with_cache(
        self._renderer, text, self.get_fontproperties(),
        cbook.is_math_text(text),
        self.get_figure(root=True).dpi)
    return math.ceil(width)
def _get_wrapped_text(self):
    """
    Return a copy of the text string with new lines added so that the text
    is wrapped relative to the parent figure (if `get_wrap` is True).
    """
    if not self.get_wrap():
        return self.get_text()

    # Not fit to handle breaking up latex syntax correctly, so
    # ignore latex for now.
    if self.get_usetex():
        return self.get_text()

    # Build the line incrementally, for a more accurate measure of length
    line_width = self._get_wrap_line_width()
    wrapped_lines = []

    # New lines in the user's text force a split
    unwrapped_lines = self.get_text().split('\n')

    # Now wrap each individual unwrapped line
    for unwrapped_line in unwrapped_lines:

        sub_words = unwrapped_line.split(' ')
        # Remove items from sub_words as we go, so stop when empty
        while len(sub_words) > 0:
            if len(sub_words) == 1:
                # Only one word, so just add it to the end
                wrapped_lines.append(sub_words.pop(0))
                continue

            for i in range(2, len(sub_words) + 1):
                # Get width of all words up to and including here
                line = ' '.join(sub_words[:i])
                current_width = self._get_rendered_text_width(line)

                # If all these words are too wide, append all not including
                # last word
                if current_width > line_width:
                    wrapped_lines.append(' '.join(sub_words[:i - 1]))
                    sub_words = sub_words[i - 1:]
                    break

                # Otherwise if all words fit in the width, append them all
                elif i == len(sub_words):
                    wrapped_lines.append(' '.join(sub_words[:i]))
                    sub_words = []
                    break

    return '\n'.join(wrapped_lines)
@artist.allow_rasterization
def draw(self, renderer):
    # docstring inherited
    if renderer is not None:
        # Cache the renderer for later extent queries (get_window_extent).
        self._renderer = renderer
    if not self.get_visible():
        return
    if self.get_text() == '':
        return

    renderer.open_group('text', self.get_gid())

    # Temporarily swap in the wrapped text so layout sees the final lines.
    with self._cm_set(text=self._get_wrapped_text()):
        bbox, info, descent = self._get_layout(renderer)
        trans = self.get_transform()

        # don't use self.get_position here, which refers to text
        # position in Text:
        x, y = self._x, self._y
        if np.ma.is_masked(x):
            x = np.nan
        if np.ma.is_masked(y):
            y = np.nan
        posx = float(self.convert_xunits(x))
        posy = float(self.convert_yunits(y))
        posx, posy = trans.transform((posx, posy))
        if np.isnan(posx) or np.isnan(posy):
            return  # don't throw a warning here
        if not np.isfinite(posx) or not np.isfinite(posy):
            _log.warning("posx and posy should be finite values")
            return
        canvasw, canvash = renderer.get_canvas_width_height()

        # Update the location and size of the bbox
        # (`.patches.FancyBboxPatch`), and draw it.
        if self._bbox_patch:
            self.update_bbox_position_size(renderer)
            self._bbox_patch.draw(renderer)

        gc = renderer.new_gc()
        gc.set_foreground(self.get_color())
        gc.set_alpha(self.get_alpha())
        gc.set_url(self._url)
        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)

        angle = self.get_rotation()

        for line, wh, x, y in info:

            # Only pass self (as mtext) for single-line texts, so that
            # backends can anchor metadata on the whole artist.
            mtext = self if len(info) == 1 else None
            x = x + posx
            y = y + posy
            if renderer.flipy():
                y = canvash - y
            clean_line, ismath = self._preprocess_math(line)

            if self.get_path_effects():
                from matplotlib.patheffects import PathEffectRenderer
                textrenderer = PathEffectRenderer(
                    self.get_path_effects(), renderer)
            else:
                textrenderer = renderer

            if self.get_usetex():
                textrenderer.draw_tex(gc, x, y, clean_line,
                                      self._fontproperties, angle,
                                      mtext=mtext)
            else:
                textrenderer.draw_text(gc, x, y, clean_line,
                                       self._fontproperties, angle,
                                       ismath=ismath, mtext=mtext)

        gc.restore()
    renderer.close_group('text')
    self.stale = False
def get_color(self):
    """Return the color of the text."""
    return self._color

def get_fontproperties(self):
    """Return the `.font_manager.FontProperties`."""
    return self._fontproperties

def get_fontfamily(self):
    """
    Return the list of font families used for font lookup.

    See Also
    --------
    .font_manager.FontProperties.get_family
    """
    return self._fontproperties.get_family()

def get_fontname(self):
    """
    Return the font name as a string.

    See Also
    --------
    .font_manager.FontProperties.get_name
    """
    return self._fontproperties.get_name()

def get_fontstyle(self):
    """
    Return the font style as a string.

    See Also
    --------
    .font_manager.FontProperties.get_style
    """
    return self._fontproperties.get_style()

def get_fontsize(self):
    """
    Return the font size as an integer.

    See Also
    --------
    .font_manager.FontProperties.get_size_in_points
    """
    return self._fontproperties.get_size_in_points()

def get_fontvariant(self):
    """
    Return the font variant as a string.

    See Also
    --------
    .font_manager.FontProperties.get_variant
    """
    return self._fontproperties.get_variant()

def get_fontweight(self):
    """
    Return the font weight as a string or a number.

    See Also
    --------
    .font_manager.FontProperties.get_weight
    """
    return self._fontproperties.get_weight()

def get_stretch(self):
    """
    Return the font stretch as a string or a number.

    See Also
    --------
    .font_manager.FontProperties.get_stretch
    """
    return self._fontproperties.get_stretch()

def get_horizontalalignment(self):
    """
    Return the horizontal alignment as a string.  Will be one of
    'left', 'center' or 'right'.
    """
    return self._horizontalalignment

def get_unitless_position(self):
    """Return the (x, y) unitless position of the text."""
    # This will get the position with all unit information stripped away.
    # This is here for convenience since it is done in several locations.
    x = float(self.convert_xunits(self._x))
    y = float(self.convert_yunits(self._y))
    return x, y

def get_position(self):
    """Return the (x, y) position of the text."""
    # This should return the same data (possible unitized) as was
    # specified with 'set_x' and 'set_y'.
    return self._x, self._y

def get_text(self):
    """Return the text string."""
    return self._text

def get_verticalalignment(self):
    """
    Return the vertical alignment as a string.  Will be one of
    'top', 'center', 'bottom', 'baseline' or 'center_baseline'.
    """
    return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
    """
    Return the `.Bbox` bounding the text, in display units.

    In addition to being used internally, this is useful for specifying
    clickable regions in a png file on a web page.

    Parameters
    ----------
    renderer : Renderer, optional
        A renderer is needed to compute the bounding box.  If the artist
        has already been drawn, the renderer is cached; thus, it is only
        necessary to pass this argument when calling `get_window_extent`
        before the first draw.  In practice, it is usually easier to
        trigger a draw first, e.g. by calling
        `~.Figure.draw_without_rendering` or ``plt.show()``.

    dpi : float, optional
        The dpi value for computing the bbox, defaults to
        ``self.get_figure(root=True).dpi`` (*not* the renderer dpi); should be set
        e.g. if to match regions with a figure saved with a custom dpi value.
    """
    if not self.get_visible():
        # Invisible text contributes a degenerate unit box.
        return Bbox.unit()

    fig = self.get_figure(root=True)
    if dpi is None:
        dpi = fig.dpi
    if self.get_text() == '':
        # Empty strings still anchor at their (transformed) position.
        with cbook._setattr_cm(fig, dpi=dpi):
            tx, ty = self._get_xy_display()
            return Bbox.from_bounds(tx, ty, 0, 0)

    if renderer is not None:
        self._renderer = renderer
    if self._renderer is None:
        self._renderer = fig._get_renderer()
    if self._renderer is None:
        raise RuntimeError(
            "Cannot get window extent of text w/o renderer. You likely "
            "want to call 'figure.draw_without_rendering()' first.")

    # Temporarily apply the requested dpi so layout metrics match it.
    with cbook._setattr_cm(fig, dpi=dpi):
        bbox, info, descent = self._get_layout(self._renderer)
        x, y = self.get_unitless_position()
        x, y = self.get_transform().transform((x, y))
        bbox = bbox.translated(x, y)
        return bbox
def set_backgroundcolor(self, color):
    """
    Set the background color of the text by updating the bbox.

    Parameters
    ----------
    color : :mpltype:`color`

    See Also
    --------
    .set_bbox : To change the position of the bounding box
    """
    if self._bbox_patch is not None:
        # Reuse the existing box; only recolor its face.
        self._bbox_patch.update(dict(facecolor=color))
    else:
        # No box yet: create one with matching face and edge colors.
        self.set_bbox(dict(facecolor=color, edgecolor=color))

    self._update_clip_properties()
    self.stale = True
def set_color(self, color):
    """
    Set the foreground color of the text

    Parameters
    ----------
    color : :mpltype:`color`
    """
    # "auto" is only supported by axisartist, but we can just let it error
    # out at draw time for simplicity.
    if not cbook._str_equal(color, "auto"):
        mpl.colors._check_color_like(color=color)
    self._color = color
    self.stale = True

def set_horizontalalignment(self, align):
    """
    Set the horizontal alignment relative to the anchor point.

    See also :doc:`/gallery/text_labels_and_annotations/text_alignment`.

    Parameters
    ----------
    align : {'left', 'center', 'right'}
    """
    _api.check_in_list(['center', 'right', 'left'], align=align)
    self._horizontalalignment = align
    self.stale = True

def set_multialignment(self, align):
    """
    Set the text alignment for multiline texts.

    The layout of the bounding box of all the lines is determined by the
    horizontalalignment and verticalalignment properties. This property
    controls the alignment of the text lines within that box.

    Parameters
    ----------
    align : {'left', 'right', 'center'}
    """
    _api.check_in_list(['center', 'right', 'left'], align=align)
    self._multialignment = align
    self.stale = True

def set_linespacing(self, spacing):
    """
    Set the line spacing as a multiple of the font size.

    The default line spacing is 1.2.

    Parameters
    ----------
    spacing : float (multiple of font size)
    """
    _api.check_isinstance(Real, spacing=spacing)
    self._linespacing = spacing
    self.stale = True
def set_fontfamily(self, fontname):
    """
    Set the font family.  Can be either a single string, or a list of
    strings in decreasing priority.  Each string may be either a real font
    name or a generic font class name.  If the latter, the specific font
    names will be looked up in the corresponding rcParams.

    If a `Text` instance is constructed with ``fontfamily=None``, then the
    font is set to :rc:`font.family`, and the
    same is done when `set_fontfamily()` is called on an existing
    `Text` instance.

    Parameters
    ----------
    fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \
'monospace'}

    See Also
    --------
    .font_manager.FontProperties.set_family
    """
    self._fontproperties.set_family(fontname)
    self.stale = True

def set_fontvariant(self, variant):
    """
    Set the font variant.

    Parameters
    ----------
    variant : {'normal', 'small-caps'}

    See Also
    --------
    .font_manager.FontProperties.set_variant
    """
    self._fontproperties.set_variant(variant)
    self.stale = True

def set_fontstyle(self, fontstyle):
    """
    Set the font style.

    Parameters
    ----------
    fontstyle : {'normal', 'italic', 'oblique'}

    See Also
    --------
    .font_manager.FontProperties.set_style
    """
    self._fontproperties.set_style(fontstyle)
    self.stale = True

def set_fontsize(self, fontsize):
    """
    Set the font size.

    Parameters
    ----------
    fontsize : float or {'xx-small', 'x-small', 'small', 'medium', \
'large', 'x-large', 'xx-large'}
        If a float, the fontsize in points. The string values denote sizes
        relative to the default font size.

    See Also
    --------
    .font_manager.FontProperties.set_size
    """
    self._fontproperties.set_size(fontsize)
    self.stale = True

def get_math_fontfamily(self):
    """
    Return the font family name for math text rendered by Matplotlib.

    The default value is :rc:`mathtext.fontset`.

    See Also
    --------
    set_math_fontfamily
    """
    return self._fontproperties.get_math_fontfamily()

def set_math_fontfamily(self, fontfamily):
    """
    Set the font family for math text rendered by Matplotlib.

    This does only affect Matplotlib's own math renderer.  It has no effect
    when rendering with TeX (``usetex=True``).

    Parameters
    ----------
    fontfamily : str
        The name of the font family.

        Available font families are defined in the
        :ref:`default matplotlibrc file
        <customizing-with-matplotlibrc-files>`.

    See Also
    --------
    get_math_fontfamily
    """
    # NOTE(review): unlike the other font setters this does not mark the
    # artist stale — confirm whether that is intentional.
    self._fontproperties.set_math_fontfamily(fontfamily)

def set_fontweight(self, weight):
    """
    Set the font weight.

    Parameters
    ----------
    weight : {a numeric value in range 0-1000, 'ultralight', 'light', \
'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', \
'demi', 'bold', 'heavy', 'extra bold', 'black'}

    See Also
    --------
    .font_manager.FontProperties.set_weight
    """
    self._fontproperties.set_weight(weight)
    self.stale = True

def set_fontstretch(self, stretch):
    """
    Set the font stretch (horizontal condensation or expansion).

    Parameters
    ----------
    stretch : {a numeric value in range 0-1000, 'ultra-condensed', \
'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', \
'expanded', 'extra-expanded', 'ultra-expanded'}

    See Also
    --------
    .font_manager.FontProperties.set_stretch
    """
    self._fontproperties.set_stretch(stretch)
    self.stale = True
def set_position(self, xy):
    """
    Set the (*x*, *y*) position of the text.

    Parameters
    ----------
    xy : (float, float)
    """
    # Delegate to the single-coordinate setters (which mark us stale).
    x, y = xy[0], xy[1]
    self.set_x(x)
    self.set_y(y)
def set_x(self, x):
    """
    Set the *x* position of the text.

    Parameters
    ----------
    x : float
    """
    # Stored as given (possibly unitized); unit conversion happens at
    # draw/extent time via convert_xunits.
    self._x = x
    self.stale = True

def set_y(self, y):
    """
    Set the *y* position of the text.

    Parameters
    ----------
    y : float
    """
    self._y = y
    self.stale = True
def set_rotation(self, s):
    """
    Set the rotation of the text.

    Parameters
    ----------
    s : float or {'vertical', 'horizontal'}
        The rotation angle in degrees in mathematically positive direction
        (counterclockwise). 'horizontal' equals 0, 'vertical' equals 90.
    """
    if isinstance(s, Real):
        # Normalize numeric input to [0, 360).
        angle = float(s) % 360
    elif s is None or cbook._str_equal(s, 'horizontal'):
        angle = 0.
    elif cbook._str_equal(s, 'vertical'):
        angle = 90.
    else:
        raise ValueError("rotation must be 'vertical', 'horizontal' or "
                         f"a number, not {s}")
    self._rotation = angle
    self.stale = True
def set_transform_rotates_text(self, t):
    """
    Whether rotations of the transform affect the text direction.

    Parameters
    ----------
    t : bool
    """
    self._transform_rotates_text = t
    self.stale = True

def set_verticalalignment(self, align):
    """
    Set the vertical alignment relative to the anchor point.

    See also :doc:`/gallery/text_labels_and_annotations/text_alignment`.

    Parameters
    ----------
    align : {'baseline', 'bottom', 'center', 'center_baseline', 'top'}
    """
    _api.check_in_list(
        ['top', 'bottom', 'center', 'baseline', 'center_baseline'],
        align=align)
    self._verticalalignment = align
    self.stale = True
def set_text(self, s):
    r"""
    Set the text string *s*.

    It may contain newlines (``\n``) or math in LaTeX syntax.

    Parameters
    ----------
    s : object
        Any object gets converted to its `str` representation, except for
        ``None`` which is converted to an empty string.
    """
    s = '' if s is None else str(s)
    # Avoid needlessly marking the artist stale when nothing changed.
    if s == self._text:
        return
    self._text = s
    self.stale = True
def _preprocess_math(self, s):
    """
    Return the string *s* after mathtext preprocessing, and the kind of
    mathtext support needed.

    - If *self* is configured to use TeX, return *s* unchanged except that
      a single space gets escaped, and the flag "TeX".
    - Otherwise, if *s* is mathtext (has an even number of unescaped dollar
      signs) and ``parse_math`` is not set to False, return *s* and the
      flag True.
    - Otherwise, return *s* with dollar signs unescaped, and the flag
      False.
    """
    if self.get_usetex():
        # A lone space must be escaped for TeX.
        return (r"\ " if s == " " else s), "TeX"
    if not self.get_parse_math():
        return s, False
    if cbook.is_math_text(s):
        return s, True
    return s.replace(r"\$", "$"), False
def set_fontproperties(self, fp):
    """
    Set the font properties that control the text.

    Parameters
    ----------
    fp : `.font_manager.FontProperties` or `str` or `pathlib.Path`
        If a `str`, it is interpreted as a fontconfig pattern parsed by
        `.FontProperties`.  If a `pathlib.Path`, it is interpreted as the
        absolute path to a font file.
    """
    # Copy so that later mutation of the caller's FontProperties does not
    # silently change this artist.
    self._fontproperties = FontProperties._from_any(fp).copy()
    self.stale = True
@_docstring.kwarg_doc("bool, default: :rc:`text.usetex`")
def set_usetex(self, usetex):
    """
    Parameters
    ----------
    usetex : bool or None
        Whether to render using TeX, ``None`` means to use
        :rc:`text.usetex`.
    """
    # _val_or_rc for consistency with the other rc-backed setters in this
    # class; bool() normalizes truthy inputs exactly as the previous
    # explicit branch did (the rc value is already a bool).
    self._usetex = bool(mpl._val_or_rc(usetex, 'text.usetex'))
    self.stale = True
def get_usetex(self):
    """Return whether this `Text` object uses TeX for rendering."""
    return self._usetex
def set_parse_math(self, parse_math):
    """
    Override switch to disable any mathtext parsing for this `Text`.

    Parameters
    ----------
    parse_math : bool
        If False, this `Text` will never use mathtext.  If True, mathtext
        will be used if there is an even number of unescaped dollar signs.
    """
    self._parse_math = bool(parse_math)
    # Changing how the string is parsed changes the rendered output, so
    # mark the artist stale, consistently with the other setters.
    self.stale = True
def get_parse_math(self):
    """Return whether mathtext parsing is considered for this `Text`."""
    return self._parse_math
def set_fontname(self, fontname):
    """
    Alias for `set_fontfamily`.

    This is a one-way alias only: the corresponding getter differs.

    Parameters
    ----------
    fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \
'monospace'}

    See Also
    --------
    .font_manager.FontProperties.set_family
    """
    self.set_fontfamily(fontname)
class OffsetFrom:
    """Callable helper computing `Annotation` offsets relative to an object."""

    def __init__(self, artist, ref_coord, unit="points"):
        """
        Parameters
        ----------
        artist : `~matplotlib.artist.Artist` or `.BboxBase` or `.Transform`
            The object to compute the offset from.
        ref_coord : (float, float)
            If *artist* is an `.Artist` or `.BboxBase`, the location of the
            offset origin in fractions of the *artist* bounding box.
            If *artist* is a transform, the offset origin is that transform
            applied to this value.
        unit : {'points', 'pixels'}, default: 'points'
            The screen units to use (pixels or points) for the offset input.
        """
        self._artist = artist
        # Unpack to copy (and shape-check) array-like input.
        rx, ry = ref_coord
        self._ref_coord = rx, ry
        self.set_unit(unit)

    def set_unit(self, unit):
        """
        Set the unit of the inputs to the transform returned by ``__call__``.

        Parameters
        ----------
        unit : {'points', 'pixels'}
        """
        _api.check_in_list(["points", "pixels"], unit=unit)
        self._unit = unit

    def get_unit(self):
        """Return the unit of the inputs to the ``__call__`` transform."""
        return self._unit

    def __call__(self, renderer):
        """
        Return the offset transform.

        Parameters
        ----------
        renderer : `RendererBase`
            The renderer used to compute the offset.

        Returns
        -------
        `Transform`
            Maps (x, y) in pixel or point units to screen units relative
            to the given artist.
        """
        if isinstance(self._artist, Transform):
            x, y = self._artist.transform(self._ref_coord)
        elif isinstance(self._artist, (Artist, BboxBase)):
            bbox = (self._artist.get_window_extent(renderer)
                    if isinstance(self._artist, Artist)
                    else self._artist)
            fx, fy = self._ref_coord
            x = bbox.x0 + bbox.width * fx
            y = bbox.y0 + bbox.height * fy
        else:
            # Raises a TypeError with a uniform error message.
            _api.check_isinstance(
                (Artist, BboxBase, Transform), artist=self._artist)
        # set_unit() guarantees the unit is either "points" or "pixels".
        scale = renderer.points_to_pixels(1) if self._unit == "points" else 1
        return Affine2D().scale(scale).translate(x, y)
class _AnnotationBase:
    """
    Mixin implementing the annotated-point machinery used by `Annotation`:
    storage of *xy*/*xycoords*, resolution of coordinate-system
    specifications into display-space transforms, clipping behavior, and
    mouse draggability.
    """

    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        x, y = xy  # Make copy when xy is an array (and check the shape).
        self.xy = x, y
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)
        self._draggable = None

    def _get_xy(self, renderer, xy, coords):
        # Return the display-space position of *xy* interpreted in *coords*.
        x, y = xy
        xcoord, ycoord = coords if isinstance(coords, tuple) else (coords, coords)
        # Unit conversion (dates, categoricals, ...) only applies to data
        # coordinates.
        if xcoord == 'data':
            x = float(self.convert_xunits(x))
        if ycoord == 'data':
            y = float(self.convert_yunits(y))
        return self._get_xy_transform(renderer, coords).transform((x, y))

    def _get_xy_transform(self, renderer, coords):
        # Resolve a *coords* specification -- a (xcoords, ycoords) tuple, a
        # callable, an Artist, a BboxBase, a Transform, or one of the
        # coordinate-system strings documented in `Annotation` -- into a
        # transform to display space.
        if isinstance(coords, tuple):
            # Separate systems for x and y: resolve each and blend them.
            xcoord, ycoord = coords
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, xcoord)
            tr2 = self._get_xy_transform(renderer, ycoord)
            return blended_transform_factory(tr1, tr2)
        elif callable(coords):
            tr = coords(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise TypeError(
                    f"xycoords callable must return a BboxBase or Transform, not a "
                    f"{type(tr).__name__}")
        elif isinstance(coords, Artist):
            bbox = coords.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(coords, BboxBase):
            return BboxTransformTo(coords)
        elif isinstance(coords, Transform):
            return coords
        elif not isinstance(coords, str):
            raise TypeError(
                f"'xycoords' must be an instance of str, tuple[str, str], Artist, "
                f"Transform, or Callable, not a {type(coords).__name__}")
        # From here on, *coords* is a string.
        if coords == 'data':
            return self.axes.transData
        elif coords == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform(apply_theta_transforms=False)
            trans = tr + self.axes.transData
            return trans
        # Remaining strings have the two-word form "<bbox_name> <unit>",
        # e.g. "axes points" or "offset pixels".
        try:
            bbox_name, unit = coords.split()
        except ValueError:  # i.e. len(coords.split()) != 2.
            raise ValueError(f"{coords!r} is not a valid coordinate") from None
        bbox0, xy0 = None, None
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.get_figure(root=False).figbbox
        elif bbox_name == "subfigure":
            bbox0 = self.get_figure(root=False).bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # reference x, y in display coordinate
        if bbox0 is not None:
            xy0 = bbox0.p0
        elif bbox_name == "offset":
            # Offsets are measured from the annotated point itself.
            xy0 = self._get_position_xy(renderer)
        else:
            raise ValueError(f"{coords!r} is not a valid coordinate")
        if unit == "points":
            tr = Affine2D().scale(
                self.get_figure(root=True).dpi / 72)  # dpi/72 dots per point
        elif unit == "pixels":
            tr = Affine2D()
        elif unit == "fontsize":
            tr = Affine2D().scale(
                self.get_size() * self.get_figure(root=True).dpi / 72)
        elif unit == "fraction":
            tr = Affine2D().scale(*bbox0.size)
        else:
            raise ValueError(f"{unit!r} is not a recognized unit")
        return tr.translate(*xy0)

    def set_annotation_clip(self, b):
        """
        Set the annotation's clipping behavior.

        Parameters
        ----------
        b : bool or None
            - True: The annotation will be clipped when ``self.xy`` is
              outside the Axes.
            - False: The annotation will always be drawn.
            - None: The annotation will be clipped when ``self.xy`` is
              outside the Axes and ``self.xycoords == "data"``.
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return the annotation's clipping behavior.

        See `set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def _get_position_xy(self, renderer):
        """Return the pixel position of the annotated point."""
        return self._get_xy(renderer, self.xy, self.xycoords)

    def _check_xy(self, renderer=None):
        """Check whether the annotation at *xy_pixel* should be drawn."""
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the Axes.
            xy_pixel = self._get_position_xy(renderer)
            return self.axes.contains_point(xy_pixel)
        return True

    def draggable(self, state=None, use_blit=False):
        """
        Set whether the annotation is draggable with the mouse.

        Parameters
        ----------
        state : bool or None
            - True or False: set the draggability.
            - None: toggle the draggability.
        use_blit : bool, default: False
            Use blitting for faster image composition. For details see
            :ref:`func-animation`.

        Returns
        -------
        DraggableAnnotation or None
            If the annotation is draggable, the corresponding
            `.DraggableAnnotation` helper is returned.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None
        # if state is None we'll toggle
        if state is None:
            state = not is_draggable
        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None
        return self._draggable
class Annotation(Text, _AnnotationBase):
    """
    An `.Annotation` is a `.Text` that can refer to a specific position *xy*.

    Optionally an arrow pointing from the text to *xy* can be drawn.

    Attributes
    ----------
    xy
        The annotated position.
    xycoords
        The coordinate system for *xy*.
    arrow_patch
        A `.FancyArrowPatch` to point from *xytext* to *xy*.
    """

    def __str__(self):
        return f"Annotation({self.xy[0]:g}, {self.xy[1]:g}, {self._text!r})"

    def __init__(self, text, xy,
                 xytext=None,
                 xycoords='data',
                 textcoords=None,
                 arrowprops=None,
                 annotation_clip=None,
                 **kwargs):
        """
        Annotate the point *xy* with text *text*.

        In the simplest form, the text is placed at *xy*.

        Optionally, the text can be displayed in another position *xytext*.
        An arrow pointing from the text to the annotated point *xy* can then
        be added by defining *arrowprops*.

        Parameters
        ----------
        text : str
            The text of the annotation.

        xy : (float, float)
            The point *(x, y)* to annotate. The coordinate system is
            determined by *xycoords*.

        xytext : (float, float), default: *xy*
            The position *(x, y)* to place the text at. The coordinate system
            is determined by *textcoords*.

        xycoords : single or two-tuple of str or `.Artist` or `.Transform` \
or callable, default: 'data'
            The coordinate system that *xy* is given in. The following types
            of values are supported:

            - One of the following strings:

              ==================== ============================================
              Value                Description
              ==================== ============================================
              'figure points'      Points from the lower left of the figure
              'figure pixels'      Pixels from the lower left of the figure
              'figure fraction'    Fraction of figure from lower left
              'subfigure points'   Points from the lower left of the subfigure
              'subfigure pixels'   Pixels from the lower left of the subfigure
              'subfigure fraction' Fraction of subfigure from lower left
              'axes points'        Points from lower left corner of the Axes
              'axes pixels'        Pixels from lower left corner of the Axes
              'axes fraction'      Fraction of Axes from lower left
              'data'               Use the coordinate system of the object
                                   being annotated (default)
              'polar'              *(theta, r)* if not native 'data'
                                   coordinates
              ==================== ============================================

              Note that 'subfigure pixels' and 'figure pixels' are the same
              for the parent figure, so users who want code that is usable in
              a subfigure can use 'subfigure pixels'.

            - An `.Artist`: *xy* is interpreted as a fraction of the artist's
              `~matplotlib.transforms.Bbox`. E.g. *(0, 0)* would be the lower
              left corner of the bounding box and *(0.5, 1)* would be the
              center top of the bounding box.

            - A `.Transform` to transform *xy* to screen coordinates.

            - A function with one of the following signatures::

                def transform(renderer) -> Bbox
                def transform(renderer) -> Transform

              where *renderer* is a `.RendererBase` subclass.

              The result of the function is interpreted like the `.Artist`
              and `.Transform` cases above.

            - A tuple *(xcoords, ycoords)* specifying separate coordinate
              systems for *x* and *y*. *xcoords* and *ycoords* must each be
              of one of the above described types.

            See :ref:`plotting-guide-annotation` for more details.

        textcoords : single or two-tuple of str or `.Artist` or `.Transform` \
or callable, default: value of *xycoords*
            The coordinate system that *xytext* is given in.

            All *xycoords* values are valid as well as the following strings:

            ================= =================================================
            Value             Description
            ================= =================================================
            'offset points'   Offset, in points, from the *xy* value
            'offset pixels'   Offset, in pixels, from the *xy* value
            'offset fontsize' Offset, relative to fontsize, from the *xy* value
            ================= =================================================

        arrowprops : dict, optional
            The properties used to draw a `.FancyArrowPatch` arrow between the
            positions *xy* and *xytext*. Defaults to None, i.e. no arrow is
            drawn.

            For historical reasons there are two different ways to specify
            arrows, "simple" and "fancy":

            **Simple arrow:**

            If *arrowprops* does not contain the key 'arrowstyle' the
            allowed keys are:

            ========== =================================================
            Key        Description
            ========== =================================================
            width      The width of the arrow in points
            headwidth  The width of the base of the arrow head in points
            headlength The length of the arrow head in points
            shrink     Fraction of total length to shrink from both ends
            ?          Any `.FancyArrowPatch` property
            ========== =================================================

            The arrow is attached to the edge of the text box, the exact
            position (corners or centers) depending on where it's pointing to.

            **Fancy arrow:**

            This is used if 'arrowstyle' is provided in the *arrowprops*.

            Valid keys are the following `.FancyArrowPatch` parameters:

            =============== ===================================
            Key             Description
            =============== ===================================
            arrowstyle      The arrow style
            connectionstyle The connection style
            relpos          See below; default is (0.5, 0.5)
            patchA          Default is bounding box of the text
            patchB          Default is None
            shrinkA         In points. Default is 2 points
            shrinkB         In points. Default is 2 points
            mutation_scale  Default is text size (in points)
            mutation_aspect Default is 1
            ?               Any `.FancyArrowPatch` property
            =============== ===================================

            The exact starting point position of the arrow is defined by
            *relpos*. It's a tuple of relative coordinates of the text box,
            where (0, 0) is the lower left corner and (1, 1) is the upper
            right corner. Values <0 and >1 are supported and specify points
            outside the text box. By default (0.5, 0.5), so the starting
            point is centered in the text box.

        annotation_clip : bool or None, default: None
            Whether to clip (i.e. not draw) the annotation when the
            annotation point *xy* is outside the Axes area.

            - If *True*, the annotation will be clipped when *xy* is outside
              the Axes.
            - If *False*, the annotation will always be drawn.
            - If *None*, the annotation will be clipped when *xy* is outside
              the Axes and *xycoords* is 'data'.

        **kwargs
            Additional kwargs are passed to `.Text`.

        Returns
        -------
        `.Annotation`

        See Also
        --------
        :ref:`annotations`
        """
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        # warn about wonky input data
        if (xytext is None and
                textcoords is not None and
                textcoords != xycoords):
            _api.warn_external("You have used the `textcoords` kwarg, but "
                               "not the `xytext` kwarg. This can lead to "
                               "surprising results.")
        # clean up textcoords and assign default
        if textcoords is None:
            textcoords = self.xycoords
        self._textcoords = textcoords
        # cleanup xytext defaults
        if xytext is None:
            xytext = self.xy
        x, y = xytext
        self.arrowprops = arrowprops
        if arrowprops is not None:
            # Work on a copy so the caller's dict is not mutated by pops.
            arrowprops = arrowprops.copy()
            if "arrowstyle" in arrowprops:
                self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
            else:
                # modified YAArrow API to be used with FancyArrowPatch
                for key in ['width', 'headwidth', 'headlength', 'shrink']:
                    arrowprops.pop(key, None)
            # Placeholder positions; the real ones are set in
            # update_positions() at draw time.
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **arrowprops)
        else:
            self.arrow_patch = None
        # Must come last, as some kwargs may be propagated to arrow_patch.
        Text.__init__(self, x, y, text, **kwargs)

    def contains(self, mouseevent):
        # docstring inherited; a hit on either the text or the arrow counts.
        if self._different_canvas(mouseevent):
            return False, {}
        contains, tinfo = Text.contains(self, mouseevent)
        if self.arrow_patch is not None:
            in_patch, _ = self.arrow_patch.contains(mouseevent)
            contains = contains or in_patch
        return contains, tinfo

    @property
    def xycoords(self):
        # The coordinate system for the annotated point *xy*.
        return self._xycoords

    @xycoords.setter
    def xycoords(self, xycoords):
        def is_offset(s):
            return isinstance(s, str) and s.startswith("offset")
        # "offset ..." systems are relative to *xy* itself, so they cannot
        # define *xy*.
        if (isinstance(xycoords, tuple) and any(map(is_offset, xycoords))
                or is_offset(xycoords)):
            raise ValueError("xycoords cannot be an offset coordinate")
        self._xycoords = xycoords

    @property
    def xyann(self):
        """
        The text position.

        See also *xytext* in `.Annotation`.
        """
        return self.get_position()

    @xyann.setter
    def xyann(self, xytext):
        self.set_position(xytext)

    def get_anncoords(self):
        """
        Return the coordinate system to use for `.Annotation.xyann`.

        See also *xycoords* in `.Annotation`.
        """
        return self._textcoords

    def set_anncoords(self, coords):
        """
        Set the coordinate system to use for `.Annotation.xyann`.

        See also *xycoords* in `.Annotation`.
        """
        self._textcoords = coords

    anncoords = property(get_anncoords, set_anncoords, doc="""
        The coordinate system to use for `.Annotation.xyann`.""")

    def set_figure(self, fig):
        # docstring inherited
        # Keep the arrow patch on the same figure as the text.
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        Artist.set_figure(self, fig)

    def update_positions(self, renderer):
        """
        Update the pixel positions of the annotation text and the arrow patch.
        """
        # generate transformation
        self.set_transform(self._get_xy_transform(renderer, self.anncoords))
        arrowprops = self.arrowprops
        if arrowprops is None:
            return
        bbox = Text.get_window_extent(self, renderer)
        arrow_end = x1, y1 = self._get_position_xy(renderer)  # Annotated pos.
        ms = arrowprops.get("mutation_scale", self.get_size())
        self.arrow_patch.set_mutation_scale(ms)
        if "arrowstyle" not in arrowprops:
            # Approximately simulate the YAArrow.
            shrink = arrowprops.get('shrink', 0.0)
            width = arrowprops.get('width', 4)
            headwidth = arrowprops.get('headwidth', 12)
            headlength = arrowprops.get('headlength', 12)
            # NB: ms is in pts
            stylekw = dict(head_length=headlength / ms,
                           head_width=headwidth / ms,
                           tail_width=width / ms)
            self.arrow_patch.set_arrowstyle('simple', **stylekw)
            # using YAArrow style:
            # pick the corner of the text bbox closest to annotated point.
            xpos = [(bbox.x0, 0), ((bbox.x0 + bbox.x1) / 2, 0.5), (bbox.x1, 1)]
            ypos = [(bbox.y0, 0), ((bbox.y0 + bbox.y1) / 2, 0.5), (bbox.y1, 1)]
            x, relposx = min(xpos, key=lambda v: abs(v[0] - x1))
            y, relposy = min(ypos, key=lambda v: abs(v[0] - y1))
            self._arrow_relpos = (relposx, relposy)
            r = np.hypot(y - y1, x - x1)
            shrink_pts = shrink * r / renderer.points_to_pixels(1)
            self.arrow_patch.shrinkA = self.arrow_patch.shrinkB = shrink_pts
        # adjust the starting point of the arrow relative to the textbox.
        # TODO : Rotation needs to be accounted.
        arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos
        # The arrow is drawn from arrow_begin to arrow_end. It will be first
        # clipped by patchA and patchB. Then it will be shrunk by shrinkA and
        # shrinkB (in points). If patchA is not set, self.bbox_patch is used.
        self.arrow_patch.set_positions(arrow_begin, arrow_end)
        if "patchA" in arrowprops:
            patchA = arrowprops["patchA"]
        elif self._bbox_patch:
            patchA = self._bbox_patch
        elif self.get_text() == "":
            patchA = None
        else:
            # Default: clip the arrow against a slightly padded text bbox.
            pad = renderer.points_to_pixels(4)
            patchA = Rectangle(
                xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2),
                width=bbox.width + pad, height=bbox.height + pad,
                transform=IdentityTransform(), clip_on=False)
        self.arrow_patch.set_patchA(patchA)

    @artist.allow_rasterization
    def draw(self, renderer):
        # docstring inherited
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible() or not self._check_xy(renderer):
            return
        # Update text positions before `Text.draw` would, so that the
        # FancyArrowPatch is correctly positioned.
        self.update_positions(renderer)
        self.update_bbox_position_size(renderer)
        if self.arrow_patch is not None:  # FancyArrowPatch
            if (self.arrow_patch.get_figure(root=False) is None and
                    (fig := self.get_figure(root=False)) is not None):
                self.arrow_patch.set_figure(fig)
            self.arrow_patch.draw(renderer)
        # Draw text, including FancyBboxPatch, after FancyArrowPatch.
        # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
        Text.draw(self, renderer)

    def get_window_extent(self, renderer=None):
        # docstring inherited
        # This block is the same as in Text.get_window_extent, but we need to
        # set the renderer before calling update_positions().
        if not self.get_visible() or not self._check_xy(renderer):
            return Bbox.unit()
        if renderer is not None:
            self._renderer = renderer
        if self._renderer is None:
            self._renderer = self.get_figure(root=True)._get_renderer()
        if self._renderer is None:
            raise RuntimeError('Cannot get window extent without renderer')
        self.update_positions(self._renderer)
        text_bbox = Text.get_window_extent(self)
        bboxes = [text_bbox]
        if self.arrow_patch is not None:
            bboxes.append(self.arrow_patch.get_window_extent())
        return Bbox.union(bboxes)

    def get_tightbbox(self, renderer=None):
        # docstring inherited
        if not self._check_xy(renderer):
            return Bbox.null()
        return super().get_tightbbox(renderer)
# Make Annotation's parameter documentation available for interpolation into
# other docstrings via %(Annotation)s.
_docstring.interpd.register(Annotation=Annotation.__init__.__doc__)
venv\Lib\site-packages\matplotlib\textpath.py
from collections import OrderedDict
import logging
import urllib.parse
import numpy as np
from matplotlib import _text_helpers, dviread
from matplotlib.font_manager import (
FontProperties, get_font, fontManager as _fontManager
)
from matplotlib.ft2font import LoadFlags
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
# Module-level logger for this module.
_log = logging.getLogger(__name__)
class TextToPath:
    """A class that converts strings to paths."""

    # Glyph outlines are generated at this font size and rescaled on use.
    FONT_SCALE = 100.
    # Resolution used when generating the reference-size outlines.
    DPI = 72

    def __init__(self):
        self.mathtext_parser = MathTextParser('path')
        self._texmanager = None

    def _get_font(self, prop):
        """
        Find the `FT2Font` matching font properties *prop*, with its size set.
        """
        filenames = _fontManager._find_fonts_by_props(prop)
        font = get_font(filenames)
        font.set_size(self.FONT_SCALE, self.DPI)
        return font

    def _get_hinting_flag(self):
        # Paths are resolution-independent, so hinting is never wanted.
        return LoadFlags.NO_HINTING

    def _get_char_id(self, font, ccode):
        """
        Return a unique id for the given font and character-code set.
        """
        return urllib.parse.quote(f"{font.postscript_name}-{ccode:x}")

    def get_text_width_height_descent(self, s, prop, ismath):
        """
        Return the width, height and descent of string *s*, in points,
        rendered at the size of *prop*.  *ismath* follows the convention of
        ``Text._preprocess_math``: ``"TeX"``, True (mathtext), or False.
        """
        fontsize = prop.get_size_in_points()
        if ismath == "TeX":
            return TexManager().get_text_width_height_descent(s, fontsize)
        scale = fontsize / self.FONT_SCALE
        if ismath:
            prop = prop.copy()
            prop.set_size(self.FONT_SCALE)
            # NOTE(review): passes 72 rather than self.DPI; equivalent while
            # DPI == 72 -- confirm before changing either constant.
            width, height, descent, *_ = \
                self.mathtext_parser.parse(s, 72, prop)
            return width * scale, height * scale, descent * scale
        font = self._get_font(prop)
        font.set_text(s, 0.0, flags=LoadFlags.NO_HINTING)
        w, h = font.get_width_height()
        w /= 64.0  # convert from subpixels
        h /= 64.0
        d = font.get_descent()
        d /= 64.0
        return w * scale, h * scale, d * scale

    def get_text_path(self, prop, s, ismath=False):
        """
        Convert text *s* to path (a tuple of vertices and codes for
        matplotlib.path.Path).

        Parameters
        ----------
        prop : `~matplotlib.font_manager.FontProperties`
            The font properties for the text.
        s : str
            The text to be converted.
        ismath : {False, True, "TeX"}
            If True, use mathtext parser. If "TeX", use tex for rendering.

        Returns
        -------
        verts : list
            A list of arrays containing the (x, y) coordinates of the
            vertices.
        codes : list
            A list of path codes.

        Examples
        --------
        Create a list of vertices and codes from a text, and create a `.Path`
        from those::

            from matplotlib.path import Path
            from matplotlib.text import TextToPath
            from matplotlib.font_manager import FontProperties

            fp = FontProperties(family="Comic Neue", style="italic")
            verts, codes = TextToPath().get_text_path(fp, "ABC")
            path = Path(verts, codes, closed=False)

        Also see `TextPath` for a more direct way to create a path from a
        text.
        """
        # Dispatch to the appropriate glyph extractor.
        if ismath == "TeX":
            glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
        elif not ismath:
            font = self._get_font(prop)
            glyph_info, glyph_map, rects = self.get_glyphs_with_font(font, s)
        else:
            glyph_info, glyph_map, rects = self.get_glyphs_mathtext(prop, s)
        verts, codes = [], []
        # Place each glyph outline at its position, applying its scale.
        for glyph_id, xposition, yposition, scale in glyph_info:
            verts1, codes1 = glyph_map[glyph_id]
            verts.extend(verts1 * scale + [xposition, yposition])
            codes.extend(codes1)
        # Append rectangles (e.g. rules/fraction bars from TeX or mathtext).
        for verts1, codes1 in rects:
            verts.extend(verts1)
            codes.extend(codes1)
        # Make sure an empty string or one with nothing to print
        # (e.g. only spaces & newlines) will be valid/empty path
        if not verts:
            verts = np.empty((0, 2))
        return verts, codes

    def get_glyphs_with_font(self, font, s, glyph_map=None,
                             return_new_glyphs_only=False):
        """
        Convert string *s* to vertices and codes using the provided ttf font.
        """
        if glyph_map is None:
            glyph_map = OrderedDict()
        if return_new_glyphs_only:
            glyph_map_new = OrderedDict()
        else:
            glyph_map_new = glyph_map
        xpositions = []
        glyph_ids = []
        for item in _text_helpers.layout(s, font):
            char_id = self._get_char_id(item.ft_object, ord(item.char))
            glyph_ids.append(char_id)
            xpositions.append(item.x)
            # Only extract the outline once per (font, char) pair.
            if char_id not in glyph_map:
                glyph_map_new[char_id] = item.ft_object.get_path()
        ypositions = [0] * len(xpositions)
        sizes = [1.] * len(xpositions)
        rects = []
        return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
                glyph_map_new, rects)

    def get_glyphs_mathtext(self, prop, s, glyph_map=None,
                            return_new_glyphs_only=False):
        """
        Parse mathtext string *s* and convert it to a (vertices, codes) pair.
        """
        prop = prop.copy()
        prop.set_size(self.FONT_SCALE)
        width, height, descent, glyphs, rects = self.mathtext_parser.parse(
            s, self.DPI, prop)
        # NOTE(review): unlike get_glyphs_with_font/get_glyphs_tex, this uses
        # a truthiness check, so a passed-in *empty* dict is replaced rather
        # than populated -- confirm whether that asymmetry is intended.
        if not glyph_map:
            glyph_map = OrderedDict()
        if return_new_glyphs_only:
            glyph_map_new = OrderedDict()
        else:
            glyph_map_new = glyph_map
        xpositions = []
        ypositions = []
        glyph_ids = []
        sizes = []
        for font, fontsize, ccode, ox, oy in glyphs:
            char_id = self._get_char_id(font, ccode)
            if char_id not in glyph_map:
                font.clear()
                font.set_size(self.FONT_SCALE, self.DPI)
                font.load_char(ccode, flags=LoadFlags.NO_HINTING)
                glyph_map_new[char_id] = font.get_path()
            xpositions.append(ox)
            ypositions.append(oy)
            glyph_ids.append(char_id)
            size = fontsize / self.FONT_SCALE
            sizes.append(size)
        myrects = []
        # Convert mathtext rules (ox, oy, w, h) into closed rectangle paths.
        for ox, oy, w, h in rects:
            vert1 = [(ox, oy), (ox, oy + h), (ox + w, oy + h),
                     (ox + w, oy), (ox, oy), (0, 0)]
            code1 = [Path.MOVETO,
                     Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
                     Path.CLOSEPOLY]
            myrects.append((vert1, code1))
        return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
                glyph_map_new, myrects)

    def get_glyphs_tex(self, prop, s, glyph_map=None,
                       return_new_glyphs_only=False):
        """Convert the string *s* to vertices and codes using usetex mode."""
        # Mostly borrowed from pdf backend.
        dvifile = TexManager().make_dvi(s, self.FONT_SCALE)
        with dviread.Dvi(dvifile, self.DPI) as dvi:
            page, = dvi
        if glyph_map is None:
            glyph_map = OrderedDict()
        if return_new_glyphs_only:
            glyph_map_new = OrderedDict()
        else:
            glyph_map_new = glyph_map
        glyph_ids, xpositions, ypositions, sizes = [], [], [], []
        # Gather font information and do some setup for combining
        # characters into strings.
        for text in page.text:
            font = get_font(text.font_path)
            char_id = self._get_char_id(font, text.glyph)
            if char_id not in glyph_map:
                font.clear()
                font.set_size(self.FONT_SCALE, self.DPI)
                glyph_name_or_index = text.glyph_name_or_index
                if isinstance(glyph_name_or_index, str):
                    index = font.get_name_index(glyph_name_or_index)
                    font.load_glyph(index, flags=LoadFlags.TARGET_LIGHT)
                elif isinstance(glyph_name_or_index, int):
                    self._select_native_charmap(font)
                    font.load_char(
                        glyph_name_or_index, flags=LoadFlags.TARGET_LIGHT)
                else:  # Should not occur.
                    raise TypeError(f"Glyph spec of unexpected type: "
                                    f"{glyph_name_or_index!r}")
                glyph_map_new[char_id] = font.get_path()
            glyph_ids.append(char_id)
            xpositions.append(text.x)
            ypositions.append(text.y)
            sizes.append(text.font_size / self.FONT_SCALE)
        myrects = []
        # DVI boxes come as (ox, oy, height, width); emit rectangle paths.
        for ox, oy, h, w in page.boxes:
            vert1 = [(ox, oy), (ox + w, oy), (ox + w, oy + h),
                     (ox, oy + h), (ox, oy), (0, 0)]
            code1 = [Path.MOVETO,
                     Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
                     Path.CLOSEPOLY]
            myrects.append((vert1, code1))
        return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
                glyph_map_new, myrects)

    @staticmethod
    def _select_native_charmap(font):
        # Select the native charmap. (we can't directly identify it but it's
        # typically an Adobe charmap).
        for charmap_code in [
                1094992451,  # ADOBE_CUSTOM.
                1094995778,  # ADOBE_STANDARD.
        ]:
            try:
                font.select_charmap(charmap_code)
            except (ValueError, RuntimeError):
                pass
            else:
                break
        else:
            _log.warning("No supported encoding in font (%s).", font.fname)
# Module-level singleton used by TextPath and the backends.
text_to_path = TextToPath()
class TextPath(Path):
    """
    Create a path from the text.
    """

    def __init__(self, xy, s, size=None, prop=None,
                 _interpolation_steps=1, usetex=False):
        r"""
        Create a path from the text. Note that it simply is a path,
        not an artist. You need to use the `.PathPatch` (or other artists)
        to draw this path onto the canvas.

        Parameters
        ----------
        xy : tuple or array of two float values
            Position of the text. For no offset, use ``xy=(0, 0)``.
        s : str
            The text to convert to a path.
        size : float, optional
            Font size in points. Defaults to the size specified via the font
            properties *prop*.
        prop : `~matplotlib.font_manager.FontProperties`, optional
            Font property. If not provided, will use a default
            `.FontProperties` with parameters from the
            :ref:`rcParams`.
        _interpolation_steps : int, optional
            (Currently ignored)
        usetex : bool, default: False
            Whether to use tex rendering.

        Examples
        --------
        The following creates a path from the string "ABC" with Helvetica
        font face; and another path from the latex fraction 1/2::

            from matplotlib.text import TextPath
            from matplotlib.font_manager import FontProperties

            fp = FontProperties(family="Helvetica", style="italic")
            path1 = TextPath((12, 12), "ABC", size=12, prop=fp)
            path2 = TextPath((0, 0), r"$\frac{1}{2}$", size=12, usetex=True)

        Also see :doc:`/gallery/text_labels_and_annotations/demo_text_path`.
        """
        # Circular import.
        from matplotlib.text import Text
        prop = FontProperties._from_any(prop)
        if size is None:
            size = prop.get_size_in_points()
        self._xy = xy
        self.set_size(size)
        # Lazily filled by _revalidate_path().
        self._cached_vertices = None
        # Reuse Text's mathtext/TeX detection so TextPath and Text agree.
        s, ismath = Text(usetex=usetex)._preprocess_math(s)
        super().__init__(
            *text_to_path.get_text_path(prop, s, ismath=ismath),
            _interpolation_steps=_interpolation_steps,
            readonly=True)
        self._should_simplify = False

    def set_size(self, size):
        """Set the text size."""
        self._size = size
        # Mark the cached vertices stale; they are rebuilt on next access.
        self._invalid = True

    def get_size(self):
        """Get the text size."""
        return self._size

    @property
    def vertices(self):
        """
        Return the cached path vertices after updating them if necessary.
        """
        self._revalidate_path()
        return self._cached_vertices

    @property
    def codes(self):
        """
        Return the codes of the path (rescaling only affects the vertices).
        """
        return self._codes

    def _revalidate_path(self):
        """
        Update the path if necessary.

        The path for the text is initially created with the font size of
        `.FONT_SCALE`, and this path is rescaled to other size when
        necessary.
        """
        if self._invalid or self._cached_vertices is None:
            tr = (Affine2D()
                  .scale(self._size / text_to_path.FONT_SCALE)
                  .translate(*self._xy))
            self._cached_vertices = tr.transform(self._vertices)
            # Freeze the cache so callers cannot mutate it in place.
            self._cached_vertices.flags.writeable = False
            self._invalid = False
venv\Lib\site-packages\matplotlib\ticker.py
"""
Tick locating and formatting
============================
This module contains classes for configuring tick locating and formatting.
Generic tick locators and formatters are provided, as well as domain specific
custom ones.
Although the locators know nothing about major or minor ticks, they are used
by the Axis class to support major and minor tick locating and formatting.
.. _tick_locating:
.. _locators:
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
`MultipleLocator`. It is initialized with a base, e.g., 10, and it picks
axis limits and ticks that are multiples of that base.
The Locator subclasses defined here are:
======================= =======================================================
`AutoLocator` `MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
`MaxNLocator` Finds up to a max number of intervals with ticks at
nice locations.
`LinearLocator` Space ticks evenly from min to max.
`LogLocator` Space ticks logarithmically from min to max.
`MultipleLocator` Ticks and range are a multiple of base; either integer
or float.
`FixedLocator` Tick locations are fixed.
`IndexLocator` Locator for index plots (e.g., where
``x = range(len(y))``).
`NullLocator` No ticks.
`SymmetricalLogLocator` Locator for use with the symlog norm; works like
`LogLocator` for the part outside of the threshold and
adds 0 if inside the limits.
`AsinhLocator` Locator for use with the asinh norm, attempting to
space ticks approximately uniformly.
`LogitLocator` Locator for logit scaling.
`AutoMinorLocator` Locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. Subdivides the major
tick interval into a specified number of minor
intervals, defaulting to 4 or 5 depending on the major
interval.
======================= =======================================================
There are a number of locators specialized for date locations - see
the :mod:`.dates` module.
You can define your own locator by deriving from Locator. You must
override the ``__call__`` method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a custom
locator and pass it to the x- or y-axis instance. The relevant methods are::
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
The default minor locator is `NullLocator`, i.e., no minor ticks on by default.
.. note::
`Locator` instances should not be used with more than one
`~matplotlib.axis.Axis` or `~matplotlib.axes.Axes`. So instead of::
locator = MultipleLocator(5)
ax.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_locator(locator)
do the following instead::
ax.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_major_locator(MultipleLocator(5))
.. _formatters:
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The formatter
operates on a single tick value and returns a string to the axis.
========================= =====================================================
`NullFormatter` No labels on the ticks.
`FixedFormatter` Set the strings manually for the labels.
`FuncFormatter` User defined function sets the labels.
`StrMethodFormatter` Use string `format` method.
`FormatStrFormatter` Use an old-style sprintf format string.
`ScalarFormatter` Default formatter for scalars: autopick the format
string.
`LogFormatter` Formatter for log axes.
`LogFormatterExponent` Format values for log axis using
``exponent = log_base(value)``.
`LogFormatterMathtext` Format values for log axis using
``exponent = log_base(value)`` using Math text.
`LogFormatterSciNotation` Format values for log axis using scientific notation.
`LogitFormatter` Probability formatter.
`EngFormatter` Format labels in engineering notation.
`PercentFormatter` Format labels as a percentage.
========================= =====================================================
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has
access to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter(xmajor_formatter)
ax.xaxis.set_minor_formatter(xminor_formatter)
ax.yaxis.set_major_formatter(ymajor_formatter)
ax.yaxis.set_minor_formatter(yminor_formatter)
In addition to a `.Formatter` instance, `~.Axis.set_major_formatter` and
`~.Axis.set_minor_formatter` also accept a ``str`` or function. ``str`` input
will be internally replaced with an autogenerated `.StrMethodFormatter` with
the input ``str``. For function input, a `.FuncFormatter` with the input
function will be generated and used.
See :doc:`/gallery/ticks/major_minor_demo` for an example of setting major
and minor ticks. See the :mod:`matplotlib.dates` module for more information
and examples of using date locators and formatters.
"""
import itertools
import logging
import locale
import math
from numbers import Integral
import string
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook
from matplotlib import transforms as mtransforms
# Module-level logger for this module.
_log = logging.getLogger(__name__)

# Public API: formatters first, then locators.
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
           'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
           'StrMethodFormatter', 'ScalarFormatter', 'LogFormatter',
           'LogFormatterExponent', 'LogFormatterMathtext',
           'LogFormatterSciNotation',
           'LogitFormatter', 'EngFormatter', 'PercentFormatter',
           'Locator', 'IndexLocator', 'FixedLocator', 'NullLocator',
           'LinearLocator', 'LogLocator', 'AutoLocator',
           'MultipleLocator', 'MaxNLocator', 'AutoMinorLocator',
           'SymmetricalLogLocator', 'AsinhLocator', 'LogitLocator')
class _DummyAxis:
__name__ = "dummy"
def __init__(self, minpos=0):
self._data_interval = (0, 1)
self._view_interval = (0, 1)
self._minpos = minpos
def get_view_interval(self):
return self._view_interval
def set_view_interval(self, vmin, vmax):
self._view_interval = (vmin, vmax)
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self._data_interval
def set_data_interval(self, vmin, vmax):
self._data_interval = (vmin, vmax)
def get_tick_space(self):
# Just use the long-standing default of nbins==9
return 9
class TickHelper:
    """
    Mixin holding the Axis that a locator/formatter operates on.

    ``axis`` is None until `set_axis` (or `create_dummy_axis`) is called.
    """

    axis = None

    def set_axis(self, axis):
        """Attach *axis* as the Axis this helper reads limits from."""
        self.axis = axis

    def create_dummy_axis(self, **kwargs):
        """Install a `_DummyAxis` when no real axis has been attached yet."""
        if self.axis is None:
            self.axis = _DummyAxis(**kwargs)
class Formatter(TickHelper):
    """
    Create a string based on a tick value and location.
    """

    # Some subclasses need to see every tick location in order to format
    # an individual one, so the locations are stored on the instance.
    locs = []

    def __call__(self, x, pos=None):
        """
        Return the format for tick value *x* at position pos.
        ``pos=None`` indicates an unspecified location.
        """
        raise NotImplementedError('Derived must override')

    def format_ticks(self, values):
        """Return the tick labels for all the ticks at once."""
        self.set_locs(values)
        labels = []
        for index, value in enumerate(values):
            labels.append(self(value, index))
        return labels

    def format_data(self, value):
        """
        Return the full string representation of the value with the
        position unspecified.
        """
        return self(value)

    def format_data_short(self, value):
        """
        Return a short string version of the tick value.

        Defaults to the position-independent long value.
        """
        return self.format_data(value)

    def get_offset(self):
        """Return the offset string; the base class has none."""
        return ''

    def set_locs(self, locs):
        """
        Set the locations of the ticks.

        This method is called before computing the tick labels because some
        formatters need to know all tick locations to do so.
        """
        self.locs = locs

    @staticmethod
    def fix_minus(s):
        """
        Some classes may want to replace a hyphen for minus with the proper
        Unicode symbol (U+2212) for typographical correctness.  This is a
        helper method to perform such a replacement when it is enabled via
        :rc:`axes.unicode_minus`.
        """
        if mpl.rcParams['axes.unicode_minus']:
            return s.replace('-', '\N{MINUS SIGN}')
        return s

    def _set_locator(self, locator):
        """Subclasses may want to override this to set a locator."""
        pass
class NullFormatter(Formatter):
    """Always return the empty string."""

    def __call__(self, x, pos=None):
        """Return the empty string regardless of *x* and *pos*."""
        return ''
class FixedFormatter(Formatter):
    """
    Return fixed strings for tick labels based only on position, not value.

    .. note::
        `.FixedFormatter` should only be used together with `.FixedLocator`.
        Otherwise, the labels may end up in unexpected positions.
    """

    def __init__(self, seq):
        """Set the sequence *seq* of strings that will be used for labels."""
        self.seq = seq
        self.offset_string = ''

    def __call__(self, x, pos=None):
        """
        Return the label that matches the position, regardless of the value.

        For positions ``pos < len(seq)``, return ``seq[i]`` regardless of
        *x*.  Otherwise return empty string.  ``seq`` is the sequence of
        strings that this object was initialized with.
        """
        if pos is not None and pos < len(self.seq):
            return self.seq[pos]
        return ''

    def get_offset(self):
        """Return the current offset string."""
        return self.offset_string

    def set_offset_string(self, ofs):
        """Set the offset string shown at the end of the axis."""
        self.offset_string = ofs
class FuncFormatter(Formatter):
    """
    Use a user-defined function for formatting.

    The function should take in two inputs (a tick value ``x`` and a
    position ``pos``), and return a string containing the corresponding
    tick label.
    """

    def __init__(self, func):
        # User-supplied callable ``func(x, pos) -> str``.
        self.func = func
        self.offset_string = ""

    def __call__(self, x, pos=None):
        """
        Return the value of the user defined function.

        *x* and *pos* are passed through as-is.
        """
        return self.func(x, pos)

    def get_offset(self):
        """Return the current offset string."""
        return self.offset_string

    def set_offset_string(self, ofs):
        """Set the offset string shown at the end of the axis."""
        self.offset_string = ofs
class FormatStrFormatter(Formatter):
    """
    Use an old-style ('%' operator) format string to format the tick.

    The format string should have a single variable format (%) in it.
    It will be applied to the value (not the position) of the tick.

    Negative numeric values (e.g., -1) will use a dash, not a Unicode minus;
    use mathtext to get a Unicode minus by wrapping the format specifier with $
    (e.g. "$%g$").
    """

    def __init__(self, fmt):
        # Old-style format string containing exactly one conversion specifier.
        self.fmt = fmt

    def __call__(self, x, pos=None):
        """
        Return the formatted label string.

        Only the value *x* is formatted.  The position is ignored.
        """
        return self.fmt % x
class _UnicodeMinusFormat(string.Formatter):
    """
    A specialized string formatter so that `.StrMethodFormatter` respects
    :rc:`axes.unicode_minus`.  This implementation relies on the fact that the
    format string is only ever called with kwargs *x* and *pos*, so it blindly
    replaces dashes by unicode minuses without further checking.
    """

    def format_field(self, value, format_spec):
        # Format normally first, then let fix_minus swap '-' for U+2212
        # when :rc:`axes.unicode_minus` is enabled.
        formatted = super().format_field(value, format_spec)
        return Formatter.fix_minus(formatted)
class StrMethodFormatter(Formatter):
    """
    Use a new-style format string (as used by `str.format`) to format the tick.

    The field used for the tick value must be labeled *x* and the field used
    for the tick position must be labeled *pos*.

    The formatter will respect :rc:`axes.unicode_minus` when formatting
    negative numeric values.

    It is typically unnecessary to explicitly construct `.StrMethodFormatter`
    objects, as `~.Axis.set_major_formatter` directly accepts the format string
    itself.
    """

    def __init__(self, fmt):
        # New-style format string with ``{x}``/``{pos}`` replacement fields.
        self.fmt = fmt

    def __call__(self, x, pos=None):
        """
        Return the formatted label string.

        *x* and *pos* are passed to `str.format` as keyword arguments
        with those exact names.
        """
        minus_aware = _UnicodeMinusFormat()
        return minus_aware.format(self.fmt, x=x, pos=pos)
class ScalarFormatter(Formatter):
    """
    Format tick values as a number.

    Parameters
    ----------
    useOffset : bool or float, default: :rc:`axes.formatter.useoffset`
        Whether to use offset notation. See `.set_useOffset`.
    useMathText : bool, default: :rc:`axes.formatter.use_mathtext`
        Whether to use fancy math formatting. See `.set_useMathText`.
    useLocale : bool, default: :rc:`axes.formatter.use_locale`.
        Whether to use locale settings for decimal sign and positive sign.
        See `.set_useLocale`.
    usetex : bool, default: :rc:`text.usetex`
        To enable/disable the use of TeX's math mode for rendering the
        numbers in the formatter.

        .. versionadded:: 3.10

    Notes
    -----
    In addition to the parameters above, the formatting of scientific vs.
    floating point representation can be configured via `.set_scientific`
    and `.set_powerlimits`.

    **Offset notation and scientific notation**

    Offset notation and scientific notation look quite similar at first sight.
    Both split some information from the formatted tick values and display it
    at the end of the axis.

    - The scientific notation splits up the order of magnitude, i.e. a
      multiplicative scaling factor, e.g. ``1e6``.

    - The offset notation separates an additive constant, e.g. ``+1e6``. The
      offset notation label is always prefixed with a ``+`` or ``-`` sign
      and is thus distinguishable from the order of magnitude label.

    The following plot with x limits ``1_000_000`` to ``1_000_010`` illustrates
    the different formatting. Note the labels at the right edge of the x axis.

    .. plot::

        lim = (1_000_000, 1_000_010)

        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, gridspec_kw={'hspace': 2})
        ax1.set(title='offset notation', xlim=lim)
        ax2.set(title='scientific notation', xlim=lim)
        ax2.xaxis.get_major_formatter().set_useOffset(False)
        ax3.set(title='floating-point notation', xlim=lim)
        ax3.xaxis.get_major_formatter().set_useOffset(False)
        ax3.xaxis.get_major_formatter().set_scientific(False)
    """

    def __init__(self, useOffset=None, useMathText=None, useLocale=None, *,
                 usetex=None):
        # Resolve rcParams-backed defaults up front; the set_* helpers then
        # normalize/validate each option.
        if useOffset is None:
            useOffset = mpl.rcParams['axes.formatter.useoffset']
        self._offset_threshold = \
            mpl.rcParams['axes.formatter.offset_threshold']
        self.set_useOffset(useOffset)
        self.set_usetex(usetex)
        self.set_useMathText(useMathText)
        # orderOfMagnitude and format are recomputed in set_locs().
        self.orderOfMagnitude = 0
        self.format = ''
        self._scientific = True
        self._powerlimits = mpl.rcParams['axes.formatter.limits']
        self.set_useLocale(useLocale)

    def get_usetex(self):
        """Return whether TeX's math mode is enabled for rendering."""
        return self._usetex

    def set_usetex(self, val):
        """Set whether to use TeX's math mode for rendering numbers in the formatter."""
        self._usetex = mpl._val_or_rc(val, 'text.usetex')

    usetex = property(fget=get_usetex, fset=set_usetex)

    def get_useOffset(self):
        """
        Return whether automatic mode for offset notation is active.

        This returns True if ``set_useOffset(True)``; it returns False if an
        explicit offset was set, e.g. ``set_useOffset(1000)``.

        See Also
        --------
        ScalarFormatter.set_useOffset
        """
        return self._useOffset

    def set_useOffset(self, val):
        """
        Set whether to use offset notation.

        When formatting a set of numbers whose value is large compared to
        their range, the formatter can separate an additive constant. This can
        shorten the formatted numbers so that they are less likely to overlap
        when drawn on an axis.

        Parameters
        ----------
        val : bool or float
            - If False, do not use offset notation.
            - If True (=automatic mode), use offset notation if it can make
              the residual numbers significantly shorter. The exact behavior
              is controlled by :rc:`axes.formatter.offset_threshold`.
            - If a number, force an offset of the given value.

        Examples
        --------
        With active offset notation, the values

        ``100_000, 100_002, 100_004, 100_006, 100_008``

        will be formatted as ``0, 2, 4, 6, 8`` plus an offset ``+1e5``, which
        is written to the edge of the axis.
        """
        # A bool enables/disables automatic mode (offset recomputed from the
        # tick locations); any other number is a fixed, user-forced offset.
        if val in [True, False]:
            self.offset = 0
            self._useOffset = val
        else:
            self._useOffset = False
            self.offset = val

    useOffset = property(fget=get_useOffset, fset=set_useOffset)

    def get_useLocale(self):
        """
        Return whether locale settings are used for formatting.

        See Also
        --------
        ScalarFormatter.set_useLocale
        """
        return self._useLocale

    def set_useLocale(self, val):
        """
        Set whether to use locale settings for decimal sign and positive sign.

        Parameters
        ----------
        val : bool or None
            *None* resets to :rc:`axes.formatter.use_locale`.
        """
        if val is None:
            self._useLocale = mpl.rcParams['axes.formatter.use_locale']
        else:
            self._useLocale = val

    useLocale = property(fget=get_useLocale, fset=set_useLocale)

    def _format_maybe_minus_and_locale(self, fmt, arg):
        """
        Format *arg* with *fmt*, applying Unicode minus and locale if desired.
        """
        return self.fix_minus(
            # Escape commas introduced by locale.format_string if using math
            # text, but not those present from the beginning in fmt.
            (",".join(locale.format_string(part, (arg,), True).replace(",", "{,}")
                      for part in fmt.split(",")) if self._useMathText
             else locale.format_string(fmt, (arg,), True))
            if self._useLocale
            else fmt % arg)

    def get_useMathText(self):
        """
        Return whether to use fancy math formatting.

        See Also
        --------
        ScalarFormatter.set_useMathText
        """
        return self._useMathText

    def set_useMathText(self, val):
        r"""
        Set whether to use fancy math formatting.

        If active, scientific notation is formatted as :math:`1.2 \times 10^3`.

        Parameters
        ----------
        val : bool or None
            *None* resets to :rc:`axes.formatter.use_mathtext`.
        """
        if val is None:
            self._useMathText = mpl.rcParams['axes.formatter.use_mathtext']
            if self._useMathText is False:
                # cmr10 renders poorly outside mathtext; warn if it is the
                # resolved font while mathtext is off.
                try:
                    from matplotlib import font_manager
                    ufont = font_manager.findfont(
                        font_manager.FontProperties(
                            family=mpl.rcParams["font.family"]
                        ),
                        fallback_to_default=False,
                    )
                except ValueError:
                    ufont = None
                if ufont == str(cbook._get_data_path("fonts/ttf/cmr10.ttf")):
                    _api.warn_external(
                        "cmr10 font should ideally be used with "
                        "mathtext, set axes.formatter.use_mathtext to True"
                    )
        else:
            self._useMathText = val

    useMathText = property(fget=get_useMathText, fset=set_useMathText)

    def __call__(self, x, pos=None):
        """
        Return the format for tick value *x* at position *pos*.
        """
        if len(self.locs) == 0:
            return ''
        else:
            # Remove the offset and scaling, then snap tiny residuals
            # (rounding noise) to exactly zero before formatting.
            xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
            if abs(xp) < 1e-8:
                xp = 0
            return self._format_maybe_minus_and_locale(self.format, xp)

    def set_scientific(self, b):
        """
        Turn scientific notation on or off.

        See Also
        --------
        ScalarFormatter.set_powerlimits
        """
        self._scientific = bool(b)

    def set_powerlimits(self, lims):
        r"""
        Set size thresholds for scientific notation.

        Parameters
        ----------
        lims : (int, int)
            A tuple *(min_exp, max_exp)* containing the powers of 10 that
            determine the switchover threshold. For a number representable as
            :math:`a \times 10^\mathrm{exp}` with :math:`1 <= |a| < 10`,
            scientific notation will be used if ``exp <= min_exp`` or
            ``exp >= max_exp``.

            The default limits are controlled by :rc:`axes.formatter.limits`.

            In particular numbers with *exp* equal to the thresholds are
            written in scientific notation.

            Typically, *min_exp* will be negative and *max_exp* will be
            positive.

            For example, ``formatter.set_powerlimits((-3, 4))`` will provide
            the following formatting:
            :math:`1 \times 10^{-3}, 9.9 \times 10^{-3}, 0.01,`
            :math:`9999, 1 \times 10^4`.

        See Also
        --------
        ScalarFormatter.set_scientific
        """
        if len(lims) != 2:
            raise ValueError("'lims' must be a sequence of length 2")
        self._powerlimits = lims

    def format_data_short(self, value):
        # docstring inherited
        if value is np.ma.masked:
            return ""
        if isinstance(value, Integral):
            fmt = "%d"
        else:
            if getattr(self.axis, "__name__", "") in ["xaxis", "yaxis"]:
                # Estimate the resolution as the data-space width of one
                # screen pixel around *value*, by mapping +-1 pixel offsets
                # back through the axis transform.
                if self.axis.__name__ == "xaxis":
                    axis_trf = self.axis.axes.get_xaxis_transform()
                    axis_inv_trf = axis_trf.inverted()
                    screen_xy = axis_trf.transform((value, 0))
                    neighbor_values = axis_inv_trf.transform(
                        screen_xy + [[-1, 0], [+1, 0]])[:, 0]
                else:  # yaxis:
                    axis_trf = self.axis.axes.get_yaxis_transform()
                    axis_inv_trf = axis_trf.inverted()
                    screen_xy = axis_trf.transform((0, value))
                    neighbor_values = axis_inv_trf.transform(
                        screen_xy + [[0, -1], [0, +1]])[:, 1]
                delta = abs(neighbor_values - value).max()
            else:
                # Rough approximation: no more than 1e4 divisions.
                a, b = self.axis.get_view_interval()
                delta = (b - a) / 1e4
            # Pick just enough significant digits to resolve *delta*.
            fmt = f"%-#.{cbook._g_sig_digits(value, delta)}g"
        return self._format_maybe_minus_and_locale(fmt, value)

    def format_data(self, value):
        # docstring inherited
        # Split value into significand s and decimal exponent e.
        e = math.floor(math.log10(abs(value)))
        s = round(value / 10**e, 10)
        significand = self._format_maybe_minus_and_locale(
            "%d" if s % 1 == 0 else "%1.10g", s)
        if e == 0:
            return significand
        exponent = self._format_maybe_minus_and_locale("%d", e)
        if self._useMathText or self._usetex:
            exponent = "10^{%s}" % exponent
            return (exponent if s == 1  # reformat 1x10^y as 10^y
                    else rf"{significand} \times {exponent}")
        else:
            return f"{significand}e{exponent}"

    def get_offset(self):
        """
        Return scientific notation, plus offset.
        """
        if len(self.locs) == 0:
            return ''
        if self.orderOfMagnitude or self.offset:
            offsetStr = ''
            sciNotStr = ''
            if self.offset:
                offsetStr = self.format_data(self.offset)
                if self.offset > 0:
                    # Positive offsets carry an explicit '+' so the offset
                    # label is distinguishable from a scale factor.
                    offsetStr = '+' + offsetStr
            if self.orderOfMagnitude:
                if self._usetex or self._useMathText:
                    sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
                else:
                    sciNotStr = '1e%d' % self.orderOfMagnitude
            if self._useMathText or self._usetex:
                if sciNotStr != '':
                    sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
                s = fr'${sciNotStr}\mathdefault{{{offsetStr}}}$'
            else:
                s = ''.join((sciNotStr, offsetStr))
            return self.fix_minus(s)
        return ''

    def set_locs(self, locs):
        # docstring inherited
        # Recompute offset, scale and format string for the new tick set.
        self.locs = locs
        if len(self.locs) > 0:
            if self._useOffset:
                self._compute_offset()
            self._set_order_of_magnitude()
            self._set_format()

    def _compute_offset(self):
        locs = self.locs
        # Restrict to visible ticks.
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        if not len(locs):
            self.offset = 0
            return
        lmin, lmax = locs.min(), locs.max()
        # Only use offset if there are at least two ticks and every tick has
        # the same sign.
        if lmin == lmax or lmin <= 0 <= lmax:
            self.offset = 0
            return
        # min, max comparing absolute values (we want division to round towards
        # zero so we work on absolute values).
        abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
        sign = math.copysign(1, lmin)
        # What is the smallest power of ten such that abs_min and abs_max are
        # equal up to that precision?
        # Note: Internally using oom instead of 10 ** oom avoids some numerical
        # accuracy issues.
        oom_max = np.ceil(math.log10(abs_max))
        oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                       if abs_min // 10 ** oom != abs_max // 10 ** oom)
        if (abs_max - abs_min) / 10 ** oom <= 1e-2:
            # Handle the case of straddling a multiple of a large power of ten
            # (relative to the span).
            # What is the smallest power of ten such that abs_min and abs_max
            # are no more than 1 apart at that precision?
            oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                           if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
        # Only use offset if it saves at least _offset_threshold digits.
        n = self._offset_threshold - 1
        self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
                       if abs_max // 10 ** oom >= 10**n
                       else 0)

    def _set_order_of_magnitude(self):
        # if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the
        # offset. When lower power limit = upper <> 0, use provided exponent.
        if not self._scientific:
            self.orderOfMagnitude = 0
            return
        if self._powerlimits[0] == self._powerlimits[1] != 0:
            # fixed scaling when lower power limit = upper <> 0.
            self.orderOfMagnitude = self._powerlimits[0]
            return
        # restrict to visible ticks
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(self.locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        locs = np.abs(locs)
        if not len(locs):
            self.orderOfMagnitude = 0
            return
        if self.offset:
            # With an offset in play, the scale is set by the span rather
            # than by the magnitude of the largest tick.
            oom = math.floor(math.log10(vmax - vmin))
        else:
            val = locs.max()
            if val == 0:
                oom = 0
            else:
                oom = math.floor(math.log10(val))
        # Apply scientific notation only outside the power-limit window.
        if oom <= self._powerlimits[0]:
            self.orderOfMagnitude = oom
        elif oom >= self._powerlimits[1]:
            self.orderOfMagnitude = oom
        else:
            self.orderOfMagnitude = 0

    def _set_format(self):
        # set the format string to format all the ticklabels
        if len(self.locs) < 2:
            # Temporarily augment the locations with the axis end points.
            _locs = [*self.locs, *self.axis.get_view_interval()]
        else:
            _locs = self.locs
        locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
        loc_range = np.ptp(locs)
        # Curvilinear coordinates can yield two identical points.
        if loc_range == 0:
            loc_range = np.max(np.abs(locs))
        # Both points might be zero.
        if loc_range == 0:
            loc_range = 1
        if len(self.locs) < 2:
            # We needed the end points only for the loc_range calculation.
            locs = locs[:-2]
        loc_range_oom = int(math.floor(math.log10(loc_range)))
        # first estimate:
        sigfigs = max(0, 3 - loc_range_oom)
        # refined estimate: drop digits while rounding at that precision
        # still reproduces every tick to within `thresh`.
        thresh = 1e-3 * 10 ** loc_range_oom
        while sigfigs >= 0:
            if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
                sigfigs -= 1
            else:
                break
        sigfigs += 1
        self.format = f'%1.{sigfigs}f'
        if self._usetex or self._useMathText:
            self.format = r'$\mathdefault{%s}$' % self.format
class LogFormatter(Formatter):
    """
    Base class for formatting ticks on a log or symlog scale.

    It may be instantiated directly, or subclassed.

    Parameters
    ----------
    base : float, default: 10.
        Base of the logarithm used in all calculations.
    labelOnlyBase : bool, default: False
        If True, label ticks only at integer powers of base.
        This is normally True for major ticks and False for
        minor ticks.
    minor_thresholds : (subset, all), default: (1, 0.4)
        If labelOnlyBase is False, these two numbers control
        the labeling of ticks that are not at integer powers of
        base; normally these are the minor ticks. The controlling
        parameter is the log of the axis data range.  In the typical
        case where base is 10 it is the number of decades spanned
        by the axis, so we can call it 'numdec'. If ``numdec <= all``,
        all minor ticks will be labeled.  If ``all < numdec <= subset``,
        then only a subset of minor ticks will be labeled, so as to
        avoid crowding. If ``numdec > subset`` then no minor ticks will
        be labeled.
    linthresh : None or float, default: None
        If a symmetric log scale is in use, its ``linthresh``
        parameter must be supplied here.

    Notes
    -----
    The `set_locs` method must be called to enable the subsetting
    logic controlled by the ``minor_thresholds`` parameter.

    In some cases such as the colorbar, there is no distinction between
    major and minor ticks; the tick locations might be set manually,
    or by a locator that puts ticks at integer powers of base and
    at intermediate locations.  For this situation, disable the
    minor_thresholds logic by using ``minor_thresholds=(np.inf, np.inf)``,
    so that all ticks will be labeled.

    To disable labeling of minor ticks when 'labelOnlyBase' is False,
    use ``minor_thresholds=(0, 0)``.  This is the default for the
    "classic" style.

    Examples
    --------
    To label a subset of minor ticks when the view limits span up
    to 2 decades, and all of the ticks when zoomed in to 0.5 decades
    or less, use ``minor_thresholds=(2, 0.5)``.

    To label all minor ticks when the view limits span up to 1.5
    decades, use ``minor_thresholds=(1.5, 1.5)``.
    """

    def __init__(self, base=10.0, labelOnlyBase=False,
                 minor_thresholds=None,
                 linthresh=None):
        self.set_base(base)
        self.set_label_minor(labelOnlyBase)
        if minor_thresholds is None:
            if mpl.rcParams['_internal.classic_mode']:
                minor_thresholds = (0, 0)
            else:
                minor_thresholds = (1, 0.4)
        self.minor_thresholds = minor_thresholds
        # _sublabels is the set of coefficients that get labels; computed in
        # set_locs(), None means "label everything".
        self._sublabels = None
        self._linthresh = linthresh

    def set_base(self, base):
        """
        Change the *base* for labeling.

        .. warning::
            Should always match the base used for :class:`LogLocator`
        """
        self._base = float(base)

    def set_label_minor(self, labelOnlyBase):
        """
        Switch minor tick labeling on or off.

        Parameters
        ----------
        labelOnlyBase : bool
            If True, label ticks only at integer powers of base.
        """
        self.labelOnlyBase = labelOnlyBase

    def set_locs(self, locs=None):
        """
        Use axis view limits to control which ticks are labeled.

        The *locs* parameter is ignored in the present algorithm.
        """
        if np.isinf(self.minor_thresholds[0]):
            # Subsetting disabled: label every tick.
            self._sublabels = None
            return
        # Handle symlog case:
        linthresh = self._linthresh
        if linthresh is None:
            try:
                linthresh = self.axis.get_transform().linthresh
            except AttributeError:
                pass
        vmin, vmax = self.axis.get_view_interval()
        if vmin > vmax:
            vmin, vmax = vmax, vmin
        if linthresh is None and vmin <= 0:
            # It's probably a colorbar with
            # a format kwarg setting a LogFormatter in the manner
            # that worked with 1.5.x, but that doesn't work now.
            self._sublabels = {1}  # label powers of base
            return
        b = self._base
        if linthresh is not None:  # symlog
            # Only compute the number of decades in the logarithmic part of the
            # axis
            numdec = 0
            if vmin < -linthresh:
                rhs = min(vmax, -linthresh)
                numdec += math.log(vmin / rhs) / math.log(b)
            if vmax > linthresh:
                lhs = max(vmin, linthresh)
                numdec += math.log(vmax / lhs) / math.log(b)
        else:
            vmin = math.log(vmin) / math.log(b)
            vmax = math.log(vmax) / math.log(b)
            numdec = abs(vmax - vmin)
        if numdec > self.minor_thresholds[0]:
            # Label only bases
            self._sublabels = {1}
        elif numdec > self.minor_thresholds[1]:
            # Add labels between bases at log-spaced coefficients;
            # include base powers in case the locations include
            # "major" and "minor" points, as in colorbar.
            c = np.geomspace(1, b, int(b)//2 + 1)
            self._sublabels = set(np.round(c))
            # For base 10, this yields (1, 2, 3, 4, 6, 10).
        else:
            # Label all integer multiples of base**n.
            self._sublabels = set(np.arange(1, b + 1))

    def _num_to_string(self, x, vmin, vmax):
        # Plain formatting for moderate values, exponent notation otherwise.
        return self._pprint_val(x, vmax - vmin) if 1 <= x <= 10000 else f"{x:1.0e}"

    def __call__(self, x, pos=None):
        # docstring inherited
        if x == 0.0:  # Symlog
            return '0'
        x = abs(x)
        b = self._base
        # only label the decades
        fx = math.log(x) / math.log(b)
        is_x_decade = _is_close_to_int(fx)
        exponent = round(fx) if is_x_decade else np.floor(fx)
        coeff = round(b ** (fx - exponent))
        if self.labelOnlyBase and not is_x_decade:
            return ''
        if self._sublabels is not None and coeff not in self._sublabels:
            return ''
        vmin, vmax = self.axis.get_view_interval()
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        s = self._num_to_string(x, vmin, vmax)
        return self.fix_minus(s)

    def format_data(self, value):
        # Temporarily allow labelling of non-base ticks so that any value can
        # be formatted, not just decades.
        with cbook._setattr_cm(self, labelOnlyBase=False):
            return cbook.strip_math(self.__call__(value))

    def format_data_short(self, value):
        # docstring inherited
        return ('%-12g' % value).rstrip()

    def _pprint_val(self, x, d):
        # Pretty-print *x* with a precision chosen from the span *d*.
        # If the number is not too big and it's an int, format it as an int.
        if abs(x) < 1e4 and x == int(x):
            return '%d' % x
        fmt = ('%1.3e' if d < 1e-2 else
               '%1.3f' if d <= 1 else
               '%1.2f' if d <= 10 else
               '%1.1f' if d <= 1e5 else
               '%1.1e')
        s = fmt % x
        tup = s.split('e')
        if len(tup) == 2:
            # Strip trailing zeros from the mantissa and drop a zero exponent.
            mantissa = tup[0].rstrip('0').rstrip('.')
            exponent = int(tup[1])
            if exponent:
                s = '%se%d' % (mantissa, exponent)
            else:
                s = mantissa
        else:
            s = s.rstrip('0').rstrip('.')
        return s
class LogFormatterExponent(LogFormatter):
    """
    Format values for log axis using ``exponent = log_base(value)``.
    """

    def _num_to_string(self, x, vmin, vmax):
        # Work in exponent space: the label shows log_b(x), not x itself.
        fx = math.log(x) / math.log(self._base)
        if not 1 <= abs(fx) <= 10000:
            return f"{fx:1.0g}"
        # Choose precision from the exponent-space span of the view interval.
        fd = math.log(vmax - vmin) / math.log(self._base)
        return self._pprint_val(fx, fd)
class LogFormatterMathtext(LogFormatter):
    """
    Format values for log axis using ``exponent = log_base(value)``.
    """

    def _non_decade_format(self, sign_string, base, fx, usetex):
        """Return string for non-decade locations."""
        return r'$\mathdefault{%s%s^{%.2f}}$' % (sign_string, base, fx)

    def __call__(self, x, pos=None):
        # docstring inherited
        if x == 0:  # Symlog
            return r'$\mathdefault{0}$'

        sign_string = '-' if x < 0 else ''
        x = abs(x)
        b = self._base

        # Position of x in exponent space; decades sit (close to) integers.
        fx = math.log(x) / math.log(b)
        is_x_decade = _is_close_to_int(fx)
        if is_x_decade:
            exponent = round(fx)
        else:
            exponent = np.floor(fx)
        coeff = round(b ** (fx - exponent))

        # Skip ticks that should remain unlabelled.
        if self.labelOnlyBase and not is_x_decade:
            return ''
        if self._sublabels is not None and coeff not in self._sublabels:
            return ''

        if is_x_decade:
            fx = round(fx)

        # use string formatting of the base if it is not an integer
        base = '%d' % b if b % 1 == 0.0 else '%s' % b

        if abs(fx) < mpl.rcParams['axes.formatter.min_exponent']:
            return r'$\mathdefault{%s%g}$' % (sign_string, x)
        if not is_x_decade:
            usetex = mpl.rcParams['text.usetex']
            return self._non_decade_format(sign_string, base, fx, usetex)
        return r'$\mathdefault{%s%s^{%d}}$' % (sign_string, base, fx)
class LogFormatterSciNotation(LogFormatterMathtext):
    """
    Format values following scientific notation in a logarithmic axis.
    """

    def _non_decade_format(self, sign_string, base, fx, usetex):
        """Return string for non-decade locations."""
        b = float(base)
        exponent = math.floor(fx)
        coeff = b ** (fx - exponent)
        # Snap near-integer coefficients to exact integers for display.
        if _is_close_to_int(coeff):
            coeff = round(coeff)
        return (r'$\mathdefault{%s%g\times%s^{%d}}$'
                % (sign_string, coeff, base, exponent))
class LogitFormatter(Formatter):
"""
Probability formatter (using Math text).
"""
def __init__(
self,
*,
use_overline=False,
one_half=r"\frac{1}{2}",
minor=False,
minor_threshold=25,
minor_number=6,
):
r"""
Parameters
----------
use_overline : bool, default: False
If x > 1/2, with x = 1 - v, indicate if x should be displayed as
$\overline{v}$. The default is to display $1 - v$.
one_half : str, default: r"\\frac{1}{2}"
The string used to represent 1/2.
minor : bool, default: False
Indicate if the formatter is formatting minor ticks or not.
Basically minor ticks are not labelled, except when only few ticks
are provided, ticks with most space with neighbor ticks are
labelled. See other parameters to change the default behavior.
minor_threshold : int, default: 25
Maximum number of locs for labelling some minor ticks. This
parameter have no effect if minor is False.
minor_number : int, default: 6
Number of ticks which are labelled when the number of ticks is
below the threshold.
"""
self._use_overline = use_overline
self._one_half = one_half
self._minor = minor
self._labelled = set()
self._minor_threshold = minor_threshold
self._minor_number = minor_number
def use_overline(self, use_overline):
r"""
Switch display mode with overline for labelling p>1/2.
Parameters
----------
use_overline : bool
If x > 1/2, with x = 1 - v, indicate if x should be displayed as
$\overline{v}$. The default is to display $1 - v$.
"""
self._use_overline = use_overline
def set_one_half(self, one_half):
r"""
Set the way one half is displayed.
one_half : str
The string used to represent 1/2.
"""
self._one_half = one_half
def set_minor_threshold(self, minor_threshold):
"""
Set the threshold for labelling minors ticks.
Parameters
----------
minor_threshold : int
Maximum number of locations for labelling some minor ticks. This
parameter have no effect if minor is False.
"""
self._minor_threshold = minor_threshold
def set_minor_number(self, minor_number):
"""
Set the number of minor ticks to label when some minor ticks are
labelled.
Parameters
----------
minor_number : int
Number of ticks which are labelled when the number of ticks is
below the threshold.
"""
self._minor_number = minor_number
def set_locs(self, locs):
self.locs = np.array(locs)
self._labelled.clear()
if not self._minor:
return None
if all(
_is_decade(x, rtol=1e-7)
or _is_decade(1 - x, rtol=1e-7)
or (_is_close_to_int(2 * x) and
int(np.round(2 * x)) == 1)
for x in locs
):
# minor ticks are subsample from ideal, so no label
return None
if len(locs) < self._minor_threshold:
if len(locs) < self._minor_number:
self._labelled.update(locs)
else:
# we do not have a lot of minor ticks, so only few decades are
# displayed, then we choose some (spaced) minor ticks to label.
# Only minor ticks are known, we assume it is sufficient to
# choice which ticks are displayed.
# For each ticks we compute the distance between the ticks and
# the previous, and between the ticks and the next one. Ticks
# with smallest minimum are chosen. As tiebreak, the ticks
# with smallest sum is chosen.
diff = np.diff(-np.log(1 / self.locs - 1))
space_pessimistic = np.minimum(
np.concatenate(((np.inf,), diff)),
np.concatenate((diff, (np.inf,))),
)
space_sum = (
np.concatenate(((0,), diff))
+ np.concatenate((diff, (0,)))
)
good_minor = sorted(
range(len(self.locs)),
key=lambda i: (space_pessimistic[i], space_sum[i]),
)[-self._minor_number:]
self._labelled.update(locs[i] for i in good_minor)
def _format_value(self, x, locs, sci_notation=True):
if sci_notation:
exponent = math.floor(np.log10(x))
min_precision = 0
else:
exponent = 0
min_precision = 1
value = x * 10 ** (-exponent)
if len(locs) < 2:
precision = min_precision
else:
diff = np.sort(np.abs(locs - x))[1]
precision = -np.log10(diff) + exponent
precision = (
int(np.round(precision))
if _is_close_to_int(precision)
else math.ceil(precision)
)
if precision < min_precision:
precision = min_precision
mantissa = r"%.*f" % (precision, value)
if not sci_notation:
return mantissa
s = r"%s\cdot10^{%d}" % (mantissa, exponent)
return s
def _one_minus(self, s):
if self._use_overline:
return r"\overline{%s}" % s
else:
return f"1-{s}"
def __call__(self, x, pos=None):
    """
    Return the TeX label for tick value *x*, or '' for unlabelled ticks.

    Values outside the open interval (0, 1) are not representable on a
    logit scale and get an empty label.
    """
    if self._minor and x not in self._labelled:
        # Minor formatter: only the subset chosen in set_locs is labelled.
        return ""
    if x <= 0 or x >= 1:
        return ""
    if _is_close_to_int(2 * x) and round(2 * x) == 1:
        # Exactly one half: special symbol.
        s = self._one_half
    elif x < 0.5 and _is_decade(x, rtol=1e-7):
        exponent = round(math.log10(x))
        s = "10^{%d}" % exponent
    elif x > 0.5 and _is_decade(1 - x, rtol=1e-7):
        exponent = round(math.log10(1 - x))
        s = self._one_minus("10^{%d}" % exponent)
    elif x < 0.1:
        s = self._format_value(x, self.locs)
    elif x > 0.9:
        # Near 1: format the complement so the precision stays meaningful.
        s = self._one_minus(self._format_value(1-x, 1-self.locs))
    else:
        s = self._format_value(x, self.locs, sci_notation=False)
    return r"$\mathdefault{%s}$" % s
def format_data_short(self, value):
    # docstring inherited
    # Thresholds chosen to use scientific notation iff exponent <= -2.
    if value >= 0.9:
        return f"1-{1 - value:e}"
    elif value >= 0.1:
        return f"{value:f}"
    else:
        return f"{value:e}"
class EngFormatter(ScalarFormatter):
    """
    Format axis values using engineering prefixes to represent powers
    of 1000, plus a specified unit, e.g., 10 MHz instead of 1e7.
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -30: "q",
        -27: "r",
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "\N{MICRO SIGN}",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y",
        27: "R",
        30: "Q"
    }

    def __init__(self, unit="", places=None, sep=" ", *, usetex=None,
                 useMathText=None, useOffset=False):
        r"""
        Parameters
        ----------
        unit : str, default: ""
            Unit symbol to use, suitable for use with single-letter
            representations of powers of 1000. For example, 'Hz' or 'm'.

        places : int, default: None
            Precision with which to display the number, specified in
            digits after the decimal point (there will be between one
            and three digits before the decimal point). If it is None,
            the formatting falls back to the floating point format '%g',
            which displays up to 6 *significant* digits, i.e. the equivalent
            value for *places* varies between 0 and 5 (inclusive).

        sep : str, default: " "
            Separator used between the value and the prefix/unit. For
            example, one get '3.14 mV' if ``sep`` is " " (default) and
            '3.14mV' if ``sep`` is "". Besides the default behavior, some
            other useful options may be:

            * ``sep=""`` to append directly the prefix/unit to the value;
            * ``sep="\N{THIN SPACE}"`` (``U+2009``);
            * ``sep="\N{NARROW NO-BREAK SPACE}"`` (``U+202F``);
            * ``sep="\N{NO-BREAK SPACE}"`` (``U+00A0``).

        usetex : bool, default: :rc:`text.usetex`
            To enable/disable the use of TeX's math mode for rendering the
            numbers in the formatter.

        useMathText : bool, default: :rc:`axes.formatter.use_mathtext`
            To enable/disable the use mathtext for rendering the numbers in
            the formatter.

        useOffset : bool or float, default: False
            Whether to use offset notation with :math:`10^{3*N}` based prefixes.
            This features allows showing an offset with standard SI order of
            magnitude prefix near the axis. Offset is computed similarly to
            how `ScalarFormatter` computes it internally, but here you are
            guaranteed to get an offset which will make the tick labels exceed
            3 digits. See also `.set_useOffset`.

            .. versionadded:: 3.10
        """
        self.unit = unit
        self.places = places
        self.sep = sep
        super().__init__(
            useOffset=useOffset,
            useMathText=useMathText,
            useLocale=False,
            usetex=usetex,
        )

    def __call__(self, x, pos=None):
        """
        Return the format for tick value *x* at position *pos*.

        If there is no currently offset in the data, it returns the best
        engineering formatting that fits the given argument, independently.
        """
        if len(self.locs) == 0 or self.offset == 0:
            return self.fix_minus(self.format_data(x))
        else:
            # An offset is active: render the shifted, rescaled value with
            # the ScalarFormatter-style format string.
            xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
            if abs(xp) < 1e-8:
                # Avoid "-0" labels caused by floating-point noise.
                xp = 0
            return self._format_maybe_minus_and_locale(self.format, xp)

    def set_locs(self, locs):
        # docstring inherited
        self.locs = locs
        if len(self.locs) > 0:
            if self._useOffset:
                self._compute_offset()
                if self.offset != 0:
                    # We don't want to use the offset computed by
                    # self._compute_offset because it rounds the offset unaware
                    # of our engineering prefixes preference, and this can
                    # cause ticks with 4+ digits to appear. These ticks are
                    # slightly less readable, so if offset is justified
                    # (decided by self._compute_offset) we set it to better
                    # value:
                    # NOTE: the view interval is only needed (and therefore
                    # only queried) when an offset is actually in effect.
                    vmin, vmax = sorted(self.axis.get_view_interval())
                    self.offset = round((vmin + vmax)/2, 3)
                    # Use log1000 to use engineers' oom standards
                    self.orderOfMagnitude = math.floor(
                        math.log(vmax - vmin, 1000))*3
            self._set_format()

    # Simplify a bit ScalarFormatter.get_offset: We always want to use
    # self.format_data. Also we want to return a non-empty string only if there
    # is an offset, no matter what is self.orderOfMagnitude. If there _is_ an
    # offset, self.orderOfMagnitude is consulted. This behavior is verified
    # in `test_ticker.py`.
    def get_offset(self):
        # docstring inherited
        # (The truthiness of self.offset used to be tested twice in a row;
        # the redundant inner check has been removed.)
        if len(self.locs) == 0 or not self.offset:
            return ''
        offsetStr = self.format_data(self.offset)
        if self.offset > 0:
            offsetStr = '+' + offsetStr
        sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
        if self._useMathText or self._usetex:
            if sciNotStr != '':
                sciNotStr = r'\times%s' % sciNotStr
            s = f'${sciNotStr}{offsetStr}$'
        else:
            s = sciNotStr + offsetStr
        return self.fix_minus(s)

    def format_eng(self, num):
        """Alias to EngFormatter.format_data"""
        return self.format_data(num)

    def format_data(self, value):
        """
        Format a number in engineering notation, appending a letter
        representing the power of 1000 of the original number.
        Some examples:

        >>> format_data(0)        # for self.places = 0
        '0'

        >>> format_data(1000000)  # for self.places = 1
        '1.0 M'

        >>> format_data(-1e-6)  # for self.places = 2
        '-1.00 \N{MICRO SIGN}'
        """
        sign = 1
        fmt = "g" if self.places is None else f".{self.places:d}f"

        if value < 0:
            sign = -1
            value = -value

        if value != 0:
            pow10 = int(math.floor(math.log10(value) / 3) * 3)
        else:
            pow10 = 0
            # Force value to zero, to avoid inconsistencies like
            # format_eng(-0) = "0" and format_eng(0.0) = "0"
            # but format_eng(-0.0) = "-0.0"
            value = 0.0

        # Clamp to the range of available SI prefixes.
        pow10 = np.clip(pow10, min(self.ENG_PREFIXES), max(self.ENG_PREFIXES))

        mant = sign * value / (10.0 ** pow10)
        # Taking care of the cases like 999.9..., which may be rounded to 1000
        # instead of 1 k.  Beware of the corner case of values that are beyond
        # the range of SI prefixes (i.e. > 'Y').
        if (abs(float(format(mant, fmt))) >= 1000
                and pow10 < max(self.ENG_PREFIXES)):
            mant /= 1000
            pow10 += 3

        unit_prefix = self.ENG_PREFIXES[int(pow10)]
        if self.unit or unit_prefix:
            suffix = f"{self.sep}{unit_prefix}{self.unit}"
        else:
            suffix = ""
        if self._usetex or self._useMathText:
            return f"${mant:{fmt}}${suffix}"
        else:
            return f"{mant:{fmt}}{suffix}"
class PercentFormatter(Formatter):
    """
    Format numbers as a percentage.

    Parameters
    ----------
    xmax : float
        Determines how the number is converted into a percentage.
        *xmax* is the data value that corresponds to 100%.
        Percentages are computed as ``x / xmax * 100``. So if the data is
        already scaled to be percentages, *xmax* will be 100. Another common
        situation is where *xmax* is 1.0.
    decimals : None or int
        The number of decimal places to place after the point.
        If *None* (the default), the number will be computed automatically.
    symbol : str or None
        A string that will be appended to the label. It may be
        *None* or empty to indicate that no symbol should be used. LaTeX
        special characters are escaped in *symbol* whenever latex mode is
        enabled, unless *is_latex* is *True*.
    is_latex : bool
        If *False*, reserved LaTeX characters in *symbol* will be escaped.
    """

    def __init__(self, xmax=100, decimals=None, symbol='%', is_latex=False):
        # "+ 0.0" coerces integer xmax to float once, up front.
        self.xmax = xmax + 0.0
        self.decimals = decimals
        self._symbol = symbol
        self._is_latex = is_latex

    def __call__(self, x, pos=None):
        """Format the tick as a percentage with the appropriate scaling."""
        lo, hi = self.axis.get_view_interval()
        span = abs(hi - lo)
        return self.fix_minus(self.format_pct(x, span))

    def format_pct(self, x, display_range):
        """
        Format the number as a percentage number with the correct
        number of decimals and adds the percent symbol, if any.

        If ``self.decimals`` is `None`, the number of digits after the
        decimal point is set based on the *display_range* of the axis
        as follows:

        ============= ======== =======================
        display_range decimals sample
        ============= ======== =======================
        >50           0        ``x = 34.5`` => 35%
        >5            1        ``x = 34.5`` => 34.5%
        >0.5          2        ``x = 34.5`` => 34.50%
        ...           ...      ...
        ============= ======== =======================

        This method will not be very good for tiny axis ranges or
        extremely large ones. It assumes that the values on the chart
        are percentages displayed on a reasonable scale.
        """
        pct = self.convert_to_pct(x)
        if self.decimals is not None:
            digits = self.decimals
        else:
            # conversion works because display_range is a difference
            scaled_range = self.convert_to_pct(display_range)
            if scaled_range <= 0:
                digits = 0
            else:
                # Luckily Python's built-in ceil rounds to +inf, not away from
                # zero. This is very important since the equation for decimals
                # starts out as `scaled_range > 0.5 * 10**(2 - decimals)`
                # and ends up with `decimals > 2 - log10(2 * scaled_range)`.
                digits = math.ceil(2.0 - math.log10(2.0 * scaled_range))
                digits = min(5, max(0, digits))
        return f'{pct:0.{int(digits)}f}' + self.symbol

    def convert_to_pct(self, x):
        """Rescale *x* so that ``self.xmax`` maps to 100."""
        return 100.0 * (x / self.xmax)

    @property
    def symbol(self):
        r"""
        The configured percent symbol as a string.

        If LaTeX is enabled via :rc:`text.usetex`, the special characters
        ``{'#', '$', '%', '&', '~', '_', '^', '\', '{', '}'}`` are
        automatically escaped in the string.
        """
        symbol = self._symbol
        if not symbol:
            return ''
        if not self._is_latex and mpl.rcParams['text.usetex']:
            # Source: http://www.personal.ceu.hu/tex/specchar.htm
            # Backslash must be first for this to work correctly since
            # it keeps getting added in
            for spec in r'\#$%&~_^{}':
                symbol = symbol.replace(spec, '\\' + spec)
        return symbol

    @symbol.setter
    def symbol(self, symbol):
        self._symbol = symbol
class Locator(TickHelper):
    """
    Determine tick locations.

    Note that the same locator should not be used across multiple
    `~matplotlib.axis.Axis` because the locator stores references to the Axis
    data and view limits.
    """

    # Some automatic tick locators can generate so many ticks they
    # kill the machine when you try and render them.
    # This parameter is set to cause locators to raise an error if too
    # many ticks are generated.
    MAXTICKS = 1000

    def tick_values(self, vmin, vmax):
        """
        Return the values of the located ticks given **vmin** and **vmax**.

        .. note::
            To get tick locations with the vmin and vmax values defined
            automatically for the associated ``axis`` simply call
            the Locator instance::

                >>> print(type(loc))
                >>> print(loc())
                [1, 2, 3, 4]
        """
        raise NotImplementedError('Derived must override')

    def set_params(self, **kwargs):
        """
        Do nothing, and raise a warning. Any locator class not supporting the
        set_params() function will call this.
        """
        _api.warn_external(
            "'set_params()' not defined for locator of type " +
            str(type(self)))

    def __call__(self):
        """Return the locations of the ticks."""
        # note: some locators return data limits, other return view limits,
        # hence there is no *one* interface to call self.tick_values.
        raise NotImplementedError('Derived must override')

    def raise_if_exceeds(self, locs):
        """
        Log at WARNING level if *locs* is longer than `Locator.MAXTICKS`.

        This is intended to be called immediately before returning *locs* from
        ``__call__`` to inform users in case their Locator returns a huge
        number of ticks, causing Matplotlib to run out of memory.

        The "strange" name of this method dates back to when it would raise an
        exception instead of emitting a log.
        """
        if len(locs) < self.MAXTICKS:
            return locs
        _log.warning(
            "Locator attempting to generate %s ticks ([%s, ..., %s]), "
            "which exceeds Locator.MAXTICKS (%s).",
            len(locs), locs[0], locs[-1], self.MAXTICKS)
        return locs

    def nonsingular(self, v0, v1):
        """
        Adjust a range as needed to avoid singularities.

        This method gets called during autoscaling, with ``(v0, v1)`` set to
        the data limits on the Axes if the Axes contains any data, or
        ``(-inf, +inf)`` if not.

        - If ``v0 == v1`` (possibly up to some floating point slop), this
          method returns an expanded interval around this value.
        - If ``(v0, v1) == (-inf, +inf)``, this method returns appropriate
          default view limits.
        - Otherwise, ``(v0, v1)`` is returned without modification.
        """
        return mtransforms.nonsingular(v0, v1, expander=.05)

    def view_limits(self, vmin, vmax):
        """
        Select a scale for the range from vmin to vmax.

        Subclasses should override this method to change locator behaviour.
        """
        return mtransforms.nonsingular(vmin, vmax)
class IndexLocator(Locator):
    """
    Place ticks at every nth point plotted.

    IndexLocator assumes index plotting; i.e., that the ticks are placed at integer
    values in the range between 0 and len(data) inclusive.
    """

    def __init__(self, base, offset):
        """Place ticks every *base* data point, starting at *offset*."""
        self._base = base
        self.offset = offset

    def set_params(self, base=None, offset=None):
        """Set parameters within this locator"""
        self._base = self._base if base is None else base
        self.offset = self.offset if offset is None else offset

    def __call__(self):
        """Return the locations of the ticks"""
        dmin, dmax = self.axis.get_data_interval()
        return self.tick_values(dmin, dmax)

    def tick_values(self, vmin, vmax):
        # Evenly spaced integer positions from vmin+offset through vmax.
        start = vmin + self.offset
        return self.raise_if_exceeds(np.arange(start, vmax + 1, self._base))
class FixedLocator(Locator):
    r"""
    Place ticks at a set of fixed values.

    If *nbins* is None ticks are placed at all values. Otherwise, the *locs* array of
    possible positions will be subsampled to keep the number of ticks
    :math:`\leq nbins + 1`. The subsampling will be done to include the smallest
    absolute value; for example, if zero is included in the array of possibilities, then
    it will be included in the chosen ticks.
    """

    def __init__(self, locs, nbins=None):
        self.locs = np.asarray(locs)
        _api.check_shape((None,), locs=self.locs)
        # A cap below 2 would not leave room for a meaningful axis.
        self.nbins = max(nbins, 2) if nbins is not None else None

    def set_params(self, nbins=None):
        """Set parameters within this locator."""
        if nbins is not None:
            self.nbins = nbins

    def __call__(self):
        return self.tick_values(None, None)

    def tick_values(self, vmin, vmax):
        """
        Return the locations of the ticks.

        .. note::
            Because the values are fixed, vmin and vmax are not used in this
            method.
        """
        if self.nbins is None:
            return self.locs
        step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)
        # Of the *step* phase-shifted subsamples, keep the one whose
        # smallest absolute value is smallest, so that e.g. a zero tick
        # survives the subsampling. Ties go to the earliest phase.
        chosen = min(
            (self.locs[i::step] for i in range(step)),
            key=lambda candidate: np.abs(candidate).min(),
        )
        return self.raise_if_exceeds(chosen)
class NullLocator(Locator):
    """
    No ticks
    """

    def __call__(self):
        return self.tick_values(None, None)

    def tick_values(self, vmin, vmax):
        """
        Return the locations of the ticks.

        .. note::
            Because the values are Null, vmin and vmax are not used in this
            method.
        """
        # Always empty: this locator suppresses ticks entirely.
        return []
class LinearLocator(Locator):
    """
    Place ticks at evenly spaced values.

    The first time this function is called it will try to set the
    number of ticks to make a nice tick partitioning.  Thereafter, the
    number of ticks will be fixed so that interactive navigation will
    be nice
    """

    def __init__(self, numticks=None, presets=None):
        """
        Parameters
        ----------
        numticks : int or None, default None
            Number of ticks. If None, *numticks* = 11.
        presets : dict or None, default: None
            Dictionary mapping ``(vmin, vmax)`` to an array of locations.
            Overrides *numticks* if there is an entry for the current
            ``(vmin, vmax)``.
        """
        self.numticks = numticks
        if presets is None:
            self.presets = {}
        else:
            self.presets = presets

    @property
    def numticks(self):
        # Old hard-coded default.
        return self._numticks if self._numticks is not None else 11

    @numticks.setter
    def numticks(self, numticks):
        self._numticks = numticks

    def set_params(self, numticks=None, presets=None):
        """Set parameters within this locator."""
        if presets is not None:
            self.presets = presets
        if numticks is not None:
            self.numticks = numticks

    def __call__(self):
        """Return the locations of the ticks."""
        vmin, vmax = self.axis.get_view_interval()
        return self.tick_values(vmin, vmax)

    def tick_values(self, vmin, vmax):
        vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
        if (vmin, vmax) in self.presets:
            # An exact preset for this interval overrides *numticks*.
            return self.presets[(vmin, vmax)]
        if self.numticks == 0:
            return []
        ticklocs = np.linspace(vmin, vmax, self.numticks)
        return self.raise_if_exceeds(ticklocs)

    def view_limits(self, vmin, vmax):
        """Try to choose the view limits intelligently."""
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
            # Expand the limits outward so that the interval length is a
            # "round" multiple of a power of (numticks - 1).
            exponent, remainder = divmod(
                math.log10(vmax - vmin), math.log10(max(self.numticks - 1, 1)))
            exponent -= (remainder < .5)
            scale = max(self.numticks - 1, 1) ** (-exponent)
            vmin = math.floor(scale * vmin) / scale
            vmax = math.ceil(scale * vmax) / scale
        return mtransforms.nonsingular(vmin, vmax)
class MultipleLocator(Locator):
    """
    Place ticks at every integer multiple of a base plus an offset.
    """

    def __init__(self, base=1.0, offset=0.0):
        """
        Parameters
        ----------
        base : float > 0, default: 1.0
            Interval between ticks.
        offset : float, default: 0.0
            Value added to each multiple of *base*.

            .. versionadded:: 3.8
        """
        # _Edge_integer provides float-precision-tolerant rounding of
        # values to integer multiples of *base*.
        self._edge = _Edge_integer(base, 0)
        self._offset = offset

    def set_params(self, base=None, offset=None):
        """
        Set parameters within this locator.

        Parameters
        ----------
        base : float > 0, optional
            Interval between ticks.
        offset : float, optional
            Value added to each multiple of *base*.

            .. versionadded:: 3.8
        """
        if base is not None:
            self._edge = _Edge_integer(base, 0)
        if offset is not None:
            self._offset = offset

    def __call__(self):
        """Return the locations of the ticks."""
        vmin, vmax = self.axis.get_view_interval()
        return self.tick_values(vmin, vmax)

    def tick_values(self, vmin, vmax):
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        step = self._edge.step
        # Work in offset-free coordinates, adding the offset back at the end.
        vmin -= self._offset
        vmax -= self._offset
        vmin = self._edge.ge(vmin) * step
        n = (vmax - vmin + 0.001 * step) // step
        # Extra ticks beyond both ends support "round_numbers" autolimits.
        locs = vmin - step + np.arange(n + 3) * step + self._offset
        return self.raise_if_exceeds(locs)

    def view_limits(self, dmin, dmax):
        """
        Set the view limits to the nearest tick values that contain the data.
        """
        if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
            vmin = self._edge.le(dmin - self._offset) * self._edge.step + self._offset
            vmax = self._edge.ge(dmax - self._offset) * self._edge.step + self._offset
            if vmin == vmax:
                vmin -= 1
                vmax += 1
        else:
            vmin = dmin
            vmax = dmax
        return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n=1, threshold=100):
    """
    Return ``(scale, offset)`` for the interval [*vmin*, *vmax*].

    *scale* is the decade of the span divided by *n*; *offset* is a decade
    near the interval midpoint, non-zero only when the midpoint is large
    relative to the span (ratio >= *threshold*).
    """
    dv = abs(vmax - vmin)  # > 0 as nonsingular is called before.
    meanv = (vmax + vmin) / 2
    if abs(meanv) / dv >= threshold:
        # Midpoint dwarfs the span: report a decade-sized offset so ticks
        # can be labelled relative to it.
        offset = math.copysign(10 ** (math.log10(abs(meanv)) // 1), meanv)
    else:
        offset = 0
    scale = 10 ** (math.log10(dv / n) // 1)
    return scale, offset
class _Edge_integer:
"""
Helper for `.MaxNLocator`, `.MultipleLocator`, etc.
Take floating-point precision limitations into account when calculating
tick locations as integer multiples of a step.
"""
def __init__(self, step, offset):
"""
Parameters
----------
step : float > 0
Interval between ticks.
offset : float
Offset subtracted from the data limits prior to calculating tick
locations.
"""
if step <= 0:
raise ValueError("'step' must be positive")
self.step = step
self._offset = abs(offset)
def closeto(self, ms, edge):
# Allow more slop when the offset is large compared to the step.
if self._offset > 0:
digits = np.log10(self._offset / self.step)
tol = max(1e-10, 10 ** (digits - 12))
tol = min(0.4999, tol)
else:
tol = 1e-10
return abs(ms - edge) < tol
def le(self, x):
"""Return the largest n: n*step <= x."""
d, m = divmod(x, self.step)
if self.closeto(m / self.step, 1):
return d + 1
return d
def ge(self, x):
"""Return the smallest n: n*step >= x."""
d, m = divmod(x, self.step)
if self.closeto(m / self.step, 0):
return d
return d + 1
class MaxNLocator(Locator):
    """
    Place evenly spaced ticks, with a cap on the total number of ticks.

    Finds nice tick locations with no more than :math:`nbins + 1` ticks being within the
    view limits. Locations beyond the limits are added to support autoscaling.
    """
    # Defaults merged with user kwargs by __init__; see set_params for the
    # meaning of each key.
    default_params = dict(nbins=10,
                          steps=None,
                          integer=False,
                          symmetric=False,
                          prune=None,
                          min_n_ticks=2)

    def __init__(self, nbins=None, **kwargs):
        """
        Parameters
        ----------
        nbins : int or 'auto', default: 10
            Maximum number of intervals; one less than max number of
            ticks. If the string 'auto', the number of bins will be
            automatically determined based on the length of the axis.

        steps : array-like, optional
            Sequence of acceptable tick multiples, starting with 1 and
            ending with 10. For example, if ``steps=[1, 2, 4, 5, 10]``,
            ``20, 40, 60`` or ``0.4, 0.6, 0.8`` would be possible
            sets of ticks because they are multiples of 2.
            ``30, 60, 90`` would not be generated because 3 does not
            appear in this example list of steps.

        integer : bool, default: False
            If True, ticks will take only integer values, provided at least
            *min_n_ticks* integers are found within the view limits.

        symmetric : bool, default: False
            If True, autoscaling will result in a range symmetric about zero.

        prune : {'lower', 'upper', 'both', None}, default: None
            Remove the 'lower' tick, the 'upper' tick, or ticks on 'both' sides
            *if they fall exactly on an axis' edge* (this typically occurs when
            :rc:`axes.autolimit_mode` is 'round_numbers'). Removing such ticks
            is mostly useful for stacked or ganged plots, where the upper tick
            of an Axes overlaps with the lower tick of the axes above it.

        min_n_ticks : int, default: 2
            Relax *nbins* and *integer* constraints if necessary to obtain
            this minimum number of ticks.
        """
        if nbins is not None:
            kwargs['nbins'] = nbins
        # Merge user kwargs over the class defaults; validation happens in
        # set_params.
        self.set_params(**{**self.default_params, **kwargs})

    @staticmethod
    def _validate_steps(steps):
        # Normalize *steps* to an increasing ndarray that starts at 1 and
        # ends at 10 (padding with those endpoints if necessary).
        if not np.iterable(steps):
            raise ValueError('steps argument must be an increasing sequence '
                             'of numbers between 1 and 10 inclusive')
        steps = np.asarray(steps)
        if np.any(np.diff(steps) <= 0) or steps[-1] > 10 or steps[0] < 1:
            raise ValueError('steps argument must be an increasing sequence '
                             'of numbers between 1 and 10 inclusive')
        if steps[0] != 1:
            steps = np.concatenate([[1], steps])
        if steps[-1] != 10:
            steps = np.concatenate([steps, [10]])
        return steps

    @staticmethod
    def _staircase(steps):
        # Make an extended staircase within which the needed step will be
        # found.  This is probably much larger than necessary.
        return np.concatenate([0.1 * steps[:-1], steps, [10 * steps[1]]])

    def set_params(self, **kwargs):
        """
        Set parameters for this locator.

        Parameters
        ----------
        nbins : int or 'auto', optional
            see `.MaxNLocator`
        steps : array-like, optional
            see `.MaxNLocator`
        integer : bool, optional
            see `.MaxNLocator`
        symmetric : bool, optional
            see `.MaxNLocator`
        prune : {'lower', 'upper', 'both', None}, optional
            see `.MaxNLocator`
        min_n_ticks : int, optional
            see `.MaxNLocator`
        """
        if 'nbins' in kwargs:
            self._nbins = kwargs.pop('nbins')
            if self._nbins != 'auto':
                self._nbins = int(self._nbins)
        if 'symmetric' in kwargs:
            self._symmetric = kwargs.pop('symmetric')
        if 'prune' in kwargs:
            prune = kwargs.pop('prune')
            _api.check_in_list(['upper', 'lower', 'both', None], prune=prune)
            self._prune = prune
        if 'min_n_ticks' in kwargs:
            self._min_n_ticks = max(1, kwargs.pop('min_n_ticks'))
        if 'steps' in kwargs:
            steps = kwargs.pop('steps')
            if steps is None:
                self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10])
            else:
                self._steps = self._validate_steps(steps)
            self._extended_steps = self._staircase(self._steps)
        if 'integer' in kwargs:
            self._integer = kwargs.pop('integer')
        if kwargs:
            # Anything left over is an unknown parameter.
            raise _api.kwarg_error("set_params", kwargs)

    def _raw_ticks(self, vmin, vmax):
        """
        Generate a list of tick locations including the range *vmin* to
        *vmax*.  In some applications, one or both of the end locations
        will not be needed, in which case they are trimmed off
        elsewhere.
        """
        if self._nbins == 'auto':
            if self.axis is not None:
                nbins = np.clip(self.axis.get_tick_space(),
                                max(1, self._min_n_ticks - 1), 9)
            else:
                nbins = 9
        else:
            nbins = self._nbins

        scale, offset = scale_range(vmin, vmax, nbins)
        # Work in offset-free coordinates to limit precision loss.
        _vmin = vmin - offset
        _vmax = vmax - offset
        steps = self._extended_steps * scale
        if self._integer:
            # For steps > 1, keep only integer values.
            igood = (steps < 1) | (np.abs(steps - np.round(steps)) < 0.001)
            steps = steps[igood]

        raw_step = ((_vmax - _vmin) / nbins)
        if hasattr(self.axis, "axes") and self.axis.axes.name == '3d':
            # Due to the change in automargin behavior in mpl3.9, we need to
            # adjust the raw step to match the mpl3.8 appearance. The zoom
            # factor of 2/48, gives us the 23/24 modifier.
            raw_step = raw_step * 23/24
        large_steps = steps >= raw_step
        if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
            # Classic round_numbers mode may require a larger step.
            # Get first multiple of steps that are <= _vmin
            floored_vmins = (_vmin // steps) * steps
            floored_vmaxs = floored_vmins + steps * nbins
            large_steps = large_steps & (floored_vmaxs >= _vmax)

        # Find index of smallest large step
        if any(large_steps):
            istep = np.nonzero(large_steps)[0][0]
        else:
            istep = len(steps) - 1

        # Start at smallest of the steps greater than the raw step, and check
        # if it provides enough ticks. If not, work backwards through
        # smaller steps until one is found that provides enough ticks.
        for step in steps[:istep+1][::-1]:
            if (self._integer and
                    np.floor(_vmax) - np.ceil(_vmin) >= self._min_n_ticks - 1):
                step = max(1, step)
            best_vmin = (_vmin // step) * step

            # Find tick locations spanning the vmin-vmax range, taking into
            # account degradation of precision when there is a large offset.
            # The edge ticks beyond vmin and/or vmax are needed for the
            # "round_numbers" autolimit mode.
            edge = _Edge_integer(step, offset)
            low = edge.le(_vmin - best_vmin)
            high = edge.ge(_vmax - best_vmin)
            ticks = np.arange(low, high + 1) * step + best_vmin
            # Count only the ticks that will be displayed.
            nticks = ((ticks <= _vmax) & (ticks >= _vmin)).sum()
            if nticks >= self._min_n_ticks:
                break
        return ticks + offset

    def __call__(self):
        vmin, vmax = self.axis.get_view_interval()
        return self.tick_values(vmin, vmax)

    def tick_values(self, vmin, vmax):
        if self._symmetric:
            # Mirror the interval about zero before locating ticks.
            vmax = max(abs(vmin), abs(vmax))
            vmin = -vmax
        vmin, vmax = mtransforms.nonsingular(
            vmin, vmax, expander=1e-13, tiny=1e-14)
        locs = self._raw_ticks(vmin, vmax)

        prune = self._prune
        if prune == 'lower':
            locs = locs[1:]
        elif prune == 'upper':
            locs = locs[:-1]
        elif prune == 'both':
            locs = locs[1:-1]
        return self.raise_if_exceeds(locs)

    def view_limits(self, dmin, dmax):
        if self._symmetric:
            dmax = max(abs(dmin), abs(dmax))
            dmin = -dmax

        dmin, dmax = mtransforms.nonsingular(
            dmin, dmax, expander=1e-12, tiny=1e-13)

        if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
            # Snap the limits to the outermost generated ticks.
            return self._raw_ticks(dmin, dmax)[[0, -1]]
        else:
            return dmin, dmax
def _is_decade(x, *, base=10, rtol=None):
"""Return True if *x* is an integer power of *base*."""
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(abs(x)) / np.log(base)
if rtol is None:
return np.isclose(lx, np.round(lx))
else:
return np.isclose(lx, np.round(lx), rtol=rtol)
def _decade_less_equal(x, base):
"""
Return the largest integer power of *base* that's less or equal to *x*.
If *x* is negative, the exponent will be *greater*.
"""
return (x if x == 0 else
-_decade_greater_equal(-x, base) if x < 0 else
base ** np.floor(np.log(x) / np.log(base)))
def _decade_greater_equal(x, base):
"""
Return the smallest integer power of *base* that's greater or equal to *x*.
If *x* is negative, the exponent will be *smaller*.
"""
return (x if x == 0 else
-_decade_less_equal(-x, base) if x < 0 else
base ** np.ceil(np.log(x) / np.log(base)))
def _decade_less(x, base):
    """
    Return the largest integer power of *base* that's less than *x*.

    If *x* is negative, the exponent will be *greater*.
    """
    if x < 0:
        return -_decade_greater(-x, base)
    candidate = _decade_less_equal(x, base)
    if candidate == x:
        # x itself is a power of base: step down one decade for strictness.
        candidate /= base
    return candidate
def _decade_greater(x, base):
    """
    Return the smallest integer power of *base* that's greater than *x*.

    If *x* is negative, the exponent will be *smaller*.
    """
    if x < 0:
        return -_decade_less(-x, base)
    candidate = _decade_greater_equal(x, base)
    if candidate == x:
        # x itself is a power of base: step up one decade for strictness.
        candidate *= base
    return candidate
def _is_close_to_int(x):
return math.isclose(x, round(x))
class LogLocator(Locator):
"""
Place logarithmically spaced ticks.
Places ticks at the values ``subs[j] * base**i``.
"""
def __init__(self, base=10.0, subs=(1.0,), *, numticks=None):
"""
Parameters
----------
base : float, default: 10.0
The base of the log used, so major ticks are placed at ``base**n``, where
``n`` is an integer.
subs : None or {'auto', 'all'} or sequence of float, default: (1.0,)
Gives the multiples of integer powers of the base at which to place ticks.
The default of ``(1.0, )`` places ticks only at integer powers of the base.
Permitted string values are ``'auto'`` and ``'all'``. Both of these use an
algorithm based on the axis view limits to determine whether and how to put
ticks between integer powers of the base:
- ``'auto'``: Ticks are placed only between integer powers.
- ``'all'``: Ticks are placed between *and* at integer powers.
- ``None``: Equivalent to ``'auto'``.
numticks : None or int, default: None
The maximum number of ticks to allow on a given axis. The default of
``None`` will try to choose intelligently as long as this Locator has
already been assigned to an axis using `~.axis.Axis.get_tick_space`, but
otherwise falls back to 9.
"""
if numticks is None:
if mpl.rcParams['_internal.classic_mode']:
numticks = 15
else:
numticks = 'auto'
self._base = float(base)
self._set_subs(subs)
self.numticks = numticks
def set_params(self, base=None, subs=None, *, numticks=None):
"""Set parameters within this locator."""
if base is not None:
self._base = float(base)
if subs is not None:
self._set_subs(subs)
if numticks is not None:
self.numticks = numticks
def _set_subs(self, subs):
"""
Set the minor ticks for the log scaling every ``base**i*subs[j]``.
"""
if subs is None: # consistency with previous bad API
self._subs = 'auto'
elif isinstance(subs, str):
_api.check_in_list(('all', 'auto'), subs=subs)
self._subs = subs
else:
try:
self._subs = np.asarray(subs, dtype=float)
except ValueError as e:
raise ValueError("subs must be None, 'all', 'auto' or "
"a sequence of floats, not "
f"{subs}.") from e
if self._subs.ndim != 1:
raise ValueError("A sequence passed to subs must be "
"1-dimensional, not "
f"{self._subs.ndim}-dimensional.")
def __call__(self):
"""Return the locations of the ticks."""
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if self.numticks == 'auto':
if self.axis is not None:
numticks = np.clip(self.axis.get_tick_space(), 2, 9)
else:
numticks = 9
else:
numticks = self.numticks
b = self._base
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore cannot be log-scaled.")
_log.debug('vmin %s vmax %s', vmin, vmax)
if vmax < vmin:
vmin, vmax = vmax, vmin
log_vmin = math.log(vmin) / math.log(b)
log_vmax = math.log(vmax) / math.log(b)
numdec = math.floor(log_vmax) - math.ceil(log_vmin)
if isinstance(self._subs, str):
if numdec > 10 or b < 3:
if self._subs == 'auto':
return np.array([]) # no minor or major ticks
else:
subs = np.array([1.0]) # major ticks
else:
_first = 2.0 if self._subs == 'auto' else 1.0
subs = np.arange(_first, b)
else:
subs = self._subs
# Get decades between major ticks.
stride = (max(math.ceil(numdec / (numticks - 1)), 1)
if mpl.rcParams['_internal.classic_mode'] else
numdec // numticks + 1)
# if we have decided that the stride is as big or bigger than
# the range, clip the stride back to the available range - 1
# with a floor of 1. This prevents getting axis with only 1 tick
# visible.
if stride >= numdec:
stride = max(1, numdec - 1)
# Does subs include anything other than 1? Essentially a hack to know
# whether we're a major or a minor locator.
have_subs = len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0)
decades = np.arange(math.floor(log_vmin) - stride,
math.ceil(log_vmax) + 2 * stride, stride)
if have_subs:
if stride == 1:
ticklocs = np.concatenate(
[subs * decade_start for decade_start in b ** decades])
else:
ticklocs = np.array([])
else:
ticklocs = b ** decades
_log.debug('ticklocs %r', ticklocs)
if (len(subs) > 1
and stride == 1
and ((vmin <= ticklocs) & (ticklocs <= vmax)).sum() <= 1):
# If we're a minor locator *that expects at least two ticks per
# decade* and the major locator stride is 1 and there's no more
# than one minor tick, switch to AutoLocator.
return AutoLocator().tick_values(vmin, vmax)
else:
return self.raise_if_exceeds(ticklocs)
def view_limits(self, vmin, vmax):
    """Choose sensible view limits for a log-scaled axis."""
    base = self._base
    lo, hi = self.nonsingular(vmin, vmax)
    # In 'round_numbers' mode, snap the limits outward to whole decades.
    if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
        lo = _decade_less_equal(lo, base)
        hi = _decade_greater_equal(hi, base)
    return lo, hi
def nonsingular(self, vmin, vmax):
    """Expand a degenerate or invalid range into a usable positive range."""
    if vmin > vmax:
        vmin, vmax = vmax, vmin
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        # No data has been plotted yet; fall back to the initial range.
        vmin, vmax = 1, 10
    elif vmax <= 0:
        _api.warn_external(
            "Data has no positive values, and therefore cannot be "
            "log-scaled.")
        vmin, vmax = 1, 10
    else:
        # Respect shared axes: use the smallest positive value any of them saw.
        minpos = min(ax.get_minpos() for ax in self.axis._get_shared_axis())
        if not np.isfinite(minpos):
            minpos = 1e-300  # Fallback that should never actually be used.
        if vmin <= 0:
            vmin = minpos
        if vmin == vmax:
            # Degenerate range: widen by one decade on each side.
            vmin = _decade_less(vmin, self._base)
            vmax = _decade_greater(vmax, self._base)
    return vmin, vmax
class SymmetricalLogLocator(Locator):
    """
    Place ticks spaced linearly near zero and spaced logarithmically beyond a threshold.
    """

    def __init__(self, transform=None, subs=None, linthresh=None, base=None):
        """
        Parameters
        ----------
        transform : `~.scale.SymmetricalLogTransform`, optional
            If set, defines the *base* and *linthresh* of the symlog transform.
        base, linthresh : float, optional
            The *base* and *linthresh* of the symlog transform, as documented
            for `.SymmetricalLogScale`. These parameters are only used if
            *transform* is not set.
        subs : sequence of float, default: [1]
            The multiples of integer powers of the base where ticks are placed,
            i.e., ticks are placed at
            ``[sub * base**i for i in ... for sub in subs]``.

        Notes
        -----
        Either *transform*, or both *base* and *linthresh*, must be given.
        """
        if transform is not None:
            # The transform carries both parameters; it takes precedence.
            self._base = transform.base
            self._linthresh = transform.linthresh
        elif linthresh is not None and base is not None:
            self._base = base
            self._linthresh = linthresh
        else:
            raise ValueError("Either transform, or both linthresh "
                             "and base, must be provided.")
        if subs is None:
            self._subs = [1.0]
        else:
            self._subs = subs
        self.numticks = 15

    def set_params(self, subs=None, numticks=None):
        """Set parameters within this locator."""
        if numticks is not None:
            self.numticks = numticks
        if subs is not None:
            self._subs = subs

    def __call__(self):
        """Return the locations of the ticks."""
        # Note, these are untransformed coordinates
        vmin, vmax = self.axis.get_view_interval()
        return self.tick_values(vmin, vmax)

    def tick_values(self, vmin, vmax):
        """Return tick locations for the (untransformed) range *vmin*..*vmax*."""
        linthresh = self._linthresh
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        # The domain is divided into three sections, only some of
        # which may actually be present.
        #
        # <======== -t ==0== t ========>
        # aaaaaaaaa bbbbb ccccccccc
        #
        # a) and c) will have ticks at integral log positions. The
        # number of ticks needs to be reduced if there are more
        # than self.numticks of them.
        #
        # b) has a tick at 0 and only 0 (we assume t is a small
        # number, and the linear segment is just an implementation
        # detail and not interesting.)
        #
        # We could also add ticks at t, but that seems to usually be
        # uninteresting.
        #
        # "simple" mode is when the range falls entirely within [-t, t]
        # -- it should just display (vmin, 0, vmax)
        if -linthresh <= vmin < vmax <= linthresh:
            # only the linear range is present
            return sorted({vmin, 0, vmax})
        # Lower log range is present
        has_a = (vmin < -linthresh)
        # Upper log range is present
        has_c = (vmax > linthresh)
        # Check if linear range is present
        has_b = (has_a and vmax > -linthresh) or (has_c and vmin < linthresh)
        base = self._base

        def get_log_range(lo, hi):
            # Integer exponent range covering [lo, hi) in the given base.
            lo = np.floor(np.log(lo) / np.log(base))
            hi = np.ceil(np.log(hi) / np.log(base))
            return lo, hi

        # Calculate all the ranges, so we can determine striding
        a_lo, a_hi = (0, 0)
        if has_a:
            a_upper_lim = min(-linthresh, vmax)
            a_lo, a_hi = get_log_range(abs(a_upper_lim), abs(vmin) + 1)
        c_lo, c_hi = (0, 0)
        if has_c:
            c_lower_lim = max(linthresh, vmin)
            c_lo, c_hi = get_log_range(c_lower_lim, vmax + 1)
        # Calculate the total number of integer exponents in a and c ranges
        total_ticks = (a_hi - a_lo) + (c_hi - c_lo)
        if has_b:
            total_ticks += 1
        stride = max(total_ticks // (self.numticks - 1), 1)
        decades = []
        if has_a:
            # Negative decades, emitted in increasing order.
            decades.extend(-1 * (base ** (np.arange(a_lo, a_hi,
                                                    stride)[::-1])))
        if has_b:
            decades.append(0.0)
        if has_c:
            decades.extend(base ** (np.arange(c_lo, c_hi, stride)))
        subs = np.asarray(self._subs)
        if len(subs) > 1 or subs[0] != 1.0:
            # Expand each decade by the requested sub-multiples (0 stays 0).
            ticklocs = []
            for decade in decades:
                if decade == 0:
                    ticklocs.append(decade)
                else:
                    ticklocs.extend(subs * decade)
        else:
            ticklocs = decades
        return self.raise_if_exceeds(np.array(ticklocs))

    def view_limits(self, vmin, vmax):
        """Try to choose the view limits intelligently."""
        b = self._base
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
            # Expand the limits outward to whole decades.
            vmin = _decade_less_equal(vmin, b)
            vmax = _decade_greater_equal(vmax, b)
        if vmin == vmax:
            vmin = _decade_less(vmin, b)
            vmax = _decade_greater(vmax, b)
        return mtransforms.nonsingular(vmin, vmax)
class AsinhLocator(Locator):
    """
    Place ticks spaced evenly on an inverse-sinh scale.

    Generally used with the `~.scale.AsinhScale` class.

    .. note::

        This API is provisional and may be revised in the future
        based on early user feedback.
    """

    def __init__(self, linear_width, numticks=11, symthresh=0.2,
                 base=10, subs=None):
        """
        Parameters
        ----------
        linear_width : float
            The scale parameter defining the extent
            of the quasi-linear region.
        numticks : int, default: 11
            The approximate number of major ticks that will fit
            along the entire axis
        symthresh : float, default: 0.2
            The fractional threshold beneath which data which covers
            a range that is approximately symmetric about zero
            will have ticks that are exactly symmetric.
        base : int, default: 10
            The number base used for rounding tick locations
            on a logarithmic scale. If this is less than one,
            then rounding is to the nearest integer multiple
            of powers of ten.
        subs : tuple, default: None
            Multiples of the number base, typically used
            for the minor ticks, e.g. (2, 5) when base=10.
        """
        super().__init__()
        self.linear_width = linear_width
        self.numticks = numticks
        self.symthresh = symthresh
        self.base = base
        self.subs = subs

    def set_params(self, numticks=None, symthresh=None,
                   base=None, subs=None):
        """Set parameters within this locator."""
        if numticks is not None:
            self.numticks = numticks
        if symthresh is not None:
            self.symthresh = symthresh
        if base is not None:
            self.base = base
        if subs is not None:
            # An empty sequence disables sub-multiples entirely.
            self.subs = subs if len(subs) > 0 else None

    def __call__(self):
        # Return tick locations for the current view interval.
        vmin, vmax = self.axis.get_view_interval()
        if (vmin * vmax) < 0 and abs(1 + vmax / vmin) < self.symthresh:
            # Data-range appears to be almost symmetric, so round up:
            bound = max(abs(vmin), abs(vmax))
            return self.tick_values(-bound, bound)
        else:
            return self.tick_values(vmin, vmax)

    def tick_values(self, vmin, vmax):
        """Return ~numticks locations, evenly spaced in asinh space."""
        # Construct a set of uniformly-spaced "on-screen" locations.
        ymin, ymax = self.linear_width * np.arcsinh(np.array([vmin, vmax])
                                                    / self.linear_width)
        ys = np.linspace(ymin, ymax, self.numticks)
        # Fractional distance of each location from zero, used to drop
        # near-zero ticks before re-adding an exact zero below.
        zero_dev = abs(ys / (ymax - ymin))
        if ymin * ymax < 0:
            # Ensure that the zero tick-mark is included, if the axis straddles zero.
            ys = np.hstack([ys[(zero_dev > 0.5 / self.numticks)], 0.0])

        # Transform the "on-screen" grid to the data space:
        xs = self.linear_width * np.sinh(ys / self.linear_width)
        zero_xs = (ys == 0)

        # Round the data-space values to be intuitive base-n numbers, keeping track of
        # positive and negative values separately and carefully treating the zero value.
        with np.errstate(divide="ignore"):  # base ** log(0) = base ** -inf = 0.
            if self.base > 1:
                pows = (np.sign(xs)
                        * self.base ** np.floor(np.log(abs(xs)) / math.log(self.base)))
                qs = np.outer(pows, self.subs).flatten() if self.subs else pows
            else:  # No need to adjust sign(pows), as it cancels out when computing qs.
                pows = np.where(zero_xs, 1, 10**np.floor(np.log10(abs(xs))))
                qs = pows * np.round(xs / pows)
        ticks = np.array(sorted(set(qs)))
        # If rounding collapsed everything, fall back to a linear subdivision.
        return ticks if len(ticks) >= 2 else np.linspace(vmin, vmax, self.numticks)
class LogitLocator(MaxNLocator):
    """
    Place ticks spaced evenly on a logit scale.
    """

    def __init__(self, minor=False, *, nbins="auto"):
        """
        Parameters
        ----------
        nbins : int or 'auto', optional
            Number of ticks. Only used if minor is False.
        minor : bool, default: False
            Indicate if this locator is for minor ticks or not.
        """
        self._minor = minor
        super().__init__(nbins=nbins, steps=[1, 2, 5, 10])

    def set_params(self, minor=None, **kwargs):
        """Set parameters within this locator."""
        if minor is not None:
            self._minor = minor
        super().set_params(**kwargs)

    @property
    def minor(self):
        # Whether this locator produces minor (True) or major (False) ticks.
        return self._minor

    @minor.setter
    def minor(self, value):
        self.set_params(minor=value)

    def tick_values(self, vmin, vmax):
        """Return tick locations for the logit-scaled range *vmin*..*vmax*."""
        # dummy axis has no axes attribute
        if hasattr(self.axis, "axes") and self.axis.axes.name == "polar":
            raise NotImplementedError("Polar axis cannot be logit scaled yet")
        if self._nbins == "auto":
            if self.axis is not None:
                nbins = self.axis.get_tick_space()
                if nbins < 2:
                    nbins = 2
            else:
                nbins = 9
        else:
            nbins = self._nbins

        # We define ideal ticks with their index:
        # linscale: ... 1e-3 1e-2 1e-1 1/2 1-1e-1 1-1e-2 1-1e-3 ...
        # b-scale : ... -3 -2 -1 0 1 2 3 ...
        def ideal_ticks(x):
            return 10 ** x if x < 0 else 1 - (10 ** (-x)) if x > 0 else 0.5

        vmin, vmax = self.nonsingular(vmin, vmax)
        # Lowest / highest ideal-tick index covering the range.
        binf = int(
            np.floor(np.log10(vmin))
            if vmin < 0.5
            else 0
            if vmin < 0.9
            else -np.ceil(np.log10(1 - vmin))
        )
        bsup = int(
            np.ceil(np.log10(vmax))
            if vmax <= 0.5
            else 1
            if vmax <= 0.9
            else -np.floor(np.log10(1 - vmax))
        )
        numideal = bsup - binf - 1
        if numideal >= 2:
            # have 2 or more wanted ideal ticks, so use them as major ticks
            if numideal > nbins:
                # too many ideal ticks; subsample them for the major ticks
                # and take the others for the minor ticks
                subsampling_factor = math.ceil(numideal / nbins)
                if self._minor:
                    ticklocs = [
                        ideal_ticks(b)
                        for b in range(binf, bsup + 1)
                        if (b % subsampling_factor) != 0
                    ]
                else:
                    ticklocs = [
                        ideal_ticks(b)
                        for b in range(binf, bsup + 1)
                        if (b % subsampling_factor) == 0
                    ]
                return self.raise_if_exceeds(np.array(ticklocs))
            if self._minor:
                # Fill between the ideal ticks with sub-decade locations.
                ticklocs = []
                for b in range(binf, bsup):
                    if b < -1:
                        ticklocs.extend(np.arange(2, 10) * 10 ** b)
                    elif b == -1:
                        ticklocs.extend(np.arange(2, 5) / 10)
                    elif b == 0:
                        ticklocs.extend(np.arange(6, 9) / 10)
                    else:
                        ticklocs.extend(
                            1 - np.arange(2, 10)[::-1] * 10 ** (-b - 1)
                        )
                return self.raise_if_exceeds(np.array(ticklocs))
            ticklocs = [ideal_ticks(b) for b in range(binf, bsup + 1)]
            return self.raise_if_exceeds(np.array(ticklocs))
        # the scale is zoomed so same ticks as linear scale can be used
        if self._minor:
            return []
        return super().tick_values(vmin, vmax)

    def nonsingular(self, vmin, vmax):
        """Clip the range into (0, 1), expanding degenerate/invalid ranges."""
        standard_minpos = 1e-7
        initial_range = (standard_minpos, 1 - standard_minpos)
        if vmin > vmax:
            vmin, vmax = vmax, vmin
        if not np.isfinite(vmin) or not np.isfinite(vmax):
            vmin, vmax = initial_range  # Initial range, no data plotted yet.
        elif vmax <= 0 or vmin >= 1:
            # vmax <= 0 occurs when all values are negative
            # vmin >= 1 occurs when all values are greater than one
            _api.warn_external(
                "Data has no values between 0 and 1, and therefore cannot be "
                "logit-scaled."
            )
            vmin, vmax = initial_range
        else:
            minpos = (
                self.axis.get_minpos()
                if self.axis is not None
                else standard_minpos
            )
            if not np.isfinite(minpos):
                minpos = standard_minpos  # This should never take effect.
            if vmin <= 0:
                vmin = minpos
            # NOTE: for vmax, we should query a property similar to get_minpos,
            # but related to the maximal, less-than-one data point.
            # Unfortunately, Bbox._minpos is defined very deep in the BBox and
            # updated with data, so for now we use 1 - minpos as a substitute.
            if vmax >= 1:
                vmax = 1 - minpos
        if vmin == vmax:
            vmin, vmax = 0.1 * vmin, 1 - 0.1 * vmin
        return vmin, vmax
class AutoLocator(MaxNLocator):
    """
    Place evenly spaced ticks, with the step size and maximum number of ticks chosen
    automatically.

    This is a subclass of `~matplotlib.ticker.MaxNLocator`, with parameters
    *nbins = 'auto'* and *steps = [1, 2, 2.5, 5, 10]*.
    """

    def __init__(self):
        """
        To know the values of the non-public parameters, please have a
        look to the defaults of `~matplotlib.ticker.MaxNLocator`.
        """
        # Classic mode reproduces the historical defaults; otherwise let
        # MaxNLocator pick the bin count itself and allow 2.5-based steps.
        classic = mpl.rcParams['_internal.classic_mode']
        super().__init__(
            nbins=9 if classic else 'auto',
            steps=[1, 2, 5, 10] if classic else [1, 2, 2.5, 5, 10])
class AutoMinorLocator(Locator):
    """
    Place evenly spaced minor ticks, with the step size and maximum number of ticks
    chosen automatically.

    The Axis must use a linear scale and have evenly spaced major ticks.
    """

    def __init__(self, n=None):
        """
        Parameters
        ----------
        n : int or 'auto', default: :rc:`xtick.minor.ndivs` or :rc:`ytick.minor.ndivs`
            The number of subdivisions of the interval between major ticks;
            e.g., n=2 will place a single minor tick midway between major ticks.

            If *n* is 'auto', it will be set to 4 or 5: if the distance
            between the major ticks equals 1, 2.5, 5 or 10 it can be perfectly
            divided in 5 equidistant sub-intervals with a length multiple of
            0.05; otherwise, it is divided in 4 sub-intervals.
        """
        self.ndivs = n

    def __call__(self):
        # docstring inherited
        if self.axis.get_scale() == 'log':
            _api.warn_external('AutoMinorLocator does not work on logarithmic scales')
            return []
        majorlocs = np.unique(self.axis.get_majorticklocs())
        if len(majorlocs) < 2:
            # Need at least two major ticks to find minor tick locations.
            # TODO: Figure out a way to still be able to display minor ticks with less
            # than two major ticks visible. For now, just display no ticks at all.
            return []
        majorstep = majorlocs[1] - majorlocs[0]
        # Resolve the subdivision count into a local instead of writing it back
        # to self.ndivs: the previous code mutated the attribute here, which
        # permanently latched the rcParams value at first draw, so later
        # rcParams changes (or reusing the locator) were silently ignored.
        ndivs = self.ndivs
        if ndivs is None:
            ndivs = mpl.rcParams[
                'ytick.minor.ndivs' if self.axis.axis_name == 'y'
                else 'xtick.minor.ndivs']  # for x and z axis
        if ndivs == 'auto':
            # 5 subdivisions if the major step "looks like" 1, 2.5, 5 or 10
            # (mantissa match), otherwise 4.
            majorstep_mantissa = 10 ** (np.log10(majorstep) % 1)
            ndivs = 5 if np.isclose(majorstep_mantissa, [1, 2.5, 5, 10]).any() else 4
        minorstep = majorstep / ndivs
        vmin, vmax = sorted(self.axis.get_view_interval())
        # Anchor the minor grid on the first major tick so minor ticks line up
        # with the major ones, then cover the whole view interval.
        t0 = majorlocs[0]
        tmin = round((vmin - t0) / minorstep)
        tmax = round((vmax - t0) / minorstep) + 1
        locs = (np.arange(tmin, tmax) * minorstep) + t0
        return self.raise_if_exceeds(locs)

    def tick_values(self, vmin, vmax):
        # Minor locations depend on the live major ticks, not on a bare range.
        raise NotImplementedError(
            f"Cannot get tick locations for a {type(self).__name__}")
venv\Lib\site-packages\matplotlib\transforms.py
"""
Matplotlib includes a framework for arbitrary geometric transformations that is used to
determine the final position of all elements drawn on the canvas.
Transforms are composed into trees of `TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data to the figure:
.. graphviz:: /api/transforms.dot
:alt: Diagram of transform tree from data to figure coordinates.
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
See the tutorial :ref:`transforms_tutorial` for examples
of how to use transforms.
"""
# Note: There are a number of places in the code where we use `np.min` or
# `np.minimum` instead of the builtin `min`, and likewise for `max`. This is
# done so that `nan`s are propagated, instead of being silently dropped.
import functools
import itertools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
DEBUG = False
def _make_str_method(*args, **kwargs):
"""
Generate a ``__str__`` method for a `.Transform` subclass.
After ::
class T:
__str__ = _make_str_method("attr", key="other")
``str(T(...))`` will be
.. code-block:: text
{type(T).__name__}(
{self.attr},
key={self.other})
"""
indent = functools.partial(textwrap.indent, prefix=" " * 4)
def strrepr(x): return repr(x) if isinstance(x, str) else str(x)
return lambda self: (
type(self).__name__ + "("
+ ",".join([*(indent("\n" + strrepr(getattr(self, arg)))
for arg in args),
*(indent("\n" + k + "=" + strrepr(getattr(self, arg)))
for k, arg in kwargs.items())])
+ ")")
class TransformNode:
    """
    The base class for anything that participates in the transform tree
    and needs to invalidate its parents or be invalidated. This includes
    classes that are not really transforms, such as bounding boxes, since some
    transforms depend on bounding boxes to compute their values.
    """

    # Invalidation may affect only the affine part. If the
    # invalidation was "affine-only", the _invalid member is set to
    # INVALID_AFFINE_ONLY

    # Possible values for the _invalid attribute.
    _VALID, _INVALID_AFFINE_ONLY, _INVALID_FULL = range(3)

    # Some metadata about the transform, used to determine whether an
    # invalidation is affine-only
    is_affine = False
    is_bbox = _api.deprecated("3.9")(_api.classproperty(lambda cls: False))

    pass_through = False
    """
    If pass_through is True, all ancestors will always be
    invalidated, even if 'self' is already invalid.
    """

    def __init__(self, shorthand_name=None):
        """
        Parameters
        ----------
        shorthand_name : str
            A string representing the "name" of the transform. The name carries
            no significance other than to improve the readability of
            ``str(transform)`` when DEBUG=True.
        """
        # Maps id(parent) -> weakref to parent; populated by set_children().
        self._parents = {}
        # Initially invalid, until first computation.
        self._invalid = self._INVALID_FULL
        self._shorthand_name = shorthand_name or ''

    if DEBUG:
        def __str__(self):
            # either just return the name of this TransformNode, or its repr
            return self._shorthand_name or repr(self)

    def __getstate__(self):
        # turn the dictionary with weak values into a normal dictionary
        return {**self.__dict__,
                '_parents': {k: v() for k, v in self._parents.items()}}

    def __setstate__(self, data_dict):
        self.__dict__ = data_dict
        # turn the normal dictionary back into a dictionary with weak values
        # The extra lambda is to provide a callback to remove dead
        # weakrefs from the dictionary when garbage collection is done.
        self._parents = {
            k: weakref.ref(v, lambda _, pop=self._parents.pop, k=k: pop(k))
            for k, v in self._parents.items() if v is not None}

    def __copy__(self):
        cls = type(self)
        other = cls.__new__(cls)
        other.__dict__.update(self.__dict__)
        # If `c = a + b; a1 = copy(a)`, then modifications to `a1` do not
        # propagate back to `c`, i.e. we need to clear the parents of `a1`.
        other._parents = {}
        # If `c = a + b; c1 = copy(c)`, then modifications to `a` also need to
        # be propagated to `c1`.
        for key, val in vars(self).items():
            if isinstance(val, TransformNode) and id(self) in val._parents:
                other.set_children(val)  # val == getattr(other, key)
        return other

    def invalidate(self):
        """
        Invalidate this `TransformNode` and triggers an invalidation of its
        ancestors. Should be called any time the transform changes.
        """
        return self._invalidate_internal(
            level=self._INVALID_AFFINE_ONLY if self.is_affine else self._INVALID_FULL,
            invalidating_node=self)

    def _invalidate_internal(self, level, invalidating_node):
        """
        Called by :meth:`invalidate` and subsequently ascends the transform
        stack calling each TransformNode's _invalidate_internal method.
        """
        # If we are already more invalid than the currently propagated invalidation,
        # then we don't need to do anything.
        if level <= self._invalid and not self.pass_through:
            return
        self._invalid = level
        for parent in list(self._parents.values()):
            parent = parent()  # Dereference the weak reference.
            if parent is not None:
                parent._invalidate_internal(level=level, invalidating_node=self)

    def set_children(self, *children):
        """
        Set the children of the transform, to let the invalidation
        system know which transforms can invalidate this transform.

        Should be called from the constructor of any transforms that
        depend on other transforms.
        """
        # Parents are stored as weak references, so that if the
        # parents are destroyed, references from the children won't
        # keep them alive.
        id_self = id(self)
        for child in children:
            # Use weak references so this dictionary won't keep obsolete nodes
            # alive; the callback deletes the dictionary entry. This is a
            # performance improvement over using WeakValueDictionary.
            ref = weakref.ref(
                self, lambda _, pop=child._parents.pop, k=id_self: pop(k))
            child._parents[id_self] = ref

    def frozen(self):
        """
        Return a frozen copy of this transform node. The frozen copy will not
        be updated when its children change. Useful for storing a previously
        known state of a transform where ``copy.deepcopy()`` might normally be
        used.
        """
        return self
class BboxBase(TransformNode):
"""
The base class of all bounding boxes.
This class is immutable; `Bbox` is a mutable subclass.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
and height, but these are not stored explicitly.
"""
is_bbox = _api.deprecated("3.9")(_api.classproperty(lambda cls: True))
is_affine = True
if DEBUG:
@staticmethod
def _check(points):
if isinstance(points, np.ma.MaskedArray):
_api.warn_external("Bbox bounds are a masked array.")
points = np.asarray(points)
if any((points[1, :] - points[0, :]) == 0):
_api.warn_external("Singular Bbox.")
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
@property
def x0(self):
"""
The first of the pair of *x* coordinates that define the bounding box.
This is not guaranteed to be less than :attr:`x1` (for that, use
:attr:`xmin`).
"""
return self.get_points()[0, 0]
@property
def y0(self):
"""
The first of the pair of *y* coordinates that define the bounding box.
This is not guaranteed to be less than :attr:`y1` (for that, use
:attr:`ymin`).
"""
return self.get_points()[0, 1]
@property
def x1(self):
"""
The second of the pair of *x* coordinates that define the bounding box.
This is not guaranteed to be greater than :attr:`x0` (for that, use
:attr:`xmax`).
"""
return self.get_points()[1, 0]
@property
def y1(self):
"""
The second of the pair of *y* coordinates that define the bounding box.
This is not guaranteed to be greater than :attr:`y0` (for that, use
:attr:`ymax`).
"""
return self.get_points()[1, 1]
@property
def p0(self):
"""
The first pair of (*x*, *y*) coordinates that define the bounding box.
This is not guaranteed to be the bottom-left corner (for that, use
:attr:`min`).
"""
return self.get_points()[0]
@property
def p1(self):
"""
The second pair of (*x*, *y*) coordinates that define the bounding box.
This is not guaranteed to be the top-right corner (for that, use
:attr:`max`).
"""
return self.get_points()[1]
@property
def xmin(self):
"""The left edge of the bounding box."""
return np.min(self.get_points()[:, 0])
@property
def ymin(self):
"""The bottom edge of the bounding box."""
return np.min(self.get_points()[:, 1])
@property
def xmax(self):
"""The right edge of the bounding box."""
return np.max(self.get_points()[:, 0])
@property
def ymax(self):
"""The top edge of the bounding box."""
return np.max(self.get_points()[:, 1])
@property
def min(self):
"""The bottom-left corner of the bounding box."""
return np.min(self.get_points(), axis=0)
@property
def max(self):
"""The top-right corner of the bounding box."""
return np.max(self.get_points(), axis=0)
@property
def intervalx(self):
"""
The pair of *x* coordinates that define the bounding box.
This is not guaranteed to be sorted from left to right.
"""
return self.get_points()[:, 0]
@property
def intervaly(self):
"""
The pair of *y* coordinates that define the bounding box.
This is not guaranteed to be sorted from bottom to top.
"""
return self.get_points()[:, 1]
@property
def width(self):
"""The (signed) width of the bounding box."""
points = self.get_points()
return points[1, 0] - points[0, 0]
@property
def height(self):
"""The (signed) height of the bounding box."""
points = self.get_points()
return points[1, 1] - points[0, 1]
@property
def size(self):
"""The (signed) width and height of the bounding box."""
points = self.get_points()
return points[1] - points[0]
@property
def bounds(self):
"""Return (:attr:`x0`, :attr:`y0`, :attr:`width`, :attr:`height`)."""
(x0, y0), (x1, y1) = self.get_points()
return (x0, y0, x1 - x0, y1 - y0)
@property
def extents(self):
"""Return (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`)."""
return self.get_points().flatten() # flatten returns a copy.
def get_points(self):
raise NotImplementedError
def containsx(self, x):
"""
Return whether *x* is in the closed (:attr:`x0`, :attr:`x1`) interval.
"""
x0, x1 = self.intervalx
return x0 <= x <= x1 or x0 >= x >= x1
def containsy(self, y):
"""
Return whether *y* is in the closed (:attr:`y0`, :attr:`y1`) interval.
"""
y0, y1 = self.intervaly
return y0 <= y <= y1 or y0 >= y >= y1
def contains(self, x, y):
"""
Return whether ``(x, y)`` is in the bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Return whether this bounding box overlaps with the other bounding box.
Parameters
----------
other : `.BboxBase`
"""
ax1, ay1, ax2, ay2 = self.extents
bx1, by1, bx2, by2 = other.extents
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2
def fully_containsx(self, x):
"""
Return whether *x* is in the open (:attr:`x0`, :attr:`x1`) interval.
"""
x0, x1 = self.intervalx
return x0 < x < x1 or x0 > x > x1
def fully_containsy(self, y):
"""
Return whether *y* is in the open (:attr:`y0`, :attr:`y1`) interval.
"""
y0, y1 = self.intervaly
return y0 < y < y1 or y0 > y > y1
def fully_contains(self, x, y):
"""
Return whether ``x, y`` is in the bounding box, but not on its edge.
"""
return self.fully_containsx(x) and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Return whether this bounding box overlaps with the other bounding box,
not including the edges.
Parameters
----------
other : `.BboxBase`
"""
ax1, ay1, ax2, ay2 = self.extents
bx1, by1, bx2, by2 = other.extents
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2
def transformed(self, transform):
"""
Construct a `Bbox` by statically transforming this one by *transform*.
"""
pts = self.get_points()
ll, ul, lr = transform.transform(np.array(
[pts[0], [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))
return Bbox([ll, [lr[0], ul[1]]])
coefs = {'C': (0.5, 0.5),
'SW': (0, 0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container):
"""
Return a copy of the `Bbox` anchored to *c* within *container*.
Parameters
----------
c : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', ...}
Either an (*x*, *y*) pair of relative coordinates (0 is left or
bottom, 1 is right or top), 'C' (center), or a cardinal direction
('SW', southwest, is bottom left, etc.).
container : `Bbox`
The box within which the `Bbox` is positioned.
See Also
--------
.Axes.set_anchor
"""
l, b, w, h = container.bounds
L, B, W, H = self.bounds
cx, cy = self.coefs[c] if isinstance(c, str) else c
return Bbox(self._points +
[(l + cx * (w - W)) - L,
(b + cy * (h - H)) - B])
def shrunk(self, mx, my):
"""
Return a copy of the `Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
"""
Return a copy of the `Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative (i.e.
fractions of a larger box such as a figure) then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
if box_aspect <= 0 or fig_aspect <= 0:
raise ValueError("'box_aspect' and 'fig_aspect' must be positive")
if container is None:
container = self
w, h = container.size
H = w * box_aspect / fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect / box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
def splitx(self, *args):
"""
Return a list of new `Bbox` objects formed by splitting the original
one with vertical lines at fractional positions given by *args*.
"""
xf = [0, *args, 1]
x0, y0, x1, y1 = self.extents
w = x1 - x0
return [Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]])
for xf0, xf1 in itertools.pairwise(xf)]
def splity(self, *args):
"""
Return a list of new `Bbox` objects formed by splitting the original
one with horizontal lines at fractional positions given by *args*.
"""
yf = [0, *args, 1]
x0, y0, x1, y1 = self.extents
h = y1 - y0
return [Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]])
for yf0, yf1 in itertools.pairwise(yf)]
def count_contains(self, vertices):
"""
Count the number of vertices contained in the `Bbox`.
Any vertices with a non-finite x or y value are ignored.
Parameters
----------
vertices : (N, 2) array
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
with np.errstate(invalid='ignore'):
return (((self.min < vertices) &
(vertices < self.max)).all(axis=1).sum())
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
Parameters
----------
bboxes : sequence of `.BboxBase`
"""
return count_bboxes_overlapping_bbox(
self, np.atleast_3d([np.array(x) for x in bboxes]))
def expanded(self, sw, sh):
"""
Construct a `Bbox` by expanding this one around its center by the
factors *sw* and *sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, w_pad, h_pad=None):
"""
Construct a `Bbox` by padding this one on all four sides.
Parameters
----------
w_pad : float
Width pad
h_pad : float, optional
Height pad. Defaults to *w_pad*.
"""
points = self.get_points()
if h_pad is None:
h_pad = w_pad
return Bbox(points + [[-w_pad, -h_pad], [w_pad, h_pad]])
def translated(self, tx, ty):
"""Construct a `Bbox` by translating this one by *tx* and *ty*."""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return the corners of this rectangle as an array of points.
Specifically, this returns the array
``[[x0, y0], [x0, y1], [x1, y0], [x1, y1]]``.
"""
(x0, y0), (x1, y1) = self.get_points()
return np.array([[x0, y0], [x0, y1], [x1, y0], [x1, y1]])
def rotated(self, radians):
"""
Return the axes-aligned bounding box that bounds the result of rotating
this `Bbox` by an angle of *radians*.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
@staticmethod
def union(bboxes):
"""Return a `Bbox` that contains all of the given *bboxes*."""
if not len(bboxes):
raise ValueError("'bboxes' cannot be empty")
x0 = np.min([bbox.xmin for bbox in bboxes])
x1 = np.max([bbox.xmax for bbox in bboxes])
y0 = np.min([bbox.ymin for bbox in bboxes])
y1 = np.max([bbox.ymax for bbox in bboxes])
return Bbox([[x0, y0], [x1, y1]])
@staticmethod
def intersection(bbox1, bbox2):
    """
    Return the intersection of *bbox1* and *bbox2* if they intersect, or
    None if they don't.
    """
    left = np.maximum(bbox1.xmin, bbox2.xmin)
    right = np.minimum(bbox1.xmax, bbox2.xmax)
    bottom = np.maximum(bbox1.ymin, bbox2.ymin)
    top = np.minimum(bbox1.ymax, bbox2.ymax)
    # A non-empty intersection requires both intervals to overlap; note the
    # positive comparisons so NaN extents also yield None.
    if left <= right and bottom <= top:
        return Bbox([[left, bottom], [right, top]])
    return None
# Initial "minimum positive value" pair for a fresh Bbox: +inf in both
# directions means "no positive coordinate seen yet".  Copied (not shared)
# into each new Bbox instance.
_default_minpos = np.array([np.inf, np.inf])
class Bbox(BboxBase):
    """
    A mutable bounding box.
    Examples
    --------
    **Create from known bounds**
    The default constructor takes the boundary "points" ``[[xmin, ymin],
    [xmax, ymax]]``.
    >>> Bbox([[1, 1], [3, 7]])
    Bbox([[1.0, 1.0], [3.0, 7.0]])
    Alternatively, a Bbox can be created from the flattened points array, the
    so-called "extents" ``(xmin, ymin, xmax, ymax)``
    >>> Bbox.from_extents(1, 1, 3, 7)
    Bbox([[1.0, 1.0], [3.0, 7.0]])
    or from the "bounds" ``(xmin, ymin, width, height)``.
    >>> Bbox.from_bounds(1, 1, 2, 6)
    Bbox([[1.0, 1.0], [3.0, 7.0]])
    **Create from collections of points**
    The "empty" object for accumulating Bboxs is the null bbox, which is a
    stand-in for the empty set.
    >>> Bbox.null()
    Bbox([[inf, inf], [-inf, -inf]])
    Adding points to the null bbox will give you the bbox of those points.
    >>> box = Bbox.null()
    >>> box.update_from_data_xy([[1, 1]])
    >>> box
    Bbox([[1.0, 1.0], [1.0, 1.0]])
    >>> box.update_from_data_xy([[2, 3], [3, 2]], ignore=False)
    >>> box
    Bbox([[1.0, 1.0], [3.0, 3.0]])
    Setting ``ignore=True`` is equivalent to starting over from a null bbox.
    >>> box.update_from_data_xy([[1, 1]], ignore=True)
    >>> box
    Bbox([[1.0, 1.0], [1.0, 1.0]])
    .. warning::
        It is recommended to always specify ``ignore`` explicitly. If not, the
        default value of ``ignore`` can be changed at any time by code with
        access to your Bbox, for example using the method `~.Bbox.ignore`.
    **Properties of the ``null`` bbox**
    .. note::
        The current behavior of `Bbox.null()` may be surprising as it does
        not have all of the properties of the "empty set", and as such does
        not behave like a "zero" object in the mathematical sense. We may
        change that in the future (with a deprecation period).
    The null bbox is the identity for intersections
    >>> Bbox.intersection(Bbox([[1, 1], [3, 7]]), Bbox.null())
    Bbox([[1.0, 1.0], [3.0, 7.0]])
    except with itself, where it returns the full space.
    >>> Bbox.intersection(Bbox.null(), Bbox.null())
    Bbox([[-inf, -inf], [inf, inf]])
    A union containing null will always return the full space (not the other
    set!)
    >>> Bbox.union([Bbox([[0, 0], [0, 0]]), Bbox.null()])
    Bbox([[-inf, -inf], [inf, inf]])
    """

    def __init__(self, points, **kwargs):
        """
        Parameters
        ----------
        points : `~numpy.ndarray`
            A (2, 2) array of the form ``[[x0, y0], [x1, y1]]``.
        """
        super().__init__(**kwargs)
        points = np.asarray(points, float)
        if points.shape != (2, 2):
            raise ValueError('Bbox points must be of the form '
                             '"[[x0, y0], [x1, y1]]".')
        self._points = points
        # Smallest positive coordinate observed per axis; starts at +inf
        # (meaning "none seen yet") and is refined by the update_from_*
        # methods.  Used by log-scale autoscaling instead of p0.
        self._minpos = _default_minpos.copy()
        # Default for the *ignore* argument of the update_from_* methods;
        # mutable via the ignore() method.
        self._ignore = True
        # it is helpful in some contexts to know if the bbox is a
        # default or has been mutated; we store the orig points to
        # support the mutated methods
        self._points_orig = self._points.copy()

    if DEBUG:
        # In DEBUG builds, wrap __init__ and invalidate with a validity
        # check on the points array (see BboxBase._check).
        ___init__ = __init__

        def __init__(self, points, **kwargs):
            self._check(points)
            self.___init__(points, **kwargs)

        def invalidate(self):
            self._check(self._points)
            super().invalidate()

    def frozen(self):
        # docstring inherited
        frozen_bbox = super().frozen()
        # Preserve the accumulated minpos on the immutable copy as well.
        frozen_bbox._minpos = self.minpos.copy()
        return frozen_bbox

    @staticmethod
    def unit():
        """Create a new unit `Bbox` from (0, 0) to (1, 1)."""
        return Bbox([[0, 0], [1, 1]])

    @staticmethod
    def null():
        """Create a new null `Bbox` from (inf, inf) to (-inf, -inf)."""
        return Bbox([[np.inf, np.inf], [-np.inf, -np.inf]])

    @staticmethod
    def from_bounds(x0, y0, width, height):
        """
        Create a new `Bbox` from *x0*, *y0*, *width* and *height*.
        *width* and *height* may be negative.
        """
        return Bbox.from_extents(x0, y0, x0 + width, y0 + height)

    @staticmethod
    def from_extents(*args, minpos=None):
        """
        Create a new Bbox from *left*, *bottom*, *right* and *top*.
        The *y*-axis increases upwards.
        Parameters
        ----------
        left, bottom, right, top : float
            The four extents of the bounding box.
        minpos : float or None
            If this is supplied, the Bbox will have a minimum positive value
            set. This is useful when dealing with logarithmic scales and other
            scales where negative bounds result in floating point errors.
        """
        bbox = Bbox(np.reshape(args, (2, 2)))
        if minpos is not None:
            bbox._minpos[:] = minpos
        return bbox

    def __format__(self, fmt):
        return (
            'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
            format(self, fmt))

    def __str__(self):
        return format(self, '')

    def __repr__(self):
        return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)

    def ignore(self, value):
        """
        Set whether the existing bounds of the box should be ignored
        by subsequent calls to :meth:`update_from_data_xy`.
        value : bool
            - When ``True``, subsequent calls to `update_from_data_xy` will
              ignore the existing bounds of the `Bbox`.
            - When ``False``, subsequent calls to `update_from_data_xy` will
              include the existing bounds of the `Bbox`.
        """
        self._ignore = value

    def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the `Bbox` to contain the vertices of the
        provided path. After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.
        Parameters
        ----------
        path : `~matplotlib.path.Path`
        ignore : bool, optional
            - When ``True``, ignore the existing bounds of the `Bbox`.
            - When ``False``, include the existing bounds of the `Bbox`.
            - When ``None``, use the last value passed to :meth:`ignore`.
        updatex, updatey : bool, default: True
            When ``True``, update the x/y values.
        """
        if ignore is None:
            ignore = self._ignore
        # An empty path cannot change the extents.
        if path.vertices.size == 0:
            return
        points, minpos, changed = update_path_extents(
            path, None, self._points, self._minpos, ignore)
        if changed:
            self.invalidate()
            # Write back only the requested axis/axes, keeping the other
            # column untouched.
            if updatex:
                self._points[:, 0] = points[:, 0]
                self._minpos[0] = minpos[0]
            if updatey:
                self._points[:, 1] = points[:, 1]
                self._minpos[1] = minpos[1]

    def update_from_data_x(self, x, ignore=None):
        """
        Update the x-bounds of the `Bbox` based on the passed in data. After
        updating, the bounds will have positive *width*, and *x0* will be the
        minimal value.
        Parameters
        ----------
        x : `~numpy.ndarray`
            Array of x-values.
        ignore : bool, optional
            - When ``True``, ignore the existing bounds of the `Bbox`.
            - When ``False``, include the existing bounds of the `Bbox`.
            - When ``None``, use the last value passed to :meth:`ignore`.
        """
        x = np.ravel(x)
        # Pair each x with a dummy y of 1; updatey=False means the dummy
        # column is never written back.
        self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),
                                 ignore=ignore, updatey=False)

    def update_from_data_y(self, y, ignore=None):
        """
        Update the y-bounds of the `Bbox` based on the passed in data. After
        updating, the bounds will have positive *height*, and *y0* will be the
        minimal value.
        Parameters
        ----------
        y : `~numpy.ndarray`
            Array of y-values.
        ignore : bool, optional
            - When ``True``, ignore the existing bounds of the `Bbox`.
            - When ``False``, include the existing bounds of the `Bbox`.
            - When ``None``, use the last value passed to :meth:`ignore`.
        """
        y = np.ravel(y)
        # Pair each y with a dummy x of 1; updatex=False means the dummy
        # column is never written back.
        self.update_from_data_xy(np.column_stack([np.ones(y.size), y]),
                                 ignore=ignore, updatex=False)

    def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
        """
        Update the `Bbox` bounds based on the passed in *xy* coordinates.
        After updating, the bounds will have positive *width* and *height*;
        *x0* and *y0* will be the minimal values.
        Parameters
        ----------
        xy : (N, 2) array-like
            The (x, y) coordinates.
        ignore : bool, optional
            - When ``True``, ignore the existing bounds of the `Bbox`.
            - When ``False``, include the existing bounds of the `Bbox`.
            - When ``None``, use the last value passed to :meth:`ignore`.
        updatex, updatey : bool, default: True
            When ``True``, update the x/y values.
        """
        if len(xy) == 0:
            return
        # Delegate to the Path-based updater, which does the real work.
        path = Path(xy)
        self.update_from_path(path, ignore=ignore,
                              updatex=updatex, updatey=updatey)

    # Each setter below writes the corresponding slot of the (2, 2) points
    # array and then invalidates so dependent transforms/bboxes refresh.
    @BboxBase.x0.setter
    def x0(self, val):
        self._points[0, 0] = val
        self.invalidate()

    @BboxBase.y0.setter
    def y0(self, val):
        self._points[0, 1] = val
        self.invalidate()

    @BboxBase.x1.setter
    def x1(self, val):
        self._points[1, 0] = val
        self.invalidate()

    @BboxBase.y1.setter
    def y1(self, val):
        self._points[1, 1] = val
        self.invalidate()

    @BboxBase.p0.setter
    def p0(self, val):
        self._points[0] = val
        self.invalidate()

    @BboxBase.p1.setter
    def p1(self, val):
        self._points[1] = val
        self.invalidate()

    @BboxBase.intervalx.setter
    def intervalx(self, interval):
        self._points[:, 0] = interval
        self.invalidate()

    @BboxBase.intervaly.setter
    def intervaly(self, interval):
        self._points[:, 1] = interval
        self.invalidate()

    @BboxBase.bounds.setter
    def bounds(self, bounds):
        l, b, w, h = bounds
        points = np.array([[l, b], [l + w, b + h]], float)
        # Only invalidate if the points actually changed.
        if np.any(self._points != points):
            self._points = points
            self.invalidate()

    @property
    def minpos(self):
        """
        The minimum positive value in both directions within the Bbox.
        This is useful when dealing with logarithmic scales and other scales
        where negative bounds result in floating point errors, and will be used
        as the minimum extent instead of *p0*.
        """
        return self._minpos

    @minpos.setter
    def minpos(self, val):
        self._minpos[:] = val

    @property
    def minposx(self):
        """
        The minimum positive value in the *x*-direction within the Bbox.
        This is useful when dealing with logarithmic scales and other scales
        where negative bounds result in floating point errors, and will be used
        as the minimum *x*-extent instead of *x0*.
        """
        return self._minpos[0]

    @minposx.setter
    def minposx(self, val):
        self._minpos[0] = val

    @property
    def minposy(self):
        """
        The minimum positive value in the *y*-direction within the Bbox.
        This is useful when dealing with logarithmic scales and other scales
        where negative bounds result in floating point errors, and will be used
        as the minimum *y*-extent instead of *y0*.
        """
        return self._minpos[1]

    @minposy.setter
    def minposy(self, val):
        self._minpos[1] = val

    def get_points(self):
        """
        Get the points of the bounding box as an array of the form
        ``[[x0, y0], [x1, y1]]``.
        """
        # A plain Bbox has no cached derived state, so reading the points
        # simply clears the invalid flag.
        self._invalid = 0
        return self._points

    def set_points(self, points):
        """
        Set the points of the bounding box directly from an array of the form
        ``[[x0, y0], [x1, y1]]``. No error checking is performed, as this
        method is mainly for internal use.
        """
        if np.any(self._points != points):
            self._points = points
            self.invalidate()

    def set(self, other):
        """
        Set this bounding box from the "frozen" bounds of another `Bbox`.
        """
        if np.any(self._points != other.get_points()):
            self._points = other.get_points()
            self.invalidate()

    def mutated(self):
        """Return whether the bbox has changed since init."""
        return self.mutatedx() or self.mutatedy()

    def mutatedx(self):
        """Return whether the x-limits have changed since init."""
        return (self._points[0, 0] != self._points_orig[0, 0] or
                self._points[1, 0] != self._points_orig[1, 0])

    def mutatedy(self):
        """Return whether the y-limits have changed since init."""
        return (self._points[0, 1] != self._points_orig[0, 1] or
                self._points[1, 1] != self._points_orig[1, 1])
class TransformedBbox(BboxBase):
    """
    A `Bbox` that is automatically transformed by a given
    transform. When either the child bounding box or transform
    changes, the bounds of this bbox will update accordingly.
    """

    def __init__(self, bbox, transform, **kwargs):
        """
        Parameters
        ----------
        bbox : `Bbox`
        transform : `Transform`
        """
        _api.check_isinstance(BboxBase, bbox=bbox)
        _api.check_isinstance(Transform, transform=transform)
        if transform.input_dims != 2 or transform.output_dims != 2:
            raise ValueError(
                "The input and output dimensions of 'transform' must be 2")
        super().__init__(**kwargs)
        self._bbox = bbox
        self._transform = transform
        # Register as a child of both inputs so invalidation of either
        # propagates to this bbox.
        self.set_children(bbox, transform)
        # Cached transformed points; computed lazily in get_points().
        self._points = None

    __str__ = _make_str_method("_bbox", "_transform")

    def get_points(self):
        # docstring inherited
        # Recompute the cached points only when an upstream change has
        # marked us invalid.
        if self._invalid:
            p = self._bbox.get_points()
            # Transform all four points, then make a new bounding box
            # from the result, taking care to make the orientation the
            # same.
            points = self._transform.transform(
                [[p[0, 0], p[0, 1]],
                 [p[1, 0], p[0, 1]],
                 [p[0, 0], p[1, 1]],
                 [p[1, 0], p[1, 1]]])
            # Masked results are filled with 0.0 so min/max below stay
            # well defined.
            points = np.ma.filled(points, 0.0)
            xs = min(points[:, 0]), max(points[:, 0])
            if p[0, 0] > p[1, 0]:
                xs = xs[::-1]
            ys = min(points[:, 1]), max(points[:, 1])
            if p[0, 1] > p[1, 1]:
                ys = ys[::-1]
            self._points = np.array([
                [xs[0], ys[0]],
                [xs[1], ys[1]]
            ])
            self._invalid = 0
        return self._points

    if DEBUG:
        # In DEBUG builds, validate the computed points on every access.
        _get_points = get_points

        def get_points(self):
            points = self._get_points()
            self._check(points)
            return points

    def contains(self, x, y):
        # Docstring inherited.
        # Test in the child's coordinate system by inverting the transform.
        return self._bbox.contains(*self._transform.inverted().transform((x, y)))

    def fully_contains(self, x, y):
        # Docstring inherited.
        return self._bbox.fully_contains(*self._transform.inverted().transform((x, y)))
class LockableBbox(BboxBase):
    """
    A `Bbox` where some elements may be locked at certain values.
    When the child bounding box changes, the bounds of this bbox will update
    accordingly with the exception of the locked elements.
    """

    def __init__(self, bbox, x0=None, y0=None, x1=None, y1=None, **kwargs):
        """
        Parameters
        ----------
        bbox : `Bbox`
            The child bounding box to wrap.
        x0 : float or None
            The locked value for x0, or None to leave unlocked.
        y0 : float or None
            The locked value for y0, or None to leave unlocked.
        x1 : float or None
            The locked value for x1, or None to leave unlocked.
        y1 : float or None
            The locked value for y1, or None to leave unlocked.
        """
        _api.check_isinstance(BboxBase, bbox=bbox)
        super().__init__(**kwargs)
        self._bbox = bbox
        self.set_children(bbox)
        # Cached combined points; computed lazily in get_points().
        self._points = None
        fp = [x0, y0, x1, y1]
        # Masked entries (mask=True) are *unlocked*: they track the child
        # bbox.  Unmasked entries hold the locked value.
        mask = [val is None for val in fp]
        self._locked_points = np.ma.array(fp, float, mask=mask).reshape((2, 2))

    __str__ = _make_str_method("_bbox", "_locked_points")

    def get_points(self):
        # docstring inherited
        if self._invalid:
            points = self._bbox.get_points()
            # Where the lock mask is True (unlocked) take the child's
            # value; elsewhere keep the locked value.
            self._points = np.where(self._locked_points.mask,
                                    points,
                                    self._locked_points)
            self._invalid = 0
        return self._points

    if DEBUG:
        # In DEBUG builds, validate the computed points on every access.
        _get_points = get_points

        def get_points(self):
            points = self._get_points()
            self._check(points)
            return points

    # Each locked_* property exposes one element: reading returns None when
    # unlocked; writing None unlocks, any other value locks, and both
    # invalidate so the combined points are recomputed.
    @property
    def locked_x0(self):
        """
        float or None: The value used for the locked x0.
        """
        if self._locked_points.mask[0, 0]:
            return None
        else:
            return self._locked_points[0, 0]

    @locked_x0.setter
    def locked_x0(self, x0):
        self._locked_points.mask[0, 0] = x0 is None
        self._locked_points.data[0, 0] = x0
        self.invalidate()

    @property
    def locked_y0(self):
        """
        float or None: The value used for the locked y0.
        """
        if self._locked_points.mask[0, 1]:
            return None
        else:
            return self._locked_points[0, 1]

    @locked_y0.setter
    def locked_y0(self, y0):
        self._locked_points.mask[0, 1] = y0 is None
        self._locked_points.data[0, 1] = y0
        self.invalidate()

    @property
    def locked_x1(self):
        """
        float or None: The value used for the locked x1.
        """
        if self._locked_points.mask[1, 0]:
            return None
        else:
            return self._locked_points[1, 0]

    @locked_x1.setter
    def locked_x1(self, x1):
        self._locked_points.mask[1, 0] = x1 is None
        self._locked_points.data[1, 0] = x1
        self.invalidate()

    @property
    def locked_y1(self):
        """
        float or None: The value used for the locked y1.
        """
        if self._locked_points.mask[1, 1]:
            return None
        else:
            return self._locked_points[1, 1]

    @locked_y1.setter
    def locked_y1(self, y1):
        self._locked_points.mask[1, 1] = y1 is None
        self._locked_points.data[1, 1] = y1
        self.invalidate()
class Transform(TransformNode):
    """
    The base class of all `TransformNode` instances that
    actually perform a transformation.
    All non-affine transformations should be subclasses of this class.
    New affine transformations should be subclasses of `Affine2D`.
    Subclasses of this class should override the following members (at
    minimum):
    - :attr:`input_dims`
    - :attr:`output_dims`
    - :meth:`transform`
    - :meth:`inverted` (if an inverse exists)
    The following attributes may be overridden if the default is unsuitable:
    - :attr:`is_separable` (defaults to True for 1D -> 1D transforms, False
      otherwise)
    - :attr:`has_inverse` (defaults to True if :meth:`inverted` is overridden,
      False otherwise)
    If the transform needs to do something non-standard with
    `matplotlib.path.Path` objects, such as adding curves
    where there were once line segments, it should override:
    - :meth:`transform_path`
    """

    input_dims = None
    """
    The number of input dimensions of this transform.
    Must be overridden (with integers) in the subclass.
    """

    output_dims = None
    """
    The number of output dimensions of this transform.
    Must be overridden (with integers) in the subclass.
    """

    is_separable = False
    """True if this transform is separable in the x- and y- dimensions."""

    has_inverse = False
    """True if this transform has a corresponding inverse transform."""

    def __init_subclass__(cls):
        # 1d transforms are always separable; we assume higher-dimensional ones
        # are not but subclasses can also directly set is_separable -- this is
        # verified by checking whether "is_separable" appears more than once in
        # the class's MRO (it appears once in Transform).
        if (sum("is_separable" in vars(parent) for parent in cls.__mro__) == 1
                and cls.input_dims == cls.output_dims == 1):
            cls.is_separable = True
        # Transform.inverted raises NotImplementedError; we assume that if this
        # is overridden then the transform is invertible but subclass can also
        # directly set has_inverse.
        if (sum("has_inverse" in vars(parent) for parent in cls.__mro__) == 1
                and hasattr(cls, "inverted")
                and cls.inverted is not Transform.inverted):
            cls.has_inverse = True

    def __add__(self, other):
        """
        Compose two transforms together so that *self* is followed by *other*.
        ``A + B`` returns a transform ``C`` so that
        ``C.transform(x) == B.transform(A.transform(x))``.
        """
        return (composite_transform_factory(self, other)
                if isinstance(other, Transform) else
                NotImplemented)

    # Equality is based on object identity for `Transform`s (so we don't
    # override `__eq__`), but some subclasses, such as TransformWrapper &
    # AffineBase, override this behavior.

    def _iter_break_from_left_to_right(self):
        """
        Return an iterator breaking down this transform stack from left to
        right recursively. If self == ((A, N), A) then the result will be an
        iterator which yields I : ((A, N), A), followed by A : (N, A),
        followed by (A, N) : (A), but not ((A, N), A) : I.
        This is equivalent to flattening the stack then yielding
        ``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
        """
        # Base implementation: a leaf transform splits only as I + self.
        yield IdentityTransform(), self

    @property
    def depth(self):
        """
        Return the number of transforms which have been chained
        together to form this Transform instance.
        .. note::
            For the special case of a Composite transform, the maximum depth
            of the two is returned.
        """
        return 1

    def contains_branch(self, other):
        """
        Return whether the given transform is a sub-tree of this transform.
        This routine uses transform equality to identify sub-trees, therefore
        in many situations it is object id which will be used.
        For the case where the given transform represents the whole
        of this transform, returns True.
        """
        if self.depth < other.depth:
            return False
        # check that a subtree is equal to other (starting from self)
        for _, sub_tree in self._iter_break_from_left_to_right():
            if sub_tree == other:
                return True
        return False

    def contains_branch_seperately(self, other_transform):
        """
        Return whether the given branch is a sub-tree of this transform on
        each separate dimension.
        A common use for this method is to identify if a transform is a blended
        transform containing an Axes' data transform. e.g.::
            x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
        """
        if self.output_dims != 2:
            raise ValueError('contains_branch_seperately only supports '
                             'transforms with 2 output dimensions')
        # for a non-blended transform each separate dimension is the same, so
        # just return the appropriate shape.
        return (self.contains_branch(other_transform), ) * 2

    def __sub__(self, other):
        """
        Compose *self* with the inverse of *other*, cancelling identical terms
        if any::
            # In general:
            A - B == A + B.inverted()
            # (but see note regarding frozen transforms below).
            # If A "ends with" B (i.e. A == A' + B for some A') we can cancel
            # out B:
            (A' + B) - B == A'
            # Likewise, if B "starts with" A (B = A + B'), we can cancel out A:
            A - (A + B') == B'.inverted() == B'^-1
        Cancellation (rather than naively returning ``A + B.inverted()``) is
        important for multiple reasons:
        - It avoids floating-point inaccuracies when computing the inverse of
          B: ``B - B`` is guaranteed to cancel out exactly (resulting in the
          identity transform), whereas ``B + B.inverted()`` may differ by a
          small epsilon.
        - ``B.inverted()`` always returns a frozen transform: if one computes
          ``A + B + B.inverted()`` and later mutates ``B``, then
          ``B.inverted()`` won't be updated and the last two terms won't cancel
          out anymore; on the other hand, ``A + B - B`` will always be equal to
          ``A`` even if ``B`` is mutated.
        """
        # we only know how to do this operation if other is a Transform.
        if not isinstance(other, Transform):
            return NotImplemented
        # First try to cancel *other* off the right end of *self*.
        for remainder, sub_tree in self._iter_break_from_left_to_right():
            if sub_tree == other:
                return remainder
        # Then try to cancel *self* off the left end of *other*.
        for remainder, sub_tree in other._iter_break_from_left_to_right():
            if sub_tree == self:
                if not remainder.has_inverse:
                    raise ValueError(
                        "The shortcut cannot be computed since 'other' "
                        "includes a non-invertible component")
                return remainder.inverted()
        # if we have got this far, then there was no shortcut possible
        if other.has_inverse:
            return self + other.inverted()
        else:
            raise ValueError('It is not possible to compute transA - transB '
                             'since transB cannot be inverted and there is no '
                             'shortcut possible.')

    def __array__(self, *args, **kwargs):
        """Array interface to get at this Transform's affine matrix."""
        return self.get_affine().get_matrix()

    def transform(self, values):
        """
        Apply this transformation on the given array of *values*.
        Parameters
        ----------
        values : array-like
            The input values as an array of length :attr:`input_dims` or
            shape (N, :attr:`input_dims`).
        Returns
        -------
        array
            The output values as an array of length :attr:`output_dims` or
            shape (N, :attr:`output_dims`), depending on the input.
        """
        # Ensure that values is a 2d array (but remember whether
        # we started with a 1d or 2d array).
        values = np.asanyarray(values)
        ndim = values.ndim
        values = values.reshape((-1, self.input_dims))
        # Transform the values
        res = self.transform_affine(self.transform_non_affine(values))
        # Convert the result back to the shape of the input values.
        if ndim == 0:
            assert not np.ma.is_masked(res)  # just to be on the safe side
            return res[0, 0]
        if ndim == 1:
            return res.reshape(-1)
        elif ndim == 2:
            return res
        raise ValueError(
            "Input values must have shape (N, {dims}) or ({dims},)"
            .format(dims=self.input_dims))

    def transform_affine(self, values):
        """
        Apply only the affine part of this transformation on the
        given array of values.
        ``transform(values)`` is always equivalent to
        ``transform_affine(transform_non_affine(values))``.
        In non-affine transformations, this is generally a no-op. In
        affine transformations, this is equivalent to
        ``transform(values)``.
        Parameters
        ----------
        values : array
            The input values as an array of length :attr:`input_dims` or
            shape (N, :attr:`input_dims`).
        Returns
        -------
        array
            The output values as an array of length :attr:`output_dims` or
            shape (N, :attr:`output_dims`), depending on the input.
        """
        return self.get_affine().transform(values)

    def transform_non_affine(self, values):
        """
        Apply only the non-affine part of this transformation.
        ``transform(values)`` is always equivalent to
        ``transform_affine(transform_non_affine(values))``.
        In non-affine transformations, this is generally equivalent to
        ``transform(values)``. In affine transformations, this is
        always a no-op.
        Parameters
        ----------
        values : array
            The input values as an array of length :attr:`input_dims` or
            shape (N, :attr:`input_dims`).
        Returns
        -------
        array
            The output values as an array of length :attr:`output_dims` or
            shape (N, :attr:`output_dims`), depending on the input.
        """
        # Default: the base Transform has no non-affine component.
        return values

    def transform_bbox(self, bbox):
        """
        Transform the given bounding box.
        For smarter transforms including caching (a common requirement in
        Matplotlib), see `TransformedBbox`.
        """
        return Bbox(self.transform(bbox.get_points()))

    def get_affine(self):
        """Get the affine part of this transform."""
        return IdentityTransform()

    def get_matrix(self):
        """Get the matrix for the affine part of this transform."""
        return self.get_affine().get_matrix()

    def transform_point(self, point):
        """
        Return a transformed point.
        This function is only kept for backcompatibility; the more general
        `.transform` method is capable of transforming both a list of points
        and a single point.
        The point is given as a sequence of length :attr:`input_dims`.
        The transformed point is returned as a sequence of length
        :attr:`output_dims`.
        """
        if len(point) != self.input_dims:
            raise ValueError("The length of 'point' must be 'self.input_dims'")
        return self.transform(point)

    def transform_path(self, path):
        """
        Apply the transform to `.Path` *path*, returning a new `.Path`.
        In some cases, this transform may insert curves into the path
        that began as line segments.
        """
        return self.transform_path_affine(self.transform_path_non_affine(path))

    def transform_path_affine(self, path):
        """
        Apply the affine part of this transform to `.Path` *path*, returning a
        new `.Path`.
        ``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(values))``.
        """
        return self.get_affine().transform_path_affine(path)

    def transform_path_non_affine(self, path):
        """
        Apply the non-affine part of this transform to `.Path` *path*,
        returning a new `.Path`.
        ``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(values))``.
        """
        x = self.transform_non_affine(path.vertices)
        return Path._fast_from_codes_and_verts(x, path.codes, path)

    def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
        """
        Transform a set of angles anchored at specific locations.
        Parameters
        ----------
        angles : (N,) array-like
            The angles to transform.
        pts : (N, 2) array-like
            The points where the angles are anchored.
        radians : bool, default: False
            Whether *angles* are radians or degrees.
        pushoff : float
            For each point in *pts* and angle in *angles*, the transformed
            angle is computed by transforming a segment of length *pushoff*
            starting at that point and making that angle relative to the
            horizontal axis, and measuring the angle between the horizontal
            axis and the transformed segment.
        Returns
        -------
        (N,) array
        """
        # Must be 2D
        if self.input_dims != 2 or self.output_dims != 2:
            raise NotImplementedError('Only defined in 2D')
        angles = np.asarray(angles)
        pts = np.asarray(pts)
        _api.check_shape((None, 2), pts=pts)
        _api.check_shape((None,), angles=angles)
        if len(angles) != len(pts):
            raise ValueError("There must be as many 'angles' as 'pts'")
        # Convert to radians if desired
        if not radians:
            angles = np.deg2rad(angles)
        # Move a short distance away
        pts2 = pts + pushoff * np.column_stack([np.cos(angles),
                                               np.sin(angles)])
        # Transform both sets of points
        tpts = self.transform(pts)
        tpts2 = self.transform(pts2)
        # Calculate transformed angles
        d = tpts2 - tpts
        a = np.arctan2(d[:, 1], d[:, 0])
        # Convert back to degrees if desired
        if not radians:
            a = np.rad2deg(a)
        return a

    def inverted(self):
        """
        Return the corresponding inverse transformation.
        It holds ``x == self.inverted().transform(self.transform(x))``.
        The return value of this method should be treated as
        temporary. An update to *self* does not cause a corresponding
        update to its inverted copy.
        """
        raise NotImplementedError()
class TransformWrapper(Transform):
    """
    A helper class that holds a single child transform and acts
    equivalently to it.
    This is useful if a node of the transform tree must be replaced at
    run time with a transform of a different type. This class allows
    that replacement to correctly trigger invalidation.
    `TransformWrapper` instances must have the same input and output dimensions
    during their entire lifetime, so the child transform may only be replaced
    with another child transform of the same dimensions.
    """

    pass_through = True

    def __init__(self, child):
        """
        *child*: A `Transform` instance. This child may later
        be replaced with :meth:`set`.
        """
        _api.check_isinstance(Transform, child=child)
        super().__init__()
        self.set(child)

    def __eq__(self, other):
        # A wrapper compares equal to whatever its child compares equal to.
        return self._child.__eq__(other)

    __str__ = _make_str_method("_child")

    def frozen(self):
        # docstring inherited
        return self._child.frozen()

    def set(self, child):
        """
        Replace the current child of this transform with another one.
        The new child must have the same number of input and output
        dimensions as the current child.
        """
        if hasattr(self, "_child"):  # Absent during init.
            self.invalidate()
            new_dims = (child.input_dims, child.output_dims)
            old_dims = (self._child.input_dims, self._child.output_dims)
            if new_dims != old_dims:
                raise ValueError(
                    f"The input and output dims of the new child {new_dims} "
                    f"do not match those of current child {old_dims}")
            # Detach from the old child so it no longer invalidates us.
            self._child._parents.pop(id(self), None)
        self._child = child
        self.set_children(child)
        # Forward the transform API by rebinding the child's bound methods
        # directly onto this instance, so calls bypass the wrapper entirely.
        self.transform = child.transform
        self.transform_affine = child.transform_affine
        self.transform_non_affine = child.transform_non_affine
        self.transform_path = child.transform_path
        self.transform_path_affine = child.transform_path_affine
        self.transform_path_non_affine = child.transform_path_non_affine
        self.get_affine = child.get_affine
        self.inverted = child.inverted
        self.get_matrix = child.get_matrix
        # note we do not wrap other properties here since the transform's
        # child can be changed with WrappedTransform.set and so checking
        # is_affine and other such properties may be dangerous.
        self._invalid = 0
        self.invalidate()
        self._invalid = 0

    # Dimension/affinity flags always reflect the current child.
    input_dims = property(lambda self: self._child.input_dims)
    output_dims = property(lambda self: self._child.output_dims)
    is_affine = property(lambda self: self._child.is_affine)
    is_separable = property(lambda self: self._child.is_separable)
    has_inverse = property(lambda self: self._child.has_inverse)
class AffineBase(Transform):
    """
    The base class of all affine transformations of any number of dimensions.
    """

    is_affine = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cached inverse transform; filled in lazily by subclasses.
        self._inverted = None

    def __array__(self, *args, **kwargs):
        # optimises the access of the transform matrix vs. the superclass
        return self.get_matrix()

    def __eq__(self, other):
        # Two affine transforms are equal iff their matrices are equal.
        if getattr(other, "is_affine", False) and hasattr(other, "get_matrix"):
            return (self.get_matrix() == other.get_matrix()).all()
        return NotImplemented

    def transform(self, values):
        # docstring inherited
        # For a purely affine transform, the whole transform is its affine
        # part.
        return self.transform_affine(values)

    def transform_affine(self, values):
        # docstring inherited
        raise NotImplementedError('Affine subclasses should override this '
                                  'method.')

    def transform_non_affine(self, values):
        # docstring inherited
        # Affine transforms have no non-affine part: identity.
        return values

    def transform_path(self, path):
        # docstring inherited
        return self.transform_path_affine(path)

    def transform_path_affine(self, path):
        # docstring inherited
        return Path(self.transform_affine(path.vertices),
                    path.codes, path._interpolation_steps)

    def transform_path_non_affine(self, path):
        # docstring inherited
        # Identity on paths, for the same reason as transform_non_affine.
        return path

    def get_affine(self):
        # docstring inherited
        return self
class Affine2DBase(AffineBase):
    """
    The base class of all 2D affine transformations.
    2D affine transformations are performed using a 3x3 numpy array::
        a c e
        b d f
        0 0 1
    This class provides the read-only interface. For a mutable 2D
    affine transformation, use `Affine2D`.
    Subclasses of this class will generally only need to override a
    constructor and `~.Transform.get_matrix` that generates a custom 3x3 matrix.
    """

    input_dims = 2
    output_dims = 2

    def frozen(self):
        # docstring inherited
        # A snapshot is simply a mutable Affine2D around a copy of the
        # current matrix.
        return Affine2D(self.get_matrix().copy())

    @property
    def is_separable(self):
        # Separable iff there are no shear terms (off-diagonal entries).
        mtx = self.get_matrix()
        return mtx[0, 1] == mtx[1, 0] == 0.0

    def to_values(self):
        """
        Return the values of the matrix as an ``(a, b, c, d, e, f)`` tuple.
        """
        mtx = self.get_matrix()
        return tuple(mtx[:2].swapaxes(0, 1).flat)

    def transform_affine(self, values):
        mtx = self.get_matrix()
        if isinstance(values, np.ma.MaskedArray):
            # Transform the raw data and re-apply the original mask.
            tpoints = affine_transform(values.data, mtx)
            return np.ma.MaskedArray(tpoints, mask=np.ma.getmask(values))
        return affine_transform(values, mtx)

    if DEBUG:
        # In DEBUG builds, warn when non-ndarray input forces a slow
        # conversion before transforming.
        _transform_affine = transform_affine

        def transform_affine(self, values):
            # docstring inherited
            # The major speed trap here is just converting to the
            # points to an array in the first place. If we can use
            # more arrays upstream, that should help here.
            if not isinstance(values, np.ndarray):
                _api.warn_external(
                    f'A non-numpy array of type {type(values)} was passed in '
                    f'for transformation, which results in poor performance.')
            return self._transform_affine(values)

    def inverted(self):
        # docstring inherited
        # Cache the inverse; recompute only when this transform has been
        # invalidated since the last computation.
        if self._inverted is None or self._invalid:
            mtx = self.get_matrix()
            shorthand_name = None
            if self._shorthand_name:
                shorthand_name = '(%s)-1' % self._shorthand_name
            self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
            self._invalid = 0
        return self._inverted
class Affine2D(Affine2DBase):
    """
    A mutable 2D affine transformation.

    The in-place mutators (`rotate`, `translate`, `scale`, `skew`, ...)
    all return *self* so they can be chained.
    """
    def __init__(self, matrix=None, **kwargs):
        """
        Initialize an Affine transform from a 3x3 numpy float array::
          a c e
          b d f
          0 0 1
        If *matrix* is None, initialize with the identity transform.
        """
        super().__init__(**kwargs)
        if matrix is None:
            # A bit faster than np.identity(3).
            matrix = IdentityTransform._mtx
        self._mtx = matrix.copy()
        self._invalid = 0
    _base_str = _make_str_method("_mtx")
    def __str__(self):
        # Use the compact Affine2D().scale(...) shorthand for purely
        # diagonal matrices; fall back to the full matrix repr otherwise.
        return (self._base_str()
                if (self._mtx != np.diag(np.diag(self._mtx))).any()
                else f"Affine2D().scale({self._mtx[0, 0]}, {self._mtx[1, 1]})"
                if self._mtx[0, 0] != self._mtx[1, 1]
                else f"Affine2D().scale({self._mtx[0, 0]})")
    @staticmethod
    def from_values(a, b, c, d, e, f):
        """
        Create a new Affine2D instance from the given values::
          a c e
          b d f
          0 0 1
        .
        """
        return Affine2D(
            np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], float).reshape((3, 3)))
    def get_matrix(self):
        """
        Get the underlying transformation matrix as a 3x3 array::
          a c e
          b d f
          0 0 1
        .
        """
        if self._invalid:
            # Drop any cached inverse; it no longer matches the matrix.
            self._inverted = None
            self._invalid = 0
        return self._mtx
    def set_matrix(self, mtx):
        """
        Set the underlying transformation matrix from a 3x3 array::
          a c e
          b d f
          0 0 1
        .
        """
        self._mtx = mtx
        self.invalidate()
    def set(self, other):
        """
        Set this transformation from the frozen copy of another
        `Affine2DBase` object.
        """
        _api.check_isinstance(Affine2DBase, other=other)
        # NOTE(review): this stores whatever array other.get_matrix()
        # returns, which for Affine2D aliases other's matrix rather than
        # copying it — confirm callers never mutate through it.
        self._mtx = other.get_matrix()
        self.invalidate()
    def clear(self):
        """
        Reset the underlying matrix to the identity transform.
        """
        # A bit faster than np.identity(3).
        self._mtx = IdentityTransform._mtx.copy()
        self.invalidate()
        return self
    def rotate(self, theta):
        """
        Add a rotation (in radians) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        a = math.cos(theta)
        b = math.sin(theta)
        mtx = self._mtx
        # Operating and assigning one scalar at a time is much faster.
        (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
        # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
        mtx[0, 0] = a * xx - b * yx
        mtx[0, 1] = a * xy - b * yy
        mtx[0, 2] = a * x0 - b * y0
        mtx[1, 0] = b * xx + a * yx
        mtx[1, 1] = b * xy + a * yy
        mtx[1, 2] = b * x0 + a * y0
        self.invalidate()
        return self
    def rotate_deg(self, degrees):
        """
        Add a rotation (in degrees) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.rotate(math.radians(degrees))
    def rotate_around(self, x, y, theta):
        """
        Add a rotation (in radians) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.translate(-x, -y).rotate(theta).translate(x, y)
    def rotate_deg_around(self, x, y, degrees):
        """
        Add a rotation (in degrees) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        # Cast to float to avoid wraparound issues with uint8's
        x, y = float(x), float(y)
        return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
    def translate(self, tx, ty):
        """
        Add a translation in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        self._mtx[0, 2] += tx
        self._mtx[1, 2] += ty
        self.invalidate()
        return self
    def scale(self, sx, sy=None):
        """
        Add a scale in place.
        If *sy* is None, the same scale is applied in both the *x*- and
        *y*-directions.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        if sy is None:
            sy = sx
        # explicit element-wise scaling is fastest
        self._mtx[0, 0] *= sx
        self._mtx[0, 1] *= sx
        self._mtx[0, 2] *= sx
        self._mtx[1, 0] *= sy
        self._mtx[1, 1] *= sy
        self._mtx[1, 2] *= sy
        self.invalidate()
        return self
    def skew(self, xShear, yShear):
        """
        Add a skew in place.
        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in radians.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        rx = math.tan(xShear)
        ry = math.tan(yShear)
        mtx = self._mtx
        # Operating and assigning one scalar at a time is much faster.
        (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
        # mtx = [[1 rx 0], [ry 1 0], [0 0 1]] * mtx
        mtx[0, 0] += rx * yx
        mtx[0, 1] += rx * yy
        mtx[0, 2] += rx * y0
        mtx[1, 0] += ry * xx
        mtx[1, 1] += ry * xy
        mtx[1, 2] += ry * x0
        self.invalidate()
        return self
    def skew_deg(self, xShear, yShear):
        """
        Add a skew in place.
        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in degrees.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.skew(math.radians(xShear), math.radians(yShear))
class IdentityTransform(Affine2DBase):
    """
    A special class that does one thing, the identity transform, in a
    fast way.

    All ``transform*`` methods return their input unchanged (converted to
    an array where the API requires one), avoiding any matrix arithmetic.
    """
    # Shared identity matrix; read-only by convention — other classes copy
    # it (see Affine2D.__init__/clear) rather than mutate it.
    _mtx = np.identity(3)
    def frozen(self):
        # docstring inherited; the identity is immutable, so no copy needed.
        return self
    __str__ = _make_str_method()
    def get_matrix(self):
        # docstring inherited
        return self._mtx
    def transform(self, values):
        # docstring inherited
        return np.asanyarray(values)
    def transform_affine(self, values):
        # docstring inherited
        return np.asanyarray(values)
    def transform_non_affine(self, values):
        # docstring inherited
        return np.asanyarray(values)
    def transform_path(self, path):
        # docstring inherited
        return path
    def transform_path_affine(self, path):
        # docstring inherited
        return path
    def transform_path_non_affine(self, path):
        # docstring inherited
        return path
    def get_affine(self):
        # docstring inherited
        return self
    def inverted(self):
        # docstring inherited; the identity is its own inverse.
        return self
class _BlendedMixin:
    """Common methods for `BlendedGenericTransform` and `BlendedAffine2D`."""
    def __eq__(self, other):
        # Blends compare equal when their x- and y- children match; a blend
        # of two identical children also compares equal to that child.
        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
            return (self._x == other._x) and (self._y == other._y)
        elif self._x == self._y:
            return self._x == other
        else:
            return NotImplemented
    def contains_branch_seperately(self, transform):
        # Check the x- and y- children independently.  (The "seperately"
        # spelling is part of the established public API.)
        return (self._x.contains_branch(transform),
                self._y.contains_branch(transform))
    __str__ = _make_str_method("_x", "_y")
class BlendedGenericTransform(_BlendedMixin, Transform):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This "generic" version can handle any given child transform in the
    *x*- and *y*-directions.
    """
    input_dims = 2
    output_dims = 2
    is_separable = True
    pass_through = True
    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to transform the
        *x*-axis and *y_transform* to transform the *y*-axis.
        You will generally not call this constructor directly but use the
        `blended_transform_factory` function instead, which can determine
        automatically which kind of blended transform to create.
        """
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        # Cache for the affine part, rebuilt in get_affine when invalid.
        self._affine = None
    @property
    def depth(self):
        # The blend is as deep as its deeper child.
        return max(self._x.depth, self._y.depth)
    def contains_branch(self, other):
        # A blended transform cannot possibly contain a branch from two
        # different transforms.
        return False
    is_affine = property(lambda self: self._x.is_affine and self._y.is_affine)
    has_inverse = property(
        lambda self: self._x.has_inverse and self._y.has_inverse)
    def frozen(self):
        # docstring inherited
        return blended_transform_factory(self._x.frozen(), self._y.frozen())
    def transform_non_affine(self, values):
        # docstring inherited
        # If both children are affine, the non-affine part is the identity.
        if self._x.is_affine and self._y.is_affine:
            return values
        x = self._x
        y = self._y
        # Shared 2D child: one call transforms both coordinates.
        if x == y and x.input_dims == 2:
            return x.transform_non_affine(values)
        # Otherwise transform each axis, feeding a 2D child the full points
        # and slicing its output, or a 1D child just that coordinate column.
        if x.input_dims == 2:
            x_points = x.transform_non_affine(values)[:, 0:1]
        else:
            x_points = x.transform_non_affine(values[:, 0])
            x_points = x_points.reshape((len(x_points), 1))
        if y.input_dims == 2:
            y_points = y.transform_non_affine(values)[:, 1:]
        else:
            y_points = y.transform_non_affine(values[:, 1])
            y_points = y_points.reshape((len(y_points), 1))
        # Preserve masked-ness of the output if either child produced a
        # masked array.
        if (isinstance(x_points, np.ma.MaskedArray) or
                isinstance(y_points, np.ma.MaskedArray)):
            return np.ma.concatenate((x_points, y_points), 1)
        else:
            return np.concatenate((x_points, y_points), 1)
    def inverted(self):
        # docstring inherited; the inverse of a blend is the blend of the
        # inverses.
        return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
    def get_affine(self):
        # docstring inherited
        if self._invalid or self._affine is None:
            if self._x == self._y:
                self._affine = self._x.get_affine()
            else:
                x_mtx = self._x.get_affine().get_matrix()
                y_mtx = self._y.get_affine().get_matrix()
                # We already know the transforms are separable, so we can skip
                # setting b and c to zero.
                mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])
                self._affine = Affine2D(mtx)
            self._invalid = 0
        return self._affine
class BlendedAffine2D(_BlendedMixin, Affine2DBase):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This version is an optimization for the case where both child
    transforms are of type `Affine2DBase`.
    """
    is_separable = True
    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to transform the
        *x*-axis and *y_transform* to transform the *y*-axis.
        Both *x_transform* and *y_transform* must be 2D affine transforms.
        You will generally not call this constructor directly but use the
        `blended_transform_factory` function instead, which can determine
        automatically which kind of blended transform to create.
        """
        is_affine = x_transform.is_affine and y_transform.is_affine
        is_separable = x_transform.is_separable and y_transform.is_separable
        is_correct = is_affine and is_separable
        if not is_correct:
            raise ValueError("Both *x_transform* and *y_transform* must be 2D "
                             "affine transforms")
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        Affine2DBase.__init__(self)
        # Matrix cache, built lazily by get_matrix.
        self._mtx = None
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            if self._x == self._y:
                self._mtx = self._x.get_matrix()
            else:
                x_mtx = self._x.get_matrix()
                y_mtx = self._y.get_matrix()
                # We already know the transforms are separable, so we can skip
                # setting b and c to zero.
                self._mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])
            # The cached inverse no longer matches the rebuilt matrix.
            self._inverted = None
            self._invalid = 0
        return self._mtx
def blended_transform_factory(x_transform, y_transform):
    """
    Create a new "blended" transform using *x_transform* to transform
    the *x*-axis and *y_transform* to transform the *y*-axis.

    A faster version of the blended transform is returned for the case
    where both child transforms are affine.
    """
    # Pick the optimized implementation only when both children are 2D
    # affine transforms; otherwise fall back to the generic one.
    both_affine = (isinstance(x_transform, Affine2DBase)
                   and isinstance(y_transform, Affine2DBase))
    cls = BlendedAffine2D if both_affine else BlendedGenericTransform
    return cls(x_transform, y_transform)
class CompositeGenericTransform(Transform):
    """
    A composite transform formed by applying transform *a* then
    transform *b*.
    This "generic" version can handle any two arbitrary
    transformations.
    """
    pass_through = True
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.
        You will generally not call this constructor directly but write ``a +
        b`` instead, which will automatically choose the best kind of composite
        transform instance to create.
        """
        if a.output_dims != b.input_dims:
            raise ValueError("The output dimension of 'a' must be equal to "
                             "the input dimensions of 'b'")
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        super().__init__(**kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
    def frozen(self):
        # docstring inherited
        self._invalid = 0
        frozen = composite_transform_factory(
            self._a.frozen(), self._b.frozen())
        # The factory may return a simpler class (e.g. when a child froze to
        # an identity); freeze that result again so it is fully static.
        if not isinstance(frozen, CompositeGenericTransform):
            return frozen.frozen()
        return frozen
    def _invalidate_internal(self, level, invalidating_node):
        # When the left child is invalidated at AFFINE_ONLY level and the right child is
        # non-affine, the composite transform is FULLY invalidated.
        if invalidating_node is self._a and not self._b.is_affine:
            level = Transform._INVALID_FULL
        super()._invalidate_internal(level, invalidating_node)
    def __eq__(self, other):
        if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
            return self is other or (self._a == other._a
                                     and self._b == other._b)
        else:
            return False
    def _iter_break_from_left_to_right(self):
        # Yield every (left, right) split of the composite chain.
        for left, right in self._a._iter_break_from_left_to_right():
            yield left, right + self._b
        for left, right in self._b._iter_break_from_left_to_right():
            yield self._a + left, right
    def contains_branch_seperately(self, other_transform):
        # docstring inherited
        if self.output_dims != 2:
            raise ValueError('contains_branch_seperately only supports '
                             'transforms with 2 output dimensions')
        if self == other_transform:
            return (True, True)
        return self._b.contains_branch_seperately(other_transform)
    depth = property(lambda self: self._a.depth + self._b.depth)
    is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
    is_separable = property(
        lambda self: self._a.is_separable and self._b.is_separable)
    has_inverse = property(
        lambda self: self._a.has_inverse and self._b.has_inverse)
    __str__ = _make_str_method("_a", "_b")
    def transform_affine(self, values):
        # docstring inherited
        return self.get_affine().transform(values)
    def transform_non_affine(self, values):
        # docstring inherited
        # Only the trailing non-affine portion of the chain needs applying:
        # a fully affine composite contributes nothing here; if only 'a' is
        # non-affine, 'b' (affine) is deferred to the affine stage.
        if self._a.is_affine and self._b.is_affine:
            return values
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_non_affine(values)
        else:
            return self._b.transform_non_affine(self._a.transform(values))
    def transform_path_non_affine(self, path):
        # docstring inherited; same case analysis as transform_non_affine.
        if self._a.is_affine and self._b.is_affine:
            return path
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_path_non_affine(path)
        else:
            return self._b.transform_path_non_affine(
                self._a.transform_path(path))
    def get_affine(self):
        # docstring inherited
        # If 'b' has a non-affine part, 'a' was consumed entirely by the
        # non-affine stage, leaving only b's affine part; otherwise both
        # affine parts compose into a single matrix.
        if not self._b.is_affine:
            return self._b.get_affine()
        else:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                   self._a.get_affine().get_matrix()))
    def inverted(self):
        # docstring inherited; (a∘b)⁻¹ = b⁻¹∘a⁻¹.
        return CompositeGenericTransform(
            self._b.inverted(), self._a.inverted())
class CompositeAffine2D(Affine2DBase):
    """
    A composite transform formed by applying transform *a* then transform *b*.
    This version is an optimization that handles the case where both *a*
    and *b* are 2D affines.
    """
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying `Affine2DBase` *a* then `Affine2DBase` *b*.
        You will generally not call this constructor directly but write ``a +
        b`` instead, which will automatically choose the best kind of composite
        transform instance to create.
        """
        if not a.is_affine or not b.is_affine:
            raise ValueError("'a' and 'b' must be affine transforms")
        if a.output_dims != b.input_dims:
            raise ValueError("The output dimension of 'a' must be equal to "
                             "the input dimensions of 'b'")
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        super().__init__(**kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
        # Matrix cache, built lazily by get_matrix.
        self._mtx = None
    @property
    def depth(self):
        return self._a.depth + self._b.depth
    def _iter_break_from_left_to_right(self):
        # Yield every (left, right) split of the composite chain.
        for left, right in self._a._iter_break_from_left_to_right():
            yield left, right + self._b
        for left, right in self._b._iter_break_from_left_to_right():
            yield self._a + left, right
    __str__ = _make_str_method("_a", "_b")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # Matrix product applies 'a' first, then 'b'.
            self._mtx = np.dot(
                self._b.get_matrix(),
                self._a.get_matrix())
            self._inverted = None
            self._invalid = 0
        return self._mtx
def composite_transform_factory(a, b):
    """
    Create a new composite transform that is the result of applying
    transform a then transform b.
    Shortcut versions of the blended transform are provided for the
    case where both child transforms are affine, or one or the other
    is the identity transform.
    Composite transforms may also be created using the '+' operator,
    e.g.::
      c = a + b
    """
    # isinstance checks (rather than equality) guarantee that the operand
    # really is *always* the identity: TransformWrappers are mutable, so an
    # equality test could be true now and false later.
    if isinstance(a, IdentityTransform):
        return b
    if isinstance(b, IdentityTransform):
        return a
    if isinstance(a, Affine2D) and isinstance(b, Affine2D):
        return CompositeAffine2D(a, b)
    return CompositeGenericTransform(a, b)
class BboxTransform(Affine2DBase):
    """
    `BboxTransform` linearly transforms points from one `Bbox` to another.
    """
    is_separable = True
    def __init__(self, boxin, boxout, **kwargs):
        """
        Create a new `BboxTransform` that linearly transforms
        points from *boxin* to *boxout*.
        """
        _api.check_isinstance(BboxBase, boxin=boxin, boxout=boxout)
        super().__init__(**kwargs)
        self._boxin = boxin
        self._boxout = boxout
        self.set_children(boxin, boxout)
        # Matrix/inverse caches, built lazily by get_matrix.
        self._mtx = None
        self._inverted = None
    __str__ = _make_str_method("_boxin", "_boxout")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            outl, outb, outw, outh = self._boxout.bounds
            x_scale = outw / inw
            y_scale = outh / inh
            if DEBUG and (x_scale == 0 or y_scale == 0):
                raise ValueError(
                    "Transforming from or to a singular bounding box")
            # Scale by the width/height ratio, then shift boxin's lower-left
            # corner onto boxout's.
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale+outl)],
                                  [0.0    , y_scale, (-inb*y_scale+outb)],
                                  [0.0    , 0.0    , 1.0        ]],
                                 float)
            self._inverted = None
            self._invalid = 0
        return self._mtx
class BboxTransformTo(Affine2DBase):
    """
    `BboxTransformTo` is a transformation that linearly transforms points from
    the unit bounding box to a given `Bbox`.
    """
    is_separable = True
    def __init__(self, boxout, **kwargs):
        """
        Create a new `BboxTransformTo` that linearly transforms
        points from the unit bounding box to *boxout*.
        """
        _api.check_isinstance(BboxBase, boxout=boxout)
        super().__init__(**kwargs)
        self._boxout = boxout
        self.set_children(boxout)
        # Matrix/inverse caches, built lazily by get_matrix.
        self._mtx = None
        self._inverted = None
    __str__ = _make_str_method("_boxout")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            outl, outb, outw, outh = self._boxout.bounds
            if DEBUG and (outw == 0 or outh == 0):
                raise ValueError("Transforming to a singular bounding box.")
            # Scale the unit box to boxout's size, then shift to its origin.
            self._mtx = np.array([[outw,  0.0, outl],
                                  [ 0.0, outh, outb],
                                  [ 0.0,  0.0,  1.0]],
                                 float)
            self._inverted = None
            self._invalid = 0
        return self._mtx
@_api.deprecated("3.9")
class BboxTransformToMaxOnly(BboxTransformTo):
    """
    `BboxTransformToMaxOnly` is a transformation that linearly transforms points from
    the unit bounding box to a given `Bbox` with a fixed upper left of (0, 0).
    """
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # Pure scaling to the box's max corner, with no translation.
            xmax, ymax = self._boxout.max
            if DEBUG and (xmax == 0 or ymax == 0):
                raise ValueError("Transforming to a singular bounding box.")
            self._mtx = np.array([[xmax,  0.0, 0.0],
                                  [ 0.0, ymax, 0.0],
                                  [ 0.0,  0.0, 1.0]],
                                 float)
            self._inverted = None
            self._invalid = 0
        return self._mtx
class BboxTransformFrom(Affine2DBase):
    """
    `BboxTransformFrom` linearly transforms points from a given `Bbox` to the
    unit bounding box.
    """
    is_separable = True
    def __init__(self, boxin, **kwargs):
        """
        Create a new `BboxTransformFrom` that linearly transforms
        points from *boxin* to the unit bounding box.
        """
        _api.check_isinstance(BboxBase, boxin=boxin)
        super().__init__(**kwargs)
        self._boxin = boxin
        self.set_children(boxin)
        # Matrix/inverse caches, built lazily by get_matrix.
        self._mtx = None
        self._inverted = None
    __str__ = _make_str_method("_boxin")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            if DEBUG and (inw == 0 or inh == 0):
                raise ValueError("Transforming from a singular bounding box.")
            # Shift boxin's origin to (0, 0) and scale it down to unit size.
            x_scale = 1.0 / inw
            y_scale = 1.0 / inh
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale)],
                                  [0.0    , y_scale, (-inb*y_scale)],
                                  [0.0    , 0.0    , 1.0     ]],
                                 float)
            self._inverted = None
            self._invalid = 0
        return self._mtx
class ScaledTranslation(Affine2DBase):
    """
    A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by *scale_trans*.
    """
    def __init__(self, xt, yt, scale_trans, **kwargs):
        """
        Parameters
        ----------
        xt, yt : float
            The raw translation, in *scale_trans* input units.
        scale_trans : `Transform`
            Transform applied to ``(xt, yt)`` to obtain the actual offset.
        """
        super().__init__(**kwargs)
        self._t = (xt, yt)
        self._scale_trans = scale_trans
        self.set_children(scale_trans)
        # Matrix/inverse caches, built lazily by get_matrix.
        self._mtx = None
        self._inverted = None
    __str__ = _make_str_method("_t")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # A bit faster than np.identity(3).
            self._mtx = IdentityTransform._mtx.copy()
            # Identity plus the scaled offset in the translation column.
            self._mtx[:2, 2] = self._scale_trans.transform(self._t)
            self._invalid = 0
            self._inverted = None
        return self._mtx
class _ScaledRotation(Affine2DBase):
    """
    A transformation that applies rotation by *theta*, after transform by *trans_shift*.
    """
    def __init__(self, theta, trans_shift):
        """
        Parameters
        ----------
        theta : float
            Raw rotation angle; the point ``(theta, 0)`` is passed through
            *trans_shift* and the transformed x-coordinate is the angle
            actually used (in radians).
        trans_shift : `Transform`
            Transform applied to ``(theta, 0)`` before rotating.
        """
        super().__init__()
        self._theta = theta
        self._trans_shift = trans_shift
        # Matrix cache, built lazily by get_matrix.
        self._mtx = None
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            transformed_coords = self._trans_shift.transform([[self._theta, 0]])[0]
            adjusted_theta = transformed_coords[0]
            rotation = Affine2D().rotate(adjusted_theta)
            self._mtx = rotation.get_matrix()
            # Bug fix: drop any stale cached inverse and mark the cache as
            # valid.  Previously neither was done, so the matrix (and its
            # inverse) were needlessly recomputed on every access — unlike
            # every sibling get_matrix implementation in this module.
            self._inverted = None
            self._invalid = 0
        return self._mtx
class AffineDeltaTransform(Affine2DBase):
    r"""
    A transform wrapper for transforming displacements between pairs of points.
    This class is intended to be used to transform displacements ("position
    deltas") between pairs of points (e.g., as the ``offset_transform``
    of `.Collection`\s): given a transform ``t`` such that ``t =
    AffineDeltaTransform(t) + offset``, ``AffineDeltaTransform``
    satisfies ``AffineDeltaTransform(a - b) == AffineDeltaTransform(a) -
    AffineDeltaTransform(b)``.
    This is implemented by forcing the offset components of the transform
    matrix to zero.
    This class is experimental as of 3.3, and the API may change.
    """
    pass_through = True
    def __init__(self, transform, **kwargs):
        """
        Parameters
        ----------
        transform : `Transform`
            The transform whose linear (non-translation) part to expose.
        """
        super().__init__(**kwargs)
        self._base_transform = transform
        self.set_children(transform)
    __str__ = _make_str_method("_base_transform")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # Copy the base matrix and zero out the translation column, so
            # only the linear part (scale/rotation/shear) is applied.
            self._mtx = self._base_transform.get_matrix().copy()
            self._mtx[:2, -1] = 0
        # NOTE(review): unlike sibling get_matrix implementations, _invalid
        # is not reset here, so the matrix is rebuilt on each access while
        # invalid — confirm whether this is intentional (pass_through=True).
        return self._mtx
class TransformedPath(TransformNode):
    """
    A `TransformedPath` caches a non-affine transformed copy of the
    `~.path.Path`. This cached copy is automatically updated when the
    non-affine part of the transform changes.
    .. note::
      Paths are considered immutable by this class. Any update to the
      path's vertices/codes will not trigger a transform recomputation.
    """
    def __init__(self, path, transform):
        """
        Parameters
        ----------
        path : `~.path.Path`
        transform : `Transform`
        """
        _api.check_isinstance(Transform, transform=transform)
        super().__init__()
        self._path = path
        self._transform = transform
        self.set_children(transform)
        # Caches rebuilt by _revalidate when the non-affine part changes.
        self._transformed_path = None
        self._transformed_points = None
    def _revalidate(self):
        # only recompute if the invalidation includes the non_affine part of
        # the transform
        if (self._invalid == self._INVALID_FULL
                or self._transformed_path is None):
            self._transformed_path = \
                self._transform.transform_path_non_affine(self._path)
            # Cheaper variant of the cache: vertices only, no path
            # interpolation, reusing the original codes.
            self._transformed_points = \
                Path._fast_from_codes_and_verts(
                    self._transform.transform_non_affine(self._path.vertices),
                    None, self._path)
        self._invalid = 0
    def get_transformed_points_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation. Unlike
        :meth:`get_transformed_path_and_affine`, no interpolation will
        be performed.
        """
        self._revalidate()
        return self._transformed_points, self.get_affine()
    def get_transformed_path_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation.
        """
        self._revalidate()
        return self._transformed_path, self.get_affine()
    def get_fully_transformed_path(self):
        """
        Return a fully-transformed copy of the child path.
        """
        self._revalidate()
        # Apply the remaining affine part to the cached non-affine result.
        return self._transform.transform_path_affine(self._transformed_path)
    def get_affine(self):
        # Affine part of the wrapped transform; applied by callers on top of
        # the cached non-affine result.
        return self._transform.get_affine()
class TransformedPatchPath(TransformedPath):
    """
    A `TransformedPatchPath` caches a non-affine transformed copy of the
    `~.patches.Patch`. This cached copy is automatically updated when the
    non-affine part of the transform or the patch changes.
    """
    def __init__(self, patch):
        """
        Parameters
        ----------
        patch : `~.patches.Patch`
        """
        # Defer to TransformedPath.__init__.
        super().__init__(patch.get_path(), patch.get_transform())
        self._patch = patch
    def _revalidate(self):
        patch_path = self._patch.get_path()
        # Force invalidation if the patch path changed; otherwise, let base
        # class check invalidation.
        if patch_path != self._path:
            self._path = patch_path
            self._transformed_path = None
        super()._revalidate()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
    """
    Modify the endpoints of a range as needed to avoid singularities.

    Parameters
    ----------
    vmin, vmax : float
        The initial endpoints.
    expander : float, default: 0.001
        Fractional amount by which *vmin* and *vmax* are expanded if
        the original interval is too small, based on *tiny*.
    tiny : float, default: 1e-15
        Threshold for the ratio of the interval to the maximum absolute
        value of its endpoints.  If the interval is smaller than
        this, it will be expanded.  This value should be around
        1e-15 or larger; otherwise the interval will be approaching
        the double precision resolution limit.
    increasing : bool, default: True
        If True, swap *vmin*, *vmax* if *vmin* > *vmax*.

    Returns
    -------
    vmin, vmax : float
        Endpoints, expanded and/or swapped if necessary.
        If either input is inf or NaN, or if both inputs are 0 or very
        close to zero, it returns -*expander*, *expander*.
    """
    # Non-finite endpoints: fall back to a symmetric default interval.
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        return -expander, expander
    swapped = vmax < vmin
    if swapped:
        vmin, vmax = vmax, vmin
    # Promote to float: integer inputs can wrap around in abs()
    # (abs(np.int8(-128)) == -128) and vmax - vmin can overflow.
    vmin = float(vmin)
    vmax = float(vmax)
    maxabsvalue = max(abs(vmin), abs(vmax))
    if maxabsvalue < (1e6 / tiny) * np.finfo(float).tiny:
        # Both endpoints are essentially zero.
        vmin, vmax = -expander, expander
    elif vmax - vmin <= maxabsvalue * tiny:
        # Degenerate (near-zero-width) interval: widen it around its value.
        if vmin == vmax == 0:
            vmin, vmax = -expander, expander
        else:
            vmin -= expander * abs(vmin)
            vmax += expander * abs(vmax)
    # Restore the original (decreasing) order if requested.
    if swapped and not increasing:
        vmin, vmax = vmax, vmin
    return vmin, vmax
def interval_contains(interval, val):
    """
    Check, inclusively, whether an interval includes a given value.

    Parameters
    ----------
    interval : (float, float)
        The endpoints of the interval; they may be given in either order.
    val : float
        Value to check is within interval.

    Returns
    -------
    bool
        Whether *val* is within the *interval*.
    """
    lo, hi = sorted(interval)  # normalize endpoint order
    return lo <= val <= hi
def _interval_contains_close(interval, val, rtol=1e-10):
"""
Check, inclusively, whether an interval includes a given value, with the
interval expanded by a small tolerance to admit floating point errors.
Parameters
----------
interval : (float, float)
The endpoints of the interval.
val : float
Value to check is within interval.
rtol : float, default: 1e-10
Relative tolerance slippage allowed outside of the interval.
For an interval ``[a, b]``, values
``a - rtol * (b - a) <= val <= b + rtol * (b - a)`` are considered
inside the interval.
Returns
-------
bool
Whether *val* is within the *interval* (with tolerance).
"""
a, b = interval
if a > b:
a, b = b, a
rtol = (b - a) * rtol
return a - rtol <= val <= b + rtol
def interval_contains_open(interval, val):
    """
    Check, excluding endpoints, whether an interval includes a given value.

    Parameters
    ----------
    interval : (float, float)
        The endpoints of the interval; they may be given in either order.
    val : float
        Value to check is within interval.

    Returns
    -------
    bool
        Whether *val* is strictly within the *interval*.
    """
    a, b = interval
    # Strict comparison in whichever order the endpoints were given; an
    # empty interval (a == b) contains nothing.
    if a < b:
        return a < val < b
    return b < val < a
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
    """
    Return a new transform with an added offset.

    Parameters
    ----------
    trans : `Transform` subclass
        Any transform, to which offset will be applied.
    fig : `~matplotlib.figure.Figure`, default: None
        Current figure. It can be None if *units* are 'dots'.
    x, y : float, default: 0.0
        The offset to apply.
    units : {'inches', 'points', 'dots'}, default: 'inches'
        Units of the offset.

    Returns
    -------
    `Transform` subclass
        Transform with applied offset.
    """
    _api.check_in_list(['dots', 'points', 'inches'], units=units)
    # Dot offsets need no figure: they are plain display-space translations.
    if units == 'dots':
        return trans + Affine2D().translate(x, y)
    if fig is None:
        raise ValueError('For units of inches or points a fig kwarg is needed')
    if units == 'points':
        # Convert typographic points (72 per inch) to inches.
        x, y = x / 72.0, y / 72.0
    # 'inches' (possibly converted from points): scale by the figure dpi.
    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
venv\Lib\site-packages\matplotlib\typing.py
"""
Typing support for Matplotlib
This module contains Type aliases which are useful for Matplotlib and potentially
downstream libraries.
.. admonition:: Provisional status of typing
The ``typing`` module and type stub files are considered provisional and may change
at any time without a deprecation period.
"""
from collections.abc import Hashable, Sequence
import pathlib
from typing import Any, Callable, Literal, TypeAlias, TypeVar, Union
from . import path
from ._enums import JoinStyle, CapStyle
from .artist import Artist
from .backend_bases import RendererBase
from .markers import MarkerStyle
from .transforms import Bbox, Transform
# --- Color aliases -----------------------------------------------------------
RGBColorType: TypeAlias = tuple[float, float, float] | str
RGBAColorType: TypeAlias = (
    str |  # "none" or "#RRGGBBAA"/"#RGBA" hex strings
    tuple[float, float, float, float] |
    # 2 tuple (color, alpha) representations, not infinitely recursive
    # RGBColorType includes the (str, float) tuple, even for RGBA strings
    tuple[RGBColorType, float] |
    # (4-tuple, float) is odd, but accepted as the outer float overriding A of 4-tuple
    tuple[tuple[float, float, float, float], float]
)
ColorType: TypeAlias = RGBColorType | RGBAColorType
# British-spelling synonyms for the color aliases above.
RGBColourType: TypeAlias = RGBColorType
RGBAColourType: TypeAlias = RGBAColorType
ColourType: TypeAlias = ColorType
# --- Line/marker styling aliases ---------------------------------------------
LineStyleType: TypeAlias = str | tuple[float, Sequence[float]]
DrawStyleType: TypeAlias = Literal["default", "steps", "steps-pre", "steps-mid",
                                   "steps-post"]
MarkEveryType: TypeAlias = (
    None |
    int | tuple[int, int] | slice | list[int] |
    float | tuple[float, float] |
    list[bool]
)
MarkerType: TypeAlias = str | path.Path | MarkerStyle
FillStyleType: TypeAlias = Literal["full", "left", "right", "bottom", "top", "none"]
JoinStyleType: TypeAlias = JoinStyle | Literal["miter", "round", "bevel"]
CapStyleType: TypeAlias = CapStyle | Literal["butt", "projecting", "round"]
# --- Coordinate-system specifications (e.g. for annotations) -----------------
CoordsBaseType = Union[
    str,
    Artist,
    Transform,
    Callable[
        [RendererBase],
        Union[Bbox, Transform]
    ]
]
CoordsType = Union[
    CoordsBaseType,
    tuple[CoordsBaseType, CoordsBaseType]
]
# --- rcParams / style-sheet specification ------------------------------------
RcStyleType: TypeAlias = (
    str |
    dict[str, Any] |
    pathlib.Path |
    Sequence[str | pathlib.Path | dict[str, Any]]
)
_HT = TypeVar("_HT", bound=Hashable)
HashableList: TypeAlias = list[_HT | "HashableList[_HT]"]
"""A nested list of Hashable values."""
venv\Lib\site-packages\matplotlib\units.py
"""
The classes here provide support for using custom classes with
Matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather a units implementation must register with the Registry converter
dictionary and provide a `ConversionInterface`. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
"Convert a datetime value to a scalar or array."
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
"Return major and minor tick locators and formatters."
if unit != 'date':
return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='date')
@staticmethod
def default_units(x, axis):
"Return the default unit for x or None."
return 'date'
# Finally we register our object type with the Matplotlib units registry.
units.registry[datetime.date] = DateConverter()
"""
from decimal import Decimal
from numbers import Number
import numpy as np
from numpy import ma
from matplotlib import cbook
class ConversionError(TypeError):
    """`TypeError` subclass signalling a failure in unit conversion."""
    pass
def _is_natively_supported(x):
"""
Return whether *x* is of a type that Matplotlib natively supports or an
array of objects of such types.
"""
# Matplotlib natively supports all number types except Decimal.
if np.iterable(x):
# Assume lists are homogeneous as other functions in unit system.
for thisx in x:
if thisx is ma.masked:
continue
return isinstance(thisx, Number) and not isinstance(thisx, Decimal)
else:
return isinstance(x, Number) and not isinstance(x, Decimal)
class AxisInfo:
    """
    Information to support default axis labeling, tick labeling, and limits.

    An instance of this class must be returned by
    `ConversionInterface.axisinfo`.
    """

    def __init__(self, majloc=None, minloc=None,
                 majfmt=None, minfmt=None, label=None,
                 default_limits=None):
        """
        Parameters
        ----------
        majloc, minloc : Locator, optional
            Tick locators for the major and minor ticks.
        majfmt, minfmt : Formatter, optional
            Tick formatters for the major and minor ticks.
        label : str, optional
            The default axis label.
        default_limits : optional
            The default min and max limits of the axis if no data has
            been plotted.

        Notes
        -----
        Any entry left as ``None`` means the axis simply keeps its default.
        """
        # Stash every argument verbatim under a same-named attribute;
        # consumers check each attribute against None individually.
        vars(self).update(
            majloc=majloc, minloc=minloc, majfmt=majfmt, minfmt=minfmt,
            label=label, default_limits=default_limits)
class ConversionInterface:
    """
    The minimal interface for a converter to take custom data types (or
    sequences) and convert them to values Matplotlib can use.
    """

    @staticmethod
    def axisinfo(unit, axis):
        """Return an `.AxisInfo` for the axis with the specified units."""
        # Base implementation: no axis info; subclasses override as needed.
        return None

    @staticmethod
    def default_units(x, axis):
        """Return the default unit for *x* or ``None`` for the given axis."""
        # Base implementation: no default unit; subclasses override as needed.
        return None

    @staticmethod
    def convert(obj, unit, axis):
        """
        Convert *obj* using *unit* for the specified *axis*.

        If *obj* is a sequence, return the converted sequence.  The output
        must be a sequence of scalars that can be used by the numpy array
        layer.
        """
        # Base implementation is the identity; subclasses override.
        return obj
class DecimalConverter(ConversionInterface):
    """Converter for decimal.Decimal data to float."""

    @staticmethod
    def convert(value, unit, axis):
        """
        Convert Decimals to floats.

        The *unit* and *axis* arguments are not used.

        Parameters
        ----------
        value : decimal.Decimal or iterable
            Decimal or list of Decimal need to be converted
        """
        if isinstance(value, Decimal):
            return float(value)
        # For iterables of Decimals, go through ma.asarray for masked input
        # (so the mask survives) and np.asarray otherwise.
        as_float_array = (
            ma.asarray if isinstance(value, ma.MaskedArray) else np.asarray)
        return as_float_array(value, dtype=float)

    # axisinfo and default_units can be inherited as Decimals are Numbers.
class Registry(dict):
    """Register types with conversion interface."""

    def get_converter(self, x):
        """Get the converter interface instance for *x*, or None."""
        # Unwrap pandas/xarray-style containers down to a numpy-level object.
        x = cbook._unpack_to_numpy(x)
        if isinstance(x, np.ndarray):
            # Only the element type matters, so look through any mask and
            # flatten; getdata() is a no-op for plain ndarrays.
            x = np.ma.getdata(x).ravel()
            if not x.size:
                # Empty array: infer the units from a one-element stand-in
                # of the same dtype.
                return self.get_converter(np.array([0], dtype=x.dtype))
        # Registry lookup along the full MRO of x's type.
        for klass in type(x).__mro__:
            if klass in self:
                return self[klass]
        # Lookup miss: fall back to the first finite element, if any.
        try:
            first = cbook._safe_first_finite(x)
        except (TypeError, StopIteration):
            return None
        # Avoid infinite recursion for pathological iterables whose elements
        # are instances of the same iterable class.
        if type(first) is type(x):
            return None
        return self.get_converter(first)
# The module-level unit registry, mapping data types to converter instances.
registry = Registry()
# Decimal support is registered out of the box.
registry[Decimal] = DecimalConverter()
venv\Lib\site-packages\matplotlib\widgets.py
"""
GUI neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine an `~.axes.Axes`
instance and pass that as the first parameter. Matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from contextlib import ExitStack
import copy
import itertools
from numbers import Integral, Number
from cycler import cycler
import numpy as np
import matplotlib as mpl
from . import (_api, _docstring, backend_tools, cbook, collections, colors,
text as mtext, ticker, transforms)
from .lines import Line2D
from .patches import Rectangle, Ellipse, Polygon
from .transforms import TransformedPatchPath, Affine2D
class LockDraw:
    """
    Cooperative drawing lock for canvas widgets.

    Some widgets, like the cursor, draw onto the canvas, and this is not
    desirable under all circumstances, like when the toolbar is in
    zoom-to-rect mode and drawing a rectangle.  A widget can reserve the
    canvas' lock with ``canvas.widgetlock(widget)`` before drawing; other
    widgets that also check the lock first are then prevented from drawing
    at the same time.
    """

    def __init__(self):
        # The object currently holding the lock; None while unlocked.
        self._owner = None

    def __call__(self, o):
        """Reserve the lock for *o*."""
        if self._owner is not None and self._owner is not o:
            raise ValueError('already locked')
        self._owner = o

    def release(self, o):
        """Release the lock from *o*."""
        if self._owner is not None and self._owner is not o:
            raise ValueError('you do not own this lock')
        self._owner = None

    def available(self, o):
        """Return whether drawing is available to *o*."""
        return self._owner is None or self._owner is o

    def isowner(self, o):
        """Return whether *o* owns this lock."""
        return self._owner is o

    def locked(self):
        """Return whether the lock is currently held by an owner."""
        return self._owner is not None
class Widget:
    """
    Abstract base class for GUI neutral widgets.
    """

    # Class-level defaults; instances (and subclasses) may shadow them.
    drawon = True    # whether the widget redraws itself after changes
    eventson = True  # whether callbacks are processed
    _active = True   # backing flag for the `active` property

    def set_active(self, active):
        """Set whether the widget is active."""
        self._active = active

    def get_active(self):
        """Get whether the widget is active."""
        return self._active

    # NOTE: set_active is overridden by SelectorWidgets.
    active = property(get_active, set_active, doc="Is the widget active?")

    def ignore(self, event):
        """
        Return whether *event* should be ignored.

        This method should be called at the beginning of any event callback.
        """
        # An inactive widget ignores every event.
        return not self.active
class AxesWidget(Widget):
    """
    Widget connected to a single `~matplotlib.axes.Axes`.

    To guarantee that the widget remains responsive and not garbage-collected,
    a reference to the object should be maintained by the user.

    This is necessary because the callback registry maintains only weak-refs
    to the functions, which are member functions of the widget.  If there are
    no references to the widget object it may be garbage collected which will
    disconnect the callbacks.

    Attributes
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    canvas : `~matplotlib.backend_bases.FigureCanvasBase`
        The parent figure canvas for the widget.
    active : bool
        If False, the widget does not respond to events.
    """

    def __init__(self, ax):
        self.ax = ax
        # Connection ids registered through connect_event, for later cleanup.
        self._cids = []

    @property
    def canvas(self):
        """The canvas of the (root) figure that owns this widget's Axes."""
        return self.ax.get_figure(root=True).canvas

    def connect_event(self, event, callback):
        """
        Connect a callback function with an event.

        This should be used in lieu of ``figure.canvas.mpl_connect`` since
        this function stores callback ids for later clean up.
        """
        self._cids.append(self.canvas.mpl_connect(event, callback))

    def disconnect_events(self):
        """Disconnect all events created by this widget."""
        for cid in self._cids:
            self.canvas.mpl_disconnect(cid)

    def _get_data_coords(self, event):
        """Return *event*'s data coordinates in this widget's Axes."""
        if event.inaxes is self.ax:
            # Common case: reuse the already-computed data coordinates
            # instead of re-running the inverse transform, which can
            # introduce floating point errors for synthetic events.
            return event.xdata, event.ydata
        # event.inaxes differs from self.ax (e.g. overlaid Axes), so
        # event.xdata/.ydata are in the wrong frame; invert our transform.
        return self.ax.transData.inverted().transform((event.x, event.y))
class Button(AxesWidget):
    """
    A GUI neutral button.

    For the button to remain responsive you must keep a reference to it.
    Call `.on_clicked` to connect to the button.

    Attributes
    ----------
    ax
        The `~.axes.Axes` the button renders into.
    label
        A `.Text` instance.
    color
        The color of the button when not hovering.
    hovercolor
        The color of the button when hovering.
    """

    def __init__(self, ax, label, image=None,
                 color='0.85', hovercolor='0.95', *, useblit=True):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The `~.axes.Axes` instance the button will be placed into.
        label : str
            The button text.
        image : array-like or PIL Image
            The image to place in the button, if not *None*.  The parameter is
            directly forwarded to `~.axes.Axes.imshow`.
        color : :mpltype:`color`
            The color of the button when not activated.
        hovercolor : :mpltype:`color`
            The color of the button when the mouse is over it.
        useblit : bool, default: True
            Use blitting for faster drawing if supported by the backend.
            See the tutorial :ref:`blitting` for details.

            .. versionadded:: 3.7
        """
        super().__init__(ax)

        if image is not None:
            ax.imshow(image)
        # Label text centered in the button, in Axes coordinates.
        self.label = ax.text(0.5, 0.5, label,
                             verticalalignment='center',
                             horizontalalignment='center',
                             transform=ax.transAxes)

        # Blit only when both requested by the caller and backend-supported.
        self._useblit = useblit and self.canvas.supports_blit

        self._observers = cbook.CallbackRegistry(signals=["clicked"])

        self.connect_event('button_press_event', self._click)
        self.connect_event('button_release_event', self._release)
        self.connect_event('motion_notify_event', self._motion)
        ax.set_navigate(False)  # no pan/zoom on the button Axes
        ax.set_facecolor(color)
        ax.set_xticks([])
        ax.set_yticks([])
        self.color = color
        self.hovercolor = hovercolor

    def _click(self, event):
        # Press inside the button: grab the mouse so the matching release is
        # delivered to this Axes even if the cursor wanders off it.
        if not self.eventson or self.ignore(event) or not self.ax.contains(event)[0]:
            return
        if event.canvas.mouse_grabber != self.ax:
            event.canvas.grab_mouse(self.ax)

    def _release(self, event):
        # Only act when we hold the mouse grab, i.e. the press started here.
        if self.ignore(event) or event.canvas.mouse_grabber != self.ax:
            return
        event.canvas.release_mouse(self.ax)
        # Fire 'clicked' only if the release also lands inside the button.
        if self.eventson and self.ax.contains(event)[0]:
            self._observers.process('clicked', event)

    def _motion(self, event):
        # Hover feedback: swap the facecolor, redrawing only on change.
        if self.ignore(event):
            return
        c = self.hovercolor if self.ax.contains(event)[0] else self.color
        if not colors.same_color(c, self.ax.get_facecolor()):
            self.ax.set_facecolor(c)
            if self.drawon:
                if self._useblit:
                    self.ax.draw_artist(self.ax)
                    self.canvas.blit(self.ax.bbox)
                else:
                    self.canvas.draw()

    def on_clicked(self, func):
        """
        Connect the callback function *func* to button click events.

        Returns a connection id, which can be used to disconnect the callback.
        """
        return self._observers.connect('clicked', lambda event: func(event))

    def disconnect(self, cid):
        """Remove the callback function with connection id *cid*."""
        self._observers.disconnect(cid)
class SliderBase(AxesWidget):
    """
    The base class for constructing Slider widgets. Not intended for direct
    usage.

    For the slider to remain responsive you must maintain a reference to it.
    """

    def __init__(self, ax, orientation, closedmin, closedmax,
                 valmin, valmax, valfmt, dragging, valstep):
        if ax.name == '3d':
            raise ValueError('Sliders cannot be added to 3D Axes')

        super().__init__(ax)
        _api.check_in_list(['horizontal', 'vertical'], orientation=orientation)

        self.orientation = orientation
        self.closedmin = closedmin
        self.closedmax = closedmax
        self.valmin = valmin
        self.valmax = valmax
        self.valstep = valstep
        self.drag_active = False
        self.valfmt = valfmt

        # The value axis spans [valmin, valmax]; which Axes axis that is
        # depends on the orientation.
        if orientation == "vertical":
            ax.set_ylim((valmin, valmax))
            axis = ax.yaxis
        else:
            ax.set_xlim((valmin, valmax))
            axis = ax.xaxis

        # Reuse the axis' major formatter for displaying values; fall back to
        # a fresh ScalarFormatter bound to the same axis.
        self._fmt = axis.get_major_formatter()
        if not isinstance(self._fmt, ticker.ScalarFormatter):
            self._fmt = ticker.ScalarFormatter()
            self._fmt.set_axis(axis)
        self._fmt.set_useOffset(False)  # No additive offset.
        self._fmt.set_useMathText(True)  # x sign before multiplicative offset.

        ax.set_axis_off()
        ax.set_navigate(False)

        self.connect_event("button_press_event", self._update)
        self.connect_event("button_release_event", self._update)
        if dragging:
            self.connect_event("motion_notify_event", self._update)
        self._observers = cbook.CallbackRegistry(signals=["changed"])

    def _stepped_value(self, val):
        """Return *val* coerced to closest number in the ``valstep`` grid."""
        if isinstance(self.valstep, Number):
            # Scalar step: snap to the arithmetic grid valmin + k * valstep.
            val = (self.valmin
                   + round((val - self.valmin) / self.valstep) * self.valstep)
        elif self.valstep is not None:
            valstep = np.asanyarray(self.valstep)
            if valstep.ndim != 1:
                raise ValueError(
                    f"valstep must have 1 dimension but has {valstep.ndim}"
                )
            # Array of allowed values: snap to the nearest one.
            val = valstep[np.argmin(np.abs(valstep - val))]
        return val

    def disconnect(self, cid):
        """
        Remove the observer with connection id *cid*.

        Parameters
        ----------
        cid : int
            Connection id of the observer to be removed.
        """
        self._observers.disconnect(cid)

    def reset(self):
        """Reset the slider to the initial value."""
        # np.any() handles both scalar values (Slider) and pairs (RangeSlider).
        if np.any(self.val != self.valinit):
            self.set_val(self.valinit)
class Slider(SliderBase):
    """
    A slider representing a floating point range.

    Create a slider from *valmin* to *valmax* in Axes *ax*. For the slider to
    remain responsive you must maintain a reference to it. Call
    :meth:`on_changed` to connect to the slider event.

    Attributes
    ----------
    val : float
        Slider value.
    """

    def __init__(self, ax, label, valmin, valmax, *, valinit=0.5, valfmt=None,
                 closedmin=True, closedmax=True, slidermin=None,
                 slidermax=None, dragging=True, valstep=None,
                 orientation='horizontal', initcolor='r',
                 track_color='lightgrey', handle_style=None, **kwargs):
        """
        Parameters
        ----------
        ax : Axes
            The Axes to put the slider in.
        label : str
            Slider label.
        valmin : float
            The minimum value of the slider.
        valmax : float
            The maximum value of the slider.
        valinit : float, default: 0.5
            The slider initial position.
        valfmt : str, default: None
            %-format string used to format the slider value.  If None, a
            `.ScalarFormatter` is used instead.
        closedmin : bool, default: True
            Whether the slider interval is closed on the bottom.
        closedmax : bool, default: True
            Whether the slider interval is closed on the top.
        slidermin : Slider, default: None
            Do not allow the current slider to have a value less than
            the value of the Slider *slidermin*.
        slidermax : Slider, default: None
            Do not allow the current slider to have a value greater than
            the value of the Slider *slidermax*.
        dragging : bool, default: True
            If True the slider can be dragged by the mouse.
        valstep : float or array-like, default: None
            If a float, the slider will snap to multiples of *valstep*.
            If an array the slider will snap to the values in the array.
        orientation : {'horizontal', 'vertical'}, default: 'horizontal'
            The orientation of the slider.
        initcolor : :mpltype:`color`, default: 'r'
            The color of the line at the *valinit* position.  Set to
            ``'none'`` for no line.
        track_color : :mpltype:`color`, default: 'lightgrey'
            The color of the background track.  The track is accessible for
            further styling via the *track* attribute.
        handle_style : dict
            Properties of the slider handle.  Default values are

            ========= ===== ======= ========================================
            Key       Value Default Description
            ========= ===== ======= ========================================
            facecolor color 'white' The facecolor of the slider handle.
            edgecolor color '.75'   The edgecolor of the slider handle.
            size      int   10      The size of the slider handle in points.
            ========= ===== ======= ========================================

            Other values will be transformed as marker{foo} and passed to the
            `~.Line2D` constructor.  e.g. ``handle_style = {'style': 'x'}``
            will result in ``markerstyle = 'x'``.

        Notes
        -----
        Additional kwargs are passed on to ``self.poly`` which is the
        `~matplotlib.patches.Rectangle` that draws the slider knob.  See the
        `.Rectangle` documentation for valid property names (``facecolor``,
        ``edgecolor``, ``alpha``, etc.).
        """
        super().__init__(ax, orientation, closedmin, closedmax,
                         valmin, valmax, valfmt, dragging, valstep)

        if slidermin is not None and not hasattr(slidermin, 'val'):
            raise ValueError(
                f"Argument slidermin ({type(slidermin)}) has no 'val'")
        if slidermax is not None and not hasattr(slidermax, 'val'):
            raise ValueError(
                f"Argument slidermax ({type(slidermax)}) has no 'val'")
        self.slidermin = slidermin
        self.slidermax = slidermax
        # Clamp the requested initial value into the (possibly open)
        # interval; an unreachable value falls back to valmin.
        valinit = self._value_in_bounds(valinit)
        if valinit is None:
            valinit = valmin
        self.val = valinit
        self.valinit = valinit

        defaults = {'facecolor': 'white', 'edgecolor': '.75', 'size': 10}
        handle_style = {} if handle_style is None else handle_style
        # Each handle-style key <k> is forwarded to Line2D as marker<k>.
        marker_props = {
            f'marker{k}': v for k, v in {**defaults, **handle_style}.items()
        }

        if orientation == 'vertical':
            self.track = Rectangle(
                (.25, 0), .5, 1,
                transform=ax.transAxes,
                facecolor=track_color
            )
            ax.add_patch(self.track)
            self.poly = ax.axhspan(valmin, valinit, .25, .75, **kwargs)
            # Drawing a longer line and clipping it to the track avoids
            # pixelation-related asymmetries.
            self.hline = ax.axhline(valinit, 0, 1, color=initcolor, lw=1,
                                    clip_path=TransformedPatchPath(self.track))
            handleXY = [[0.5], [valinit]]
        else:
            self.track = Rectangle(
                (0, .25), 1, .5,
                transform=ax.transAxes,
                facecolor=track_color
            )
            ax.add_patch(self.track)
            self.poly = ax.axvspan(valmin, valinit, .25, .75, **kwargs)
            self.vline = ax.axvline(valinit, 0, 1, color=initcolor, lw=1,
                                    clip_path=TransformedPatchPath(self.track))
            handleXY = [[valinit], [0.5]]
        self._handle, = ax.plot(
            *handleXY,
            "o",
            **marker_props,
            clip_on=False
        )

        if orientation == 'vertical':
            self.label = ax.text(0.5, 1.02, label, transform=ax.transAxes,
                                 verticalalignment='bottom',
                                 horizontalalignment='center')

            self.valtext = ax.text(0.5, -0.02, self._format(valinit),
                                   transform=ax.transAxes,
                                   verticalalignment='top',
                                   horizontalalignment='center')
        else:
            self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
                                 verticalalignment='center',
                                 horizontalalignment='right')

            self.valtext = ax.text(1.02, 0.5, self._format(valinit),
                                   transform=ax.transAxes,
                                   verticalalignment='center',
                                   horizontalalignment='left')

        self.set_val(valinit)

    def _value_in_bounds(self, val):
        """
        Return *val* snapped to the valstep grid and clipped to this slider's
        (and slidermin/slidermax's) bounds; None if an open bound excludes it.
        """
        val = self._stepped_value(val)

        if val <= self.valmin:
            if not self.closedmin:
                return
            val = self.valmin
        elif val >= self.valmax:
            if not self.closedmax:
                return
            val = self.valmax

        if self.slidermin is not None and val <= self.slidermin.val:
            if not self.closedmin:
                return
            val = self.slidermin.val

        if self.slidermax is not None and val >= self.slidermax.val:
            if not self.closedmax:
                return
            val = self.slidermax.val
        return val

    def _update(self, event):
        """Update the slider position."""
        # Only the left/primary mouse button (button == 1) drives the slider.
        if self.ignore(event) or event.button != 1:
            return

        if event.name == 'button_press_event' and self.ax.contains(event)[0]:
            self.drag_active = True
            event.canvas.grab_mouse(self.ax)

        if not self.drag_active:
            return

        if (event.name == 'button_release_event'
                or event.name == 'button_press_event' and not self.ax.contains(event)[0]):
            # Drag ends on any release, or on a press outside the Axes.
            self.drag_active = False
            event.canvas.release_mouse(self.ax)
            return

        xdata, ydata = self._get_data_coords(event)
        val = self._value_in_bounds(
            xdata if self.orientation == 'horizontal' else ydata)
        # Skip no-ops: out-of-bound positions (None) and unchanged values.
        if val not in [None, self.val]:
            self.set_val(val)

    def _format(self, val):
        """Pretty-print *val*."""
        if self.valfmt is not None:
            return self.valfmt % val
        else:
            # Format val flanked by the limits so it gets consistent digits.
            _, s, _ = self._fmt.format_ticks([self.valmin, val, self.valmax])
            # fmt.get_offset is actually the multiplicative factor, if any.
            return s + self._fmt.get_offset()

    def set_val(self, val):
        """
        Set slider value to *val*.

        Parameters
        ----------
        val : float
        """
        # Resize the filled span up to *val* and move the handle marker.
        if self.orientation == 'vertical':
            self.poly.set_height(val - self.poly.get_y())
            self._handle.set_ydata([val])
        else:
            self.poly.set_width(val - self.poly.get_x())
            self._handle.set_xdata([val])
        self.valtext.set_text(self._format(val))
        if self.drawon:
            self.ax.get_figure(root=True).canvas.draw_idle()
        self.val = val
        if self.eventson:
            self._observers.process('changed', val)

    def on_changed(self, func):
        """
        Connect *func* as callback function to changes of the slider value.

        Parameters
        ----------
        func : callable
            Function to call when slider is changed.
            The function must accept a single float as its arguments.

        Returns
        -------
        int
            Connection id (which can be used to disconnect *func*).
        """
        return self._observers.connect('changed', lambda val: func(val))
class RangeSlider(SliderBase):
    """
    A slider representing a range of floating point values. Defines the min and
    max of the range via the *val* attribute as a tuple of (min, max).

    Create a slider that defines a range contained within [*valmin*, *valmax*]
    in Axes *ax*. For the slider to remain responsive you must maintain a
    reference to it. Call :meth:`on_changed` to connect to the slider event.

    Attributes
    ----------
    val : tuple of float
        Slider value.
    """

    def __init__(
        self,
        ax,
        label,
        valmin,
        valmax,
        *,
        valinit=None,
        valfmt=None,
        closedmin=True,
        closedmax=True,
        dragging=True,
        valstep=None,
        orientation="horizontal",
        track_color='lightgrey',
        handle_style=None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        ax : Axes
            The Axes to put the slider in.
        label : str
            Slider label.
        valmin : float
            The minimum value of the slider.
        valmax : float
            The maximum value of the slider.
        valinit : tuple of float or None, default: None
            The initial positions of the slider. If None the initial positions
            will be at the 25th and 75th percentiles of the range.
        valfmt : str, default: None
            %-format string used to format the slider values.  If None, a
            `.ScalarFormatter` is used instead.
        closedmin : bool, default: True
            Whether the slider interval is closed on the bottom.
        closedmax : bool, default: True
            Whether the slider interval is closed on the top.
        dragging : bool, default: True
            If True the slider can be dragged by the mouse.
        valstep : float, default: None
            If given, the slider will snap to multiples of *valstep*.
        orientation : {'horizontal', 'vertical'}, default: 'horizontal'
            The orientation of the slider.
        track_color : :mpltype:`color`, default: 'lightgrey'
            The color of the background track.  The track is accessible for
            further styling via the *track* attribute.
        handle_style : dict
            Properties of the slider handles.  Default values are

            ========= ===== ======= =========================================
            Key       Value Default Description
            ========= ===== ======= =========================================
            facecolor color 'white' The facecolor of the slider handles.
            edgecolor color '.75'   The edgecolor of the slider handles.
            size      int   10      The size of the slider handles in points.
            ========= ===== ======= =========================================

            Other values will be transformed as marker{foo} and passed to the
            `~.Line2D` constructor.  e.g. ``handle_style = {'style': 'x'}``
            will result in ``markerstyle = 'x'``.

        Notes
        -----
        Additional kwargs are passed on to ``self.poly`` which is the
        `~matplotlib.patches.Polygon` that draws the slider knob.  See the
        `.Polygon` documentation for valid property names (``facecolor``,
        ``edgecolor``, ``alpha``, etc.).
        """
        super().__init__(ax, orientation, closedmin, closedmax,
                         valmin, valmax, valfmt, dragging, valstep)

        # Set a value to allow _value_in_bounds() to work.
        self.val = (valmin, valmax)
        if valinit is None:
            # Place at the 25th and 75th percentiles
            extent = valmax - valmin
            valinit = np.array([valmin + extent * 0.25,
                                valmin + extent * 0.75])
        else:
            valinit = self._value_in_bounds(valinit)
        self.val = valinit
        self.valinit = valinit

        defaults = {'facecolor': 'white', 'edgecolor': '.75', 'size': 10}
        handle_style = {} if handle_style is None else handle_style
        # Each handle-style key <k> is forwarded to Line2D as marker<k>.
        marker_props = {
            f'marker{k}': v for k, v in {**defaults, **handle_style}.items()
        }

        if orientation == "vertical":
            # NOTE(review): the track height here is 2 while Slider uses 1,
            # which makes the vertical track extend past the Axes in
            # transAxes coordinates -- verify this is intentional.
            self.track = Rectangle(
                (.25, 0), .5, 2,
                transform=ax.transAxes,
                facecolor=track_color
            )
            ax.add_patch(self.track)
            poly_transform = self.ax.get_yaxis_transform(which="grid")
            handleXY_1 = [.5, valinit[0]]
            handleXY_2 = [.5, valinit[1]]
        else:
            self.track = Rectangle(
                (0, .25), 1, .5,
                transform=ax.transAxes,
                facecolor=track_color
            )
            ax.add_patch(self.track)
            poly_transform = self.ax.get_xaxis_transform(which="grid")
            handleXY_1 = [valinit[0], .5]
            handleXY_2 = [valinit[1], .5]
        # The selected range is drawn as a 5-vertex polygon (a closed
        # rectangle); its vertices are maintained by _update_selection_poly.
        self.poly = Polygon(np.zeros([5, 2]), **kwargs)
        self._update_selection_poly(*valinit)
        self.poly.set_transform(poly_transform)
        self.poly.get_path()._interpolation_steps = 100
        self.ax.add_patch(self.poly)
        self.ax._request_autoscale_view()
        self._handles = [
            ax.plot(
                *handleXY_1,
                "o",
                **marker_props,
                clip_on=False
            )[0],
            ax.plot(
                *handleXY_2,
                "o",
                **marker_props,
                clip_on=False
            )[0]
        ]

        if orientation == "vertical":
            self.label = ax.text(
                0.5,
                1.02,
                label,
                transform=ax.transAxes,
                verticalalignment="bottom",
                horizontalalignment="center",
            )

            self.valtext = ax.text(
                0.5,
                -0.02,
                self._format(valinit),
                transform=ax.transAxes,
                verticalalignment="top",
                horizontalalignment="center",
            )
        else:
            self.label = ax.text(
                -0.02,
                0.5,
                label,
                transform=ax.transAxes,
                verticalalignment="center",
                horizontalalignment="right",
            )

            self.valtext = ax.text(
                1.02,
                0.5,
                self._format(valinit),
                transform=ax.transAxes,
                verticalalignment="center",
                horizontalalignment="left",
            )

        self._active_handle = None
        self.set_val(valinit)

    def _update_selection_poly(self, vmin, vmax):
        """
        Update the vertices of the *self.poly* slider in-place
        to cover the data range *vmin*, *vmax*.
        """
        # The vertices are positioned
        #  1 ------ 2
        #  |        |
        # 0, 4 ---- 3
        verts = self.poly.xy
        if self.orientation == "vertical":
            verts[0] = verts[4] = .25, vmin
            verts[1] = .25, vmax
            verts[2] = .75, vmax
            verts[3] = .75, vmin
        else:
            verts[0] = verts[4] = vmin, .25
            verts[1] = vmin, .75
            verts[2] = vmax, .75
            verts[3] = vmax, .25

    def _min_in_bounds(self, min):
        """Ensure the new min value is between valmin and self.val[1]."""
        if min <= self.valmin:
            if not self.closedmin:
                # Open lower bound: reject by keeping the current min.
                return self.val[0]
            min = self.valmin

        if min > self.val[1]:
            # The min may not exceed the current max.
            min = self.val[1]
        return self._stepped_value(min)

    def _max_in_bounds(self, max):
        """Ensure the new max value is between valmax and self.val[0]."""
        if max >= self.valmax:
            if not self.closedmax:
                # Open upper bound: reject by keeping the current max.
                return self.val[1]
            max = self.valmax

        if max <= self.val[0]:
            # The max may not fall below the current min.
            max = self.val[0]
        return self._stepped_value(max)

    def _value_in_bounds(self, vals):
        """Clip min, max values to the bounds."""
        return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))

    def _update_val_from_pos(self, pos):
        """Update the slider value based on a given position."""
        # Move whichever end of the range is closer to *pos*.
        idx = np.argmin(np.abs(self.val - pos))
        if idx == 0:
            val = self._min_in_bounds(pos)
            self.set_min(val)
        else:
            val = self._max_in_bounds(pos)
            self.set_max(val)
        if self._active_handle:
            if self.orientation == "vertical":
                self._active_handle.set_ydata([val])
            else:
                self._active_handle.set_xdata([val])

    def _update(self, event):
        """Update the slider position."""
        # Only the left/primary mouse button (button == 1) drives the slider.
        if self.ignore(event) or event.button != 1:
            return

        if event.name == "button_press_event" and self.ax.contains(event)[0]:
            self.drag_active = True
            event.canvas.grab_mouse(self.ax)

        if not self.drag_active:
            return

        if (event.name == "button_release_event"
                or event.name == "button_press_event" and not self.ax.contains(event)[0]):
            # Drag ends: release the mouse grab and forget the active handle.
            self.drag_active = False
            event.canvas.release_mouse(self.ax)
            self._active_handle = None
            return

        # determine which handle was grabbed
        xdata, ydata = self._get_data_coords(event)
        handle_index = np.argmin(np.abs(
            [h.get_xdata()[0] - xdata for h in self._handles]
            if self.orientation == "horizontal" else
            [h.get_ydata()[0] - ydata for h in self._handles]))
        handle = self._handles[handle_index]

        # these checks ensure smooth behavior if the handles swap which one
        # has a higher value. i.e. if one is dragged over and past the other.
        if handle is not self._active_handle:
            self._active_handle = handle

        self._update_val_from_pos(xdata if self.orientation == "horizontal" else ydata)

    def _format(self, val):
        """Pretty-print *val*."""
        if self.valfmt is not None:
            return f"({self.valfmt % val[0]}, {self.valfmt % val[1]})"
        else:
            # Format both ends flanked by the limits for consistent digits.
            _, s1, s2, _ = self._fmt.format_ticks(
                [self.valmin, *val, self.valmax]
            )
            # fmt.get_offset is actually the multiplicative factor, if any.
            s1 += self._fmt.get_offset()
            s2 += self._fmt.get_offset()
            # Use f string to avoid issues with backslashes when cast to a str
            return f"({s1}, {s2})"

    def set_min(self, min):
        """
        Set the lower value of the slider to *min*.

        Parameters
        ----------
        min : float
        """
        self.set_val((min, self.val[1]))

    def set_max(self, max):
        """
        Set the upper value of the slider to *max*.

        Parameters
        ----------
        max : float
        """
        self.set_val((self.val[0], max))

    def set_val(self, val):
        """
        Set slider value to *val*.

        Parameters
        ----------
        val : tuple or array-like of float
        """
        # Order the pair so val[0] <= val[1] regardless of input order.
        val = np.sort(val)
        _api.check_shape((2,), val=val)
        # Reset value to allow _value_in_bounds() to work.
        self.val = (self.valmin, self.valmax)
        vmin, vmax = self._value_in_bounds(val)
        self._update_selection_poly(vmin, vmax)
        if self.orientation == "vertical":
            self._handles[0].set_ydata([vmin])
            self._handles[1].set_ydata([vmax])
        else:
            self._handles[0].set_xdata([vmin])
            self._handles[1].set_xdata([vmax])

        self.valtext.set_text(self._format((vmin, vmax)))

        if self.drawon:
            self.ax.get_figure(root=True).canvas.draw_idle()
        self.val = (vmin, vmax)
        if self.eventson:
            self._observers.process("changed", (vmin, vmax))

    def on_changed(self, func):
        """
        Connect *func* as callback function to changes of the slider value.

        Parameters
        ----------
        func : callable
            Function to call when slider is changed. The function
            must accept a 2-tuple of floats as its argument.

        Returns
        -------
        int
            Connection id (which can be used to disconnect *func*).
        """
        return self._observers.connect('changed', lambda val: func(val))
def _expand_text_props(props):
    """
    Normalize *props* to an iterable of per-label `.Text` property dicts.

    An empty/None *props* yields an endless stream of empty dicts; otherwise
    the normalized properties are cycled through via `cycler`.
    """
    normalized = cbook.normalize_kwargs(props, mtext.Text)
    if not normalized:
        return itertools.repeat({})
    return cycler(**normalized)()
class CheckButtons(AxesWidget):
    r"""
    A GUI neutral set of check buttons.
    For the check buttons to remain responsive you must keep a
    reference to this object.
    Connect to the CheckButtons with the `.on_clicked` method.
    Attributes
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    labels : list of `~matplotlib.text.Text`
        The text label objects of the check buttons.
    """
    def __init__(self, ax, labels, actives=None, *, useblit=True,
                 label_props=None, frame_props=None, check_props=None):
        """
        Add check buttons to `~.axes.Axes` instance *ax*.
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The parent Axes for the widget.
        labels : list of str
            The labels of the check buttons.
        actives : list of bool, optional
            The initial check states of the buttons. The list must have the
            same length as *labels*. If not given, all buttons are unchecked.
        useblit : bool, default: True
            Use blitting for faster drawing if supported by the backend.
            See the tutorial :ref:`blitting` for details.
            .. versionadded:: 3.7
        label_props : dict, optional
            Dictionary of `.Text` properties to be used for the labels.
            .. versionadded:: 3.7
        frame_props : dict, optional
            Dictionary of scatter `.Collection` properties to be used for the
            check button frame. Defaults (label font size / 2)**2 size, black
            edgecolor, no facecolor, and 1.0 linewidth.
            .. versionadded:: 3.7
        check_props : dict, optional
            Dictionary of scatter `.Collection` properties to be used for the
            check button check. Defaults to (label font size / 2)**2 size,
            black color, and 1.0 linewidth.
            .. versionadded:: 3.7
        """
        super().__init__(ax)
        _api.check_isinstance((dict, None), label_props=label_props,
                              frame_props=frame_props, check_props=check_props)
        # The widget Axes carries no ticks and is not pan/zoomable.
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_navigate(False)
        if actives is None:
            actives = [False] * len(labels)
        self._useblit = useblit and self.canvas.supports_blit
        self._background = None
        # Evenly space the buttons vertically (top to bottom), leaving a
        # one-step margin at the top and bottom of the Axes.
        ys = np.linspace(1, 0, len(labels)+2)[1:-1]
        label_props = _expand_text_props(label_props)
        self.labels = [
            ax.text(0.25, y, label, transform=ax.transAxes,
                    horizontalalignment="left", verticalalignment="center",
                    **props)
            for y, label, props in zip(ys, labels, label_props)]
        text_size = np.array([text.get_fontsize() for text in self.labels]) / 2
        frame_props = {
            's': text_size**2,
            'linewidth': 1,
            **cbook.normalize_kwargs(frame_props, collections.PathCollection),
            'marker': 's',
            'transform': ax.transAxes,
        }
        # A bare 'color' in frame_props sets the frame edge; the interior
        # stays unfilled unless an explicit facecolor was given.
        frame_props.setdefault('facecolor', frame_props.get('color', 'none'))
        frame_props.setdefault('edgecolor', frame_props.pop('color', 'black'))
        self._frames = ax.scatter([0.15] * len(ys), ys, **frame_props)
        check_props = {
            'linewidth': 1,
            's': text_size**2,
            **cbook.normalize_kwargs(check_props, collections.PathCollection),
            'marker': 'x',
            'transform': ax.transAxes,
            'animated': self._useblit,
        }
        check_props.setdefault('facecolor', check_props.pop('color', 'black'))
        self._checks = ax.scatter([0.15] * len(ys), ys, **check_props)
        # The user may have passed custom colours in check_props, so we need to
        # create the checks (above), and modify the visibility after getting
        # whatever the user set.
        self._init_status(actives)
        self.connect_event('button_press_event', self._clicked)
        if self._useblit:
            self.connect_event('draw_event', self._clear)
        self._observers = cbook.CallbackRegistry(signals=["clicked"])
    def _clear(self, event):
        """Internal event handler to clear the buttons."""
        if self.ignore(event) or self.canvas.is_saving():
            return
        # Snapshot the Axes (without the animated checks) as the blit
        # background, then draw the checks on top.
        self._background = self.canvas.copy_from_bbox(self.ax.bbox)
        self.ax.draw_artist(self._checks)
    def _clicked(self, event):
        """Toggle the check button nearest to a left-click inside the Axes."""
        if self.ignore(event) or event.button != 1 or not self.ax.contains(event)[0]:
            return
        idxs = [  # Indices of frames and of texts that contain the event.
            *self._frames.contains(event)[1]["ind"],
            *[i for i, text in enumerate(self.labels) if text.contains(event)[0]]]
        if idxs:
            coords = self._frames.get_offset_transform().transform(
                self._frames.get_offsets())
            self.set_active(  # Closest index, only looking in idxs.
                idxs[(((event.x, event.y) - coords[idxs]) ** 2).sum(-1).argmin()])
    def set_label_props(self, props):
        """
        Set properties of the `.Text` labels.
        .. versionadded:: 3.7
        Parameters
        ----------
        props : dict
            Dictionary of `.Text` properties to be used for the labels.
        """
        _api.check_isinstance(dict, props=props)
        props = _expand_text_props(props)
        for text, prop in zip(self.labels, props):
            text.update(prop)
    def set_frame_props(self, props):
        """
        Set properties of the check button frames.
        .. versionadded:: 3.7
        Parameters
        ----------
        props : dict
            Dictionary of `.Collection` properties to be used for the check
            button frames.
        """
        _api.check_isinstance(dict, props=props)
        if 's' in props:  # Keep API consistent with constructor.
            props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))
        self._frames.update(props)
    def set_check_props(self, props):
        """
        Set properties of the check button checks.
        .. versionadded:: 3.7
        Parameters
        ----------
        props : dict
            Dictionary of `.Collection` properties to be used for the check
            button check.
        """
        _api.check_isinstance(dict, props=props)
        if 's' in props:  # Keep API consistent with constructor.
            props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))
        actives = self.get_status()
        self._checks.update(props)
        # If new colours are supplied, then we must re-apply the status.
        self._init_status(actives)
    def set_active(self, index, state=None):
        """
        Modify the state of a check button by index.
        Callbacks will be triggered if :attr:`eventson` is True.
        Parameters
        ----------
        index : int
            Index of the check button to toggle.
        state : bool, optional
            If a boolean value, set the state explicitly. If no value is
            provided, the state is toggled.
        Raises
        ------
        ValueError
            If *index* is invalid.
        TypeError
            If *state* is not boolean.
        """
        if index not in range(len(self.labels)):
            raise ValueError(f'Invalid CheckButton index: {index}')
        _api.check_isinstance((bool, None), state=state)
        # A check is "off" when its facecolor is fully transparent ('none').
        invisible = colors.to_rgba('none')
        facecolors = self._checks.get_facecolor()
        if state is None:
            # Toggle: the new state is the opposite of "currently invisible".
            state = colors.same_color(facecolors[index], invisible)
        facecolors[index] = self._active_check_colors[index] if state else invisible
        self._checks.set_facecolor(facecolors)
        if self.drawon:
            if self._useblit:
                # Blit path: restore the cached background and redraw only
                # the checks collection.
                if self._background is not None:
                    self.canvas.restore_region(self._background)
                self.ax.draw_artist(self._checks)
                self.canvas.blit(self.ax.bbox)
            else:
                self.canvas.draw()
        if self.eventson:
            self._observers.process('clicked', self.labels[index].get_text())
    def _init_status(self, actives):
        """
        Initialize properties to match active status.
        The user may have passed custom colours in *check_props* to the
        constructor, or to `.set_check_props`, so we need to modify the
        visibility after getting whatever the user set.
        """
        self._active_check_colors = self._checks.get_facecolor()
        if len(self._active_check_colors) == 1:
            # A single colour applies to every button.
            self._active_check_colors = np.repeat(self._active_check_colors,
                                                  len(actives), axis=0)
        self._checks.set_facecolor(
            [ec if active else "none"
             for ec, active in zip(self._active_check_colors, actives)])
    def clear(self):
        """Uncheck all checkboxes."""
        self._checks.set_facecolor(['none'] * len(self._active_check_colors))
        # NOTE(review): '_lines' is not set anywhere in this class — this
        # looks like compatibility with an older line-based check
        # implementation; confirm before removing.
        if hasattr(self, '_lines'):
            for l1, l2 in self._lines:
                l1.set_visible(False)
                l2.set_visible(False)
        if self.drawon:
            self.canvas.draw()
        if self.eventson:
            # Call with no label, as all checkboxes are being cleared.
            self._observers.process('clicked', None)
    def get_status(self):
        """
        Return a list of the status (True/False) of all of the check buttons.
        """
        # A button is checked iff its check mark is not fully transparent.
        return [not colors.same_color(color, colors.to_rgba("none"))
                for color in self._checks.get_facecolors()]
    def get_checked_labels(self):
        """Return a list of labels currently checked by user."""
        return [l.get_text() for l, box_checked in
                zip(self.labels, self.get_status())
                if box_checked]
    def on_clicked(self, func):
        """
        Connect the callback function *func* to button click events.
        Parameters
        ----------
        func : callable
            When the button is clicked, call *func* with button label.
            When all buttons are cleared, call *func* with None.
            The callback func must have the signature::
                def func(label: str | None) -> Any
            Return values may exist, but are ignored.
        Returns
        -------
        A connection id, which can be used to disconnect the callback.
        """
        return self._observers.connect('clicked', lambda text: func(text))
    def disconnect(self, cid):
        """Remove the observer with connection id *cid*."""
        self._observers.disconnect(cid)
class TextBox(AxesWidget):
    """
    A GUI neutral text input box.
    For the text box to remain responsive you must keep a reference to it.
    Call `.on_text_change` to be updated whenever the text changes.
    Call `.on_submit` to be updated whenever the user hits enter or
    leaves the text entry field.
    Attributes
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    label : `~matplotlib.text.Text`
    color : :mpltype:`color`
        The color of the text box when not hovering.
    hovercolor : :mpltype:`color`
        The color of the text box when hovering.
    """
    def __init__(self, ax, label, initial='', *,
                 color='.95', hovercolor='1', label_pad=.01,
                 textalignment="left"):
        """
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The `~.axes.Axes` instance the button will be placed into.
        label : str
            Label for this text box.
        initial : str
            Initial value in the text box.
        color : :mpltype:`color`
            The color of the box.
        hovercolor : :mpltype:`color`
            The color of the box when the mouse is over it.
        label_pad : float
            The distance between the label and the right side of the textbox.
        textalignment : {'left', 'center', 'right'}
            The horizontal location of the text.
        """
        super().__init__(ax)
        # Map the alignment name to the x position (in Axes coordinates) at
        # which the display text is anchored.
        self._text_position = _api.check_getitem(
            {"left": 0.05, "center": 0.5, "right": 0.95},
            textalignment=textalignment)
        # The label sits just outside the box, to its left.
        self.label = ax.text(
            -label_pad, 0.5, label, transform=ax.transAxes,
            verticalalignment='center', horizontalalignment='right')
        # TextBox's text object should not parse mathtext at all.
        self.text_disp = self.ax.text(
            self._text_position, 0.5, initial, transform=self.ax.transAxes,
            verticalalignment='center', horizontalalignment=textalignment,
            parse_math=False)
        self._observers = cbook.CallbackRegistry(signals=["change", "submit"])
        ax.set(
            xlim=(0, 1), ylim=(0, 1),  # s.t. cursor appears from first click.
            navigate=False, facecolor=color,
            xticks=[], yticks=[])
        # Index into the text string where the insertion cursor sits.
        self.cursor_index = 0
        # The cursor is drawn in pixel coordinates (IdentityTransform);
        # _rendercursor() positions it from the text's window extent.
        self.cursor = ax.vlines(0, 0, 0, visible=False, color="k", lw=1,
                                transform=mpl.transforms.IdentityTransform())
        self.connect_event('button_press_event', self._click)
        self.connect_event('button_release_event', self._release)
        self.connect_event('motion_notify_event', self._motion)
        self.connect_event('key_press_event', self._keypress)
        self.connect_event('resize_event', self._resize)
        self.color = color
        self.hovercolor = hovercolor
        # True while this widget owns keyboard input (set by begin_typing).
        self.capturekeystrokes = False
    @property
    def text(self):
        """The current text in the box (read-only; use `.set_val` to set)."""
        return self.text_disp.get_text()
    def _rendercursor(self):
        # this is a hack to figure out where the cursor should go.
        # we draw the text up to where the cursor should go, measure
        # and save its dimensions, draw the real text, then put the cursor
        # at the saved dimensions
        # This causes a single extra draw if the figure has never been rendered
        # yet, which should be fine as we're going to repeatedly re-render the
        # figure later anyways.
        fig = self.ax.get_figure(root=True)
        if fig._get_renderer() is None:
            fig.canvas.draw()
        text = self.text_disp.get_text()  # Save value before overwriting it.
        widthtext = text[:self.cursor_index]
        bb_text = self.text_disp.get_window_extent()
        # Use "," as a stand-in so an empty prefix still has a measurable
        # height.
        self.text_disp.set_text(widthtext or ",")
        bb_widthtext = self.text_disp.get_window_extent()
        if bb_text.y0 == bb_text.y1:  # Restoring the height if no text.
            bb_text.y0 -= bb_widthtext.height / 2
            bb_text.y1 += bb_widthtext.height / 2
        elif not widthtext:  # Keep width to 0.
            bb_text.x1 = bb_text.x0
        else:  # Move the cursor using width of bb_widthtext.
            bb_text.x1 = bb_text.x0 + bb_widthtext.width
        self.cursor.set(
            segments=[[(bb_text.x1, bb_text.y0), (bb_text.x1, bb_text.y1)]],
            visible=True)
        self.text_disp.set_text(text)
        fig.canvas.draw()
    def _release(self, event):
        """Internal handler: release the mouse grab acquired in `._click`."""
        if self.ignore(event):
            return
        if event.canvas.mouse_grabber != self.ax:
            return
        event.canvas.release_mouse(self.ax)
    def _keypress(self, event):
        """Internal handler: edit the text / move the cursor on key press."""
        if self.ignore(event):
            return
        if self.capturekeystrokes:
            key = event.key
            text = self.text
            # Single-character keys insert at the cursor; named keys
            # (left/right/home/end/backspace/delete) edit or move it.
            if len(key) == 1:
                text = (text[:self.cursor_index] + key +
                        text[self.cursor_index:])
                self.cursor_index += 1
            elif key == "right":
                if self.cursor_index != len(text):
                    self.cursor_index += 1
            elif key == "left":
                if self.cursor_index != 0:
                    self.cursor_index -= 1
            elif key == "home":
                self.cursor_index = 0
            elif key == "end":
                self.cursor_index = len(text)
            elif key == "backspace":
                if self.cursor_index != 0:
                    text = (text[:self.cursor_index - 1] +
                            text[self.cursor_index:])
                    self.cursor_index -= 1
            elif key == "delete":
                if self.cursor_index != len(self.text):
                    text = (text[:self.cursor_index] +
                            text[self.cursor_index + 1:])
            self.text_disp.set_text(text)
            self._rendercursor()
            if self.eventson:
                self._observers.process('change', self.text)
                if key in ["enter", "return"]:
                    self._observers.process('submit', self.text)
    def set_val(self, val):
        """Set the text to str(*val*), notifying 'change' and 'submit'."""
        newval = str(val)
        if self.text == newval:
            return
        self.text_disp.set_text(newval)
        self._rendercursor()
        if self.eventson:
            self._observers.process('change', self.text)
            self._observers.process('submit', self.text)
    def begin_typing(self):
        """Start capturing keystrokes into the text box."""
        self.capturekeystrokes = True
        # Disable keypress shortcuts, which may otherwise cause the figure to
        # be saved, closed, etc., until the user stops typing. The way to
        # achieve this depends on whether toolmanager is in use.
        stack = ExitStack()  # Register cleanup actions when user stops typing.
        self._on_stop_typing = stack.close
        toolmanager = getattr(
            self.ax.get_figure(root=True).canvas.manager, "toolmanager", None)
        if toolmanager is not None:
            # If using toolmanager, lock keypresses, and plan to release the
            # lock when typing stops.
            toolmanager.keypresslock(self)
            stack.callback(toolmanager.keypresslock.release, self)
        else:
            # If not using toolmanager, disable all keypress-related rcParams.
            # Avoid spurious warnings if keymaps are getting deprecated.
            with _api.suppress_matplotlib_deprecation_warning():
                stack.enter_context(mpl.rc_context(
                    {k: [] for k in mpl.rcParams if k.startswith("keymap.")}))
    def stop_typing(self):
        """Stop capturing keystrokes and, if typing was active, submit."""
        if self.capturekeystrokes:
            self._on_stop_typing()
            self._on_stop_typing = None
            notifysubmit = True
        else:
            notifysubmit = False
        self.capturekeystrokes = False
        self.cursor.set_visible(False)
        self.ax.get_figure(root=True).canvas.draw()
        if notifysubmit and self.eventson:
            # Because process() might throw an error in the user's code, only
            # call it once we've already done our cleanup.
            self._observers.process('submit', self.text)
    def _click(self, event):
        """Internal handler: focus the box and place the cursor on click."""
        if self.ignore(event):
            return
        if not self.ax.contains(event)[0]:
            # Click outside the box ends the editing session.
            self.stop_typing()
            return
        if not self.eventson:
            return
        if event.canvas.mouse_grabber != self.ax:
            event.canvas.grab_mouse(self.ax)
        if not self.capturekeystrokes:
            self.begin_typing()
        self.cursor_index = self.text_disp._char_index_at(event.x)
        self._rendercursor()
    def _resize(self, event):
        """Internal handler: a canvas resize ends the editing session."""
        self.stop_typing()
    def _motion(self, event):
        """Internal handler: switch the box colour on hover."""
        if self.ignore(event):
            return
        c = self.hovercolor if self.ax.contains(event)[0] else self.color
        # Only redraw when the colour actually changes.
        if not colors.same_color(c, self.ax.get_facecolor()):
            self.ax.set_facecolor(c)
            if self.drawon:
                self.ax.get_figure(root=True).canvas.draw()
    def on_text_change(self, func):
        """
        When the text changes, call this *func* with event.
        A connection id is returned which can be used to disconnect.
        """
        return self._observers.connect('change', lambda text: func(text))
    def on_submit(self, func):
        """
        When the user hits enter or leaves the submission box, call this
        *func* with event.
        A connection id is returned which can be used to disconnect.
        """
        return self._observers.connect('submit', lambda text: func(text))
    def disconnect(self, cid):
        """Remove the observer with connection id *cid*."""
        self._observers.disconnect(cid)
class RadioButtons(AxesWidget):
    """
    A GUI neutral radio button.
    For the buttons to remain responsive you must keep a reference to this
    object.
    Connect to the RadioButtons with the `.on_clicked` method.
    Attributes
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    activecolor : :mpltype:`color`
        The color of the selected button.
    labels : list of `.Text`
        The button labels.
    value_selected : str
        The label text of the currently selected button.
    index_selected : int
        The index of the selected button.
    """
    def __init__(self, ax, labels, active=0, activecolor=None, *,
                 useblit=True, label_props=None, radio_props=None):
        """
        Add radio buttons to an `~.axes.Axes`.
        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            The Axes to add the buttons to.
        labels : list of str
            The button labels.
        active : int
            The index of the initially selected button.
        activecolor : :mpltype:`color`
            The color of the selected button. The default is ``'blue'`` if not
            specified here or in *radio_props*.
        useblit : bool, default: True
            Use blitting for faster drawing if supported by the backend.
            See the tutorial :ref:`blitting` for details.
            .. versionadded:: 3.7
        label_props : dict or list of dict, optional
            Dictionary of `.Text` properties to be used for the labels.
            .. versionadded:: 3.7
        radio_props : dict, optional
            Dictionary of scatter `.Collection` properties to be used for the
            radio buttons. Defaults to (label font size / 2)**2 size, black
            edgecolor, and *activecolor* facecolor (when active).
            .. note::
                If a facecolor is supplied in *radio_props*, it will override
                *activecolor*. This may be used to provide an active color per
                button.
            .. versionadded:: 3.7
        """
        super().__init__(ax)
        _api.check_isinstance((dict, None), label_props=label_props,
                              radio_props=radio_props)
        radio_props = cbook.normalize_kwargs(radio_props,
                                             collections.PathCollection)
        # An explicit facecolor in *radio_props* wins over *activecolor*.
        if activecolor is not None:
            if 'facecolor' in radio_props:
                _api.warn_external(
                    'Both the *activecolor* parameter and the *facecolor* '
                    'key in the *radio_props* parameter has been specified. '
                    '*activecolor* will be ignored.')
        else:
            activecolor = 'blue'  # Default.
        self._activecolor = activecolor
        # Remembered so that clear() can restore the initial selection.
        self._initial_active = active
        self.value_selected = labels[active]
        self.index_selected = active
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_navigate(False)
        # Evenly space the buttons vertically, with a one-step margin at the
        # top and bottom of the Axes.
        ys = np.linspace(1, 0, len(labels) + 2)[1:-1]
        self._useblit = useblit and self.canvas.supports_blit
        self._background = None
        label_props = _expand_text_props(label_props)
        self.labels = [
            ax.text(0.25, y, label, transform=ax.transAxes,
                    horizontalalignment="left", verticalalignment="center",
                    **props)
            for y, label, props in zip(ys, labels, label_props)]
        text_size = np.array([text.get_fontsize() for text in self.labels]) / 2
        radio_props = {
            's': text_size**2,
            **radio_props,
            'marker': 'o',
            'transform': ax.transAxes,
            'animated': self._useblit,
        }
        radio_props.setdefault('edgecolor', radio_props.get('color', 'black'))
        radio_props.setdefault('facecolor',
                               radio_props.pop('color', activecolor))
        self._buttons = ax.scatter([.15] * len(ys), ys, **radio_props)
        # The user may have passed custom colours in radio_props, so we need to
        # create the radios, and modify the visibility after getting whatever
        # the user set.
        self._active_colors = self._buttons.get_facecolor()
        if len(self._active_colors) == 1:
            # A single colour applies to every button.
            self._active_colors = np.repeat(self._active_colors, len(labels),
                                            axis=0)
        self._buttons.set_facecolor(
            [activecolor if i == active else "none"
             for i, activecolor in enumerate(self._active_colors)])
        self.connect_event('button_press_event', self._clicked)
        if self._useblit:
            self.connect_event('draw_event', self._clear)
        self._observers = cbook.CallbackRegistry(signals=["clicked"])
    def _clear(self, event):
        """Internal event handler to clear the buttons."""
        if self.ignore(event) or self.canvas.is_saving():
            return
        # Snapshot the Axes (without the animated buttons) as the blit
        # background, then draw the buttons on top.
        self._background = self.canvas.copy_from_bbox(self.ax.bbox)
        self.ax.draw_artist(self._buttons)
    def _clicked(self, event):
        """Activate the radio button nearest to a left-click inside the Axes."""
        if self.ignore(event) or event.button != 1 or not self.ax.contains(event)[0]:
            return
        idxs = [  # Indices of buttons and of texts that contain the event.
            *self._buttons.contains(event)[1]["ind"],
            *[i for i, text in enumerate(self.labels) if text.contains(event)[0]]]
        if idxs:
            coords = self._buttons.get_offset_transform().transform(
                self._buttons.get_offsets())
            self.set_active(  # Closest index, only looking in idxs.
                idxs[(((event.x, event.y) - coords[idxs]) ** 2).sum(-1).argmin()])
    def set_label_props(self, props):
        """
        Set properties of the `.Text` labels.
        .. versionadded:: 3.7
        Parameters
        ----------
        props : dict
            Dictionary of `.Text` properties to be used for the labels.
        """
        _api.check_isinstance(dict, props=props)
        props = _expand_text_props(props)
        for text, prop in zip(self.labels, props):
            text.update(prop)
    def set_radio_props(self, props):
        """
        Set properties of the radio button `.Collection`.
        .. versionadded:: 3.7
        Parameters
        ----------
        props : dict
            Dictionary of `.Collection` properties to be used for the radio
            buttons.
        """
        _api.check_isinstance(dict, props=props)
        if 's' in props:  # Keep API consistent with constructor.
            props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels))
        self._buttons.update(props)
        # Re-derive the per-button active colours and re-apply the current
        # selection, since updating may have replaced the facecolors.
        self._active_colors = self._buttons.get_facecolor()
        if len(self._active_colors) == 1:
            self._active_colors = np.repeat(self._active_colors,
                                            len(self.labels), axis=0)
        self._buttons.set_facecolor(
            [activecolor if text.get_text() == self.value_selected else "none"
             for text, activecolor in zip(self.labels, self._active_colors)])
    @property
    def activecolor(self):
        """The color of the selected button."""
        return self._activecolor
    @activecolor.setter
    def activecolor(self, activecolor):
        colors._check_color_like(activecolor=activecolor)
        self._activecolor = activecolor
        # Make sure the image is updated with the new active color.
        self.set_radio_props({'facecolor': activecolor})
    def set_active(self, index):
        """
        Select button with number *index*.
        Callbacks will be triggered if :attr:`eventson` is True.
        Parameters
        ----------
        index : int
            The index of the button to activate.
        Raises
        ------
        ValueError
            If the index is invalid.
        """
        if index not in range(len(self.labels)):
            raise ValueError(f'Invalid RadioButton index: {index}')
        self.value_selected = self.labels[index].get_text()
        self.index_selected = index
        # Only the selected button gets a visible facecolor; all others are
        # fully transparent.
        button_facecolors = self._buttons.get_facecolor()
        button_facecolors[:] = colors.to_rgba("none")
        button_facecolors[index] = colors.to_rgba(self._active_colors[index])
        self._buttons.set_facecolor(button_facecolors)
        if self.drawon:
            if self._useblit:
                # Blit path: restore the cached background and redraw only
                # the buttons collection.
                if self._background is not None:
                    self.canvas.restore_region(self._background)
                self.ax.draw_artist(self._buttons)
                self.canvas.blit(self.ax.bbox)
            else:
                self.canvas.draw()
        if self.eventson:
            self._observers.process('clicked', self.labels[index].get_text())
    def clear(self):
        """Reset the active button to the initially active one."""
        self.set_active(self._initial_active)
    def on_clicked(self, func):
        """
        Connect the callback function *func* to button click events.
        Parameters
        ----------
        func : callable
            When the button is clicked, call *func* with button label.
            When all buttons are cleared, call *func* with None.
            The callback func must have the signature::
                def func(label: str | None) -> Any
            Return values may exist, but are ignored.
        Returns
        -------
        A connection id, which can be used to disconnect the callback.
        """
        return self._observers.connect('clicked', func)
    def disconnect(self, cid):
        """Remove the observer with connection id *cid*."""
        self._observers.disconnect(cid)
class SubplotTool(Widget):
    """
    A tool to adjust the subplot params of a `.Figure`.
    """
    def __init__(self, targetfig, toolfig):
        """
        Parameters
        ----------
        targetfig : `~matplotlib.figure.Figure`
            The figure instance to adjust.
        toolfig : `~matplotlib.figure.Figure`
            The figure instance to embed the subplot tool into.
        """
        self.figure = toolfig
        self.targetfig = targetfig
        toolfig.subplots_adjust(left=0.2, right=0.9)
        toolfig.suptitle("Click on slider to adjust subplot param")
        self._sliders = []
        names = ["left", "bottom", "right", "top", "wspace", "hspace"]
        # The last subplot, removed below, keeps space for the "Reset" button.
        for name, ax in zip(names, toolfig.subplots(len(names) + 1)):
            ax.set_navigate(False)
            # Each slider starts at the target figure's current value for
            # the corresponding subplot parameter.
            slider = Slider(ax, name, 0, 1,
                            valinit=getattr(targetfig.subplotpars, name))
            slider.on_changed(self._on_slider_changed)
            self._sliders.append(slider)
        toolfig.axes[-1].remove()
        (self.sliderleft, self.sliderbottom, self.sliderright, self.slidertop,
         self.sliderwspace, self.sliderhspace) = self._sliders
        # left/bottom/wspace/hspace must stay strictly below their maxima;
        # right/top strictly above their minima.
        for slider in [self.sliderleft, self.sliderbottom,
                       self.sliderwspace, self.sliderhspace]:
            slider.closedmax = False
        for slider in [self.sliderright, self.slidertop]:
            slider.closedmin = False
        # constraints: left must stay below right, bottom below top.
        self.sliderleft.slidermax = self.sliderright
        self.sliderright.slidermin = self.sliderleft
        self.sliderbottom.slidermax = self.slidertop
        self.slidertop.slidermin = self.sliderbottom
        bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
        self.buttonreset = Button(bax, 'Reset')
        self.buttonreset.on_clicked(self._on_reset)
    def _on_slider_changed(self, _):
        """Push all current slider values into the target figure."""
        self.targetfig.subplots_adjust(
            **{slider.label.get_text(): slider.val
               for slider in self._sliders})
        if self.drawon:
            self.targetfig.canvas.draw()
    def _on_reset(self, event):
        """Reset every slider (and the target figure) to its initial value."""
        with ExitStack() as stack:
            # Temporarily disable drawing on self and self's sliders, and
            # disconnect slider events (as the subplotparams can be temporarily
            # invalid, depending on the order in which they are restored).
            stack.enter_context(cbook._setattr_cm(self, drawon=False))
            for slider in self._sliders:
                stack.enter_context(
                    cbook._setattr_cm(slider, drawon=False, eventson=False))
            # Reset the slider to the initial position.
            for slider in self._sliders:
                slider.reset()
        if self.drawon:
            event.canvas.draw()  # Redraw the subplottool canvas.
        self._on_slider_changed(None)  # Apply changes to the target window.
class Cursor(AxesWidget):
    """
    A crosshair cursor that spans the Axes and moves with mouse cursor.
    For the cursor to remain responsive you must keep a reference to it.
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The `~.axes.Axes` to attach the cursor to.
    horizOn : bool, default: True
        Whether to draw the horizontal line.
    vertOn : bool, default: True
        Whether to draw the vertical line.
    useblit : bool, default: False
        Use blitting for faster drawing if supported by the backend.
        See the tutorial :ref:`blitting` for details.
    Other Parameters
    ----------------
    **lineprops
        `.Line2D` properties that control the appearance of the lines.
        See also `~.Axes.axhline`.
    Examples
    --------
    See :doc:`/gallery/widgets/cursor`.
    """
    def __init__(self, ax, *, horizOn=True, vertOn=True, useblit=False,
                 **lineprops):
        super().__init__(ax)
        self.connect_event('motion_notify_event', self.onmove)
        self.connect_event('draw_event', self.clear)
        self.visible = True
        self.horizOn = horizOn
        self.vertOn = vertOn
        self.useblit = useblit and self.canvas.supports_blit
        if self.useblit:
            # Animated lines are excluded from normal draws, so the saved
            # background never contains them.
            lineprops['animated'] = True
        # Both lines start hidden; onmove() positions and shows them.
        self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
        self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
        self.background = None
        self.needclear = False
    def clear(self, event):
        """Internal event handler to clear the cursor."""
        if self.ignore(event) or self.canvas.is_saving():
            return
        if self.useblit:
            # Cache the freshly drawn Axes as the blit background.
            self.background = self.canvas.copy_from_bbox(self.ax.bbox)
    def onmove(self, event):
        """Internal event handler to draw the cursor when the mouse moves."""
        if self.ignore(event):
            return
        if not self.canvas.widgetlock.available(self):
            return
        if not self.ax.contains(event)[0]:
            # Mouse left the Axes: hide the crosshair, redrawing at most once.
            self.linev.set_visible(False)
            self.lineh.set_visible(False)
            if self.needclear:
                self.canvas.draw()
                self.needclear = False
            return
        self.needclear = True
        xdata, ydata = self._get_data_coords(event)
        self.linev.set_xdata((xdata, xdata))
        self.linev.set_visible(self.visible and self.vertOn)
        self.lineh.set_ydata((ydata, ydata))
        self.lineh.set_visible(self.visible and self.horizOn)
        if not (self.visible and (self.vertOn or self.horizOn)):
            return
        # Redraw.
        if self.useblit:
            if self.background is not None:
                self.canvas.restore_region(self.background)
            self.ax.draw_artist(self.linev)
            self.ax.draw_artist(self.lineh)
            self.canvas.blit(self.ax.bbox)
        else:
            self.canvas.draw_idle()
class MultiCursor(Widget):
    """
    Provide a vertical (default) and/or horizontal line cursor shared between
    multiple Axes.
    For the cursor to remain responsive you must keep a reference to it.
    Parameters
    ----------
    canvas : object
        This parameter is entirely unused and only kept for back-compatibility.
    axes : list of `~matplotlib.axes.Axes`
        The `~.axes.Axes` to attach the cursor to.
    useblit : bool, default: True
        Use blitting for faster drawing if supported by the backend.
        See the tutorial :ref:`blitting`
        for details.
    horizOn : bool, default: False
        Whether to draw the horizontal line.
    vertOn : bool, default: True
        Whether to draw the vertical line.
    Other Parameters
    ----------------
    **lineprops
        `.Line2D` properties that control the appearance of the lines.
        See also `~.Axes.axhline`.
    Examples
    --------
    See :doc:`/gallery/widgets/multicursor`.
    """
    def __init__(self, canvas, axes, *, useblit=True, horizOn=False, vertOn=True,
                 **lineprops):
        # canvas is stored only to provide the deprecated .canvas attribute;
        # once it goes away the unused argument won't need to be stored at all.
        self._canvas = canvas
        self.axes = axes
        self.horizOn = horizOn
        self.vertOn = vertOn
        # The Axes may live on several figures; track connection ids and a
        # blit background per canvas.
        self._canvas_infos = {
            ax.get_figure(root=True).canvas:
                {"cids": [], "background": None} for ax in axes}
        # Initial line positions: the midpoint of the last Axes' limits.
        xmin, xmax = axes[-1].get_xlim()
        ymin, ymax = axes[-1].get_ylim()
        xmid = 0.5 * (xmin + xmax)
        ymid = 0.5 * (ymin + ymax)
        self.visible = True
        # Blitting is only usable if every involved canvas supports it.
        self.useblit = (
            useblit
            and all(canvas.supports_blit for canvas in self._canvas_infos))
        if self.useblit:
            lineprops['animated'] = True
        self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
                       for ax in axes]
        self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
                       for ax in axes]
        self.connect()
    def connect(self):
        """Connect events."""
        for canvas, info in self._canvas_infos.items():
            info["cids"] = [
                canvas.mpl_connect('motion_notify_event', self.onmove),
                canvas.mpl_connect('draw_event', self.clear),
            ]
    def disconnect(self):
        """Disconnect events."""
        for canvas, info in self._canvas_infos.items():
            for cid in info["cids"]:
                canvas.mpl_disconnect(cid)
            info["cids"].clear()
    def clear(self, event):
        """Clear the cursor."""
        if self.ignore(event):
            return
        if self.useblit:
            for canvas, info in self._canvas_infos.items():
                # someone has switched the canvas on us! This happens if
                # `savefig` needs to save to a format the previous backend did
                # not support (e.g. saving a figure using an Agg based backend
                # saved to a vector format).
                if canvas is not canvas.figure.canvas:
                    continue
                info["background"] = canvas.copy_from_bbox(canvas.figure.bbox)
    def onmove(self, event):
        """Internal event handler to draw the cursors on mouse move."""
        axs = [ax for ax in self.axes if ax.contains(event)[0]]
        if self.ignore(event) or not axs or not event.canvas.widgetlock.available(self):
            return
        ax = cbook._topmost_artist(axs)
        # Convert the pointer position into the topmost containing Axes'
        # data coordinates if the event did not originate there.
        xdata, ydata = ((event.xdata, event.ydata) if event.inaxes is ax
                        else ax.transData.inverted().transform((event.x, event.y)))
        for line in self.vlines:
            line.set_xdata((xdata, xdata))
            line.set_visible(self.visible and self.vertOn)
        for line in self.hlines:
            line.set_ydata((ydata, ydata))
            line.set_visible(self.visible and self.horizOn)
        if not (self.visible and (self.vertOn or self.horizOn)):
            return
        # Redraw.
        if self.useblit:
            for canvas, info in self._canvas_infos.items():
                if info["background"]:
                    canvas.restore_region(info["background"])
            if self.vertOn:
                for ax, line in zip(self.axes, self.vlines):
                    ax.draw_artist(line)
            if self.horizOn:
                for ax, line in zip(self.axes, self.hlines):
                    ax.draw_artist(line)
            for canvas in self._canvas_infos:
                canvas.blit()
        else:
            for canvas in self._canvas_infos:
                canvas.draw_idle()
class _SelectorWidget(AxesWidget):
    """
    Base class for interactive selector widgets.

    Provides the shared machinery: default canvas event wiring
    (press/release/move/key/scroll), modifier-key state tracking, blit-based
    redrawing, and event validation/cleaning.  Subclasses implement the
    ``_press``/``_release``/``_onmove`` hooks and expose their drawn artists
    through ``_selection_artist`` (and optionally ``_handles_artists``).
    """

    def __init__(self, ax, onselect=None, useblit=False, button=None,
                 state_modifier_keys=None, use_data_coordinates=False):
        super().__init__(ax)
        self._visible = True
        # A no-op callback keeps the call sites unconditional.
        if onselect is None:
            self.onselect = lambda *args: None
        else:
            self.onselect = onselect
        # Blitting only if requested AND the backend supports it.
        self.useblit = useblit and self.canvas.supports_blit
        self.connect_default_events()
        # Default modifier keys; caller-supplied entries override these.
        self._state_modifier_keys = dict(move=' ', clear='escape',
                                         square='shift', center='control',
                                         rotate='r')
        self._state_modifier_keys.update(state_modifier_keys or {})
        self._use_data_coordinates = use_data_coordinates
        # Cached background for blitting; filled by update_background().
        self.background = None
        # A single button is normalized to a one-element list; None means
        # "all buttons accepted".
        if isinstance(button, Integral):
            self.validButtons = [button]
        else:
            self.validButtons = button
        # Set to True when a selection is completed, otherwise is False
        self._selection_completed = False
        # will save the data (position at mouseclick)
        self._eventpress = None
        # will save the data (pos. at mouserelease)
        self._eventrelease = None
        self._prev_event = None
        # Currently active modifier states ('move', 'square', ...).
        self._state = set()

    def set_active(self, active):
        super().set_active(active)
        # Re-capture the background when re-activated, since the canvas may
        # have been redrawn while the widget was inactive.
        if active:
            self.update_background(None)

    def _get_animated_artists(self):
        """
        Convenience method to get all animated artists of the figure containing
        this widget, excluding those already present in self.artists.
        The returned tuple is not sorted by 'z_order': z_order sorting is
        valid only when considering all artists and not only a subset of all
        artists.
        """
        return tuple(a for ax_ in self.ax.get_figure().get_axes()
                     for a in ax_.get_children()
                     if a.get_animated() and a not in self.artists)

    def update_background(self, event):
        """Force an update of the background."""
        # If you add a call to `ignore` here, you'll want to check edge case:
        # `release` can call a draw event even when `ignore` is True.
        if not self.useblit:
            return
        # Make sure that widget artists don't get accidentally included in the
        # background, by re-rendering the background if needed (and then
        # re-re-rendering the canvas with the visible widget artists).
        # We need to remove all artists which will be drawn when updating
        # the selector: if we have animated artists in the figure, it is safer
        # to redrawn by default, in case they have updated by the callback
        # zorder needs to be respected when redrawing
        artists = sorted(self.artists + self._get_animated_artists(),
                         key=lambda a: a.get_zorder())
        needs_redraw = any(artist.get_visible() for artist in artists)
        with ExitStack() as stack:
            if needs_redraw:
                # Temporarily hide all widget/animated artists so they are
                # not baked into the cached background.
                for artist in artists:
                    stack.enter_context(artist._cm_set(visible=False))
                self.canvas.draw()
            self.background = self.canvas.copy_from_bbox(self.ax.bbox)
        if needs_redraw:
            for artist in artists:
                self.ax.draw_artist(artist)

    def connect_default_events(self):
        """Connect the major canvas events to methods."""
        self.connect_event('motion_notify_event', self.onmove)
        self.connect_event('button_press_event', self.press)
        self.connect_event('button_release_event', self.release)
        self.connect_event('draw_event', self.update_background)
        self.connect_event('key_press_event', self.on_key_press)
        self.connect_event('key_release_event', self.on_key_release)
        self.connect_event('scroll_event', self.on_scroll)

    def ignore(self, event):
        # docstring inherited
        if not self.active or not self.ax.get_visible():
            return True
        # If canvas was locked
        if not self.canvas.widgetlock.available(self):
            return True
        # Key events have no 'button'; normalize so the checks below work.
        if not hasattr(event, 'button'):
            event.button = None
        # Only do rectangle selection if event was triggered
        # with a desired button
        if (self.validButtons is not None
                and event.button not in self.validButtons):
            return True
        # If no button was pressed yet ignore the event if it was out of the Axes.
        if self._eventpress is None:
            return not self.ax.contains(event)[0]
        # If a button was pressed, check if the release-button is the same.
        if event.button == self._eventpress.button:
            return False
        # If a button was pressed, check if the release-button is the same.
        # NOTE(review): at this point the buttons are known to differ, so the
        # second operand of the `or` is always True and this always returns
        # True; the `contains` check looks redundant — confirm upstream intent
        # before simplifying.
        return (not self.ax.contains(event)[0] or
                event.button != self._eventpress.button)

    def update(self):
        """Draw using blit() or draw_idle(), depending on ``self.useblit``."""
        # Nothing to draw without a visible Axes and a live renderer.
        if (not self.ax.get_visible() or
                self.ax.get_figure(root=True)._get_renderer() is None):
            return
        if self.useblit:
            if self.background is not None:
                self.canvas.restore_region(self.background)
            else:
                self.update_background(None)
            # We need to draw all artists, which are not included in the
            # background, therefore we also draw self._get_animated_artists()
            # and we make sure that we respect z_order
            artists = sorted(self.artists + self._get_animated_artists(),
                             key=lambda a: a.get_zorder())
            for artist in artists:
                self.ax.draw_artist(artist)
            self.canvas.blit(self.ax.bbox)
        else:
            self.canvas.draw_idle()

    def _get_data(self, event):
        """Get the xdata and ydata for event, with limits."""
        if event.xdata is None:
            return None, None
        xdata, ydata = self._get_data_coords(event)
        # Clamp to the current axes bounds so drags outside the Axes still
        # produce usable coordinates.
        xdata = np.clip(xdata, *self.ax.get_xbound())
        ydata = np.clip(ydata, *self.ax.get_ybound())
        return xdata, ydata

    def _clean_event(self, event):
        """
        Preprocess an event:

        - Replace *event* by the previous event if *event* has no ``xdata``.
        - Get ``xdata`` and ``ydata`` from this widget's Axes, and clip them to the axes
          limits.
        - Update the previous event.
        """
        if event.xdata is None:
            event = self._prev_event
        else:
            # Copy so the clipping below does not mutate the original event.
            event = copy.copy(event)
        event.xdata, event.ydata = self._get_data(event)
        self._prev_event = event
        return event

    def press(self, event):
        """Button press handler and validator."""
        if not self.ignore(event):
            event = self._clean_event(event)
            self._eventpress = event
            self._prev_event = event
            key = event.key or ''
            key = key.replace('ctrl', 'control')
            # move state is locked in on a button press
            if key == self._state_modifier_keys['move']:
                self._state.add('move')
            self._press(event)
            return True
        return False

    def _press(self, event):
        """Button press event handler."""

    def release(self, event):
        """Button release event handler and validator."""
        if not self.ignore(event) and self._eventpress:
            event = self._clean_event(event)
            self._eventrelease = event
            self._release(event)
            # Press/release cycle is over; reset the drag state.
            self._eventpress = None
            self._eventrelease = None
            self._state.discard('move')
            return True
        return False

    def _release(self, event):
        """Button release event handler."""

    def onmove(self, event):
        """Cursor move event handler and validator."""
        # Only track motion while a button press is in progress.
        if not self.ignore(event) and self._eventpress:
            event = self._clean_event(event)
            self._onmove(event)
            return True
        return False

    def _onmove(self, event):
        """Cursor move event handler."""

    def on_scroll(self, event):
        """Mouse scroll event handler and validator."""
        if not self.ignore(event):
            self._on_scroll(event)

    def _on_scroll(self, event):
        """Mouse scroll event handler."""

    def on_key_press(self, event):
        """Key press event handler and validator for all selection widgets."""
        if self.active:
            key = event.key or ''
            key = key.replace('ctrl', 'control')
            if key == self._state_modifier_keys['clear']:
                self.clear()
                return
            for (state, modifier) in self._state_modifier_keys.items():
                if modifier in key.split('+'):
                    # 'rotate' is changing _state on press and is not removed
                    # from _state when releasing
                    if state == 'rotate':
                        # Toggle rotate on each press.
                        if state in self._state:
                            self._state.discard(state)
                        else:
                            self._state.add(state)
                    else:
                        self._state.add(state)
            self._on_key_press(event)

    def _on_key_press(self, event):
        """Key press event handler - for widget-specific key press actions."""

    def on_key_release(self, event):
        """Key release event handler and validator."""
        if self.active:
            key = event.key or ''
            for (state, modifier) in self._state_modifier_keys.items():
                # 'rotate' is changing _state on press and is not removed
                # from _state when releasing
                if modifier in key.split('+') and state != 'rotate':
                    self._state.discard(state)
            self._on_key_release(event)

    def _on_key_release(self, event):
        """Key release event handler."""

    def set_visible(self, visible):
        """Set the visibility of the selector artists."""
        self._visible = visible
        for artist in self.artists:
            artist.set_visible(visible)

    def get_visible(self):
        """Get the visibility of the selector artists."""
        return self._visible

    def clear(self):
        """Clear the selection and set the selector ready to make a new one."""
        self._clear_without_update()
        self.update()

    def _clear_without_update(self):
        # Reset completion state and hide artists without forcing a redraw.
        self._selection_completed = False
        self.set_visible(False)

    @property
    def artists(self):
        """Tuple of the artists of the selector."""
        handles_artists = getattr(self, '_handles_artists', ())
        return (self._selection_artist,) + handles_artists

    def set_props(self, **props):
        """
        Set the properties of the selector artist.

        See the *props* argument in the selector docstring to know which properties are
        supported.
        """
        artist = self._selection_artist
        props = cbook.normalize_kwargs(props, artist)
        artist.set(**props)
        if self.useblit:
            self.update()

    def set_handle_props(self, **handle_props):
        """
        Set the properties of the handles selector artist. See the
        `handle_props` argument in the selector docstring to know which
        properties are supported.
        """
        if not hasattr(self, '_handles_artists'):
            raise NotImplementedError("This selector doesn't have handles.")
        # Normalize against one handle; all handles are the same artist type.
        artist = self._handles_artists[0]
        handle_props = cbook.normalize_kwargs(handle_props, artist)
        for handle in self._handles_artists:
            handle.set(**handle_props)
        if self.useblit:
            self.update()
        self._handle_props.update(handle_props)

    def _validate_state(self, state):
        # 'clear' is an action, not a persistent state, and 'not-applicable'
        # marks modifiers a subclass has opted out of.
        supported_state = [
            key for key, value in self._state_modifier_keys.items()
            if key != 'clear' and value != 'not-applicable'
        ]
        _api.check_in_list(supported_state, state=state)

    def add_state(self, state):
        """
        Add a state to define the widget's behavior. See the
        `state_modifier_keys` parameters for details.

        Parameters
        ----------
        state : str
            Must be a supported state of the selector. See the
            `state_modifier_keys` parameters for details.

        Raises
        ------
        ValueError
            When the state is not supported by the selector.
        """
        self._validate_state(state)
        self._state.add(state)

    def remove_state(self, state):
        """
        Remove a state to define the widget's behavior. See the
        `state_modifier_keys` parameters for details.

        Parameters
        ----------
        state : str
            Must be a supported state of the selector. See the
            `state_modifier_keys` parameters for details.

        Raises
        ------
        ValueError
            When the state is not supported by the selector.
        """
        self._validate_state(state)
        self._state.remove(state)
class SpanSelector(_SelectorWidget):
    """
    Visually select a min/max range on a single axis and call a function with
    those values.

    To guarantee that the selector remains responsive, keep a reference to it.

    In order to turn off the SpanSelector, set ``span_selector.active`` to
    False. To turn it back on, set it to True.

    Press and release events triggered at the same coordinates outside the
    selection will clear the selector, except when
    ``ignore_event_outside=True``.

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
    onselect : callable with signature ``func(min: float, max: float)``
        A callback function that is called after a release event and the
        selection is created, changed or removed.
    direction : {"horizontal", "vertical"}
        The direction along which to draw the span selector.
    minspan : float, default: 0
        If selection is less than or equal to *minspan*, the selection is
        removed (when already existing) or cancelled.
    useblit : bool, default: False
        If True, use the backend-dependent blitting features for faster
        canvas updates. See the tutorial :ref:`blitting` for details.
    props : dict, default: {'facecolor': 'red', 'alpha': 0.5}
        Dictionary of `.Patch` properties.
    onmove_callback : callable with signature ``func(min: float, max: float)``, optional
        Called on mouse move while the span is being selected.
    interactive : bool, default: False
        Whether to draw a set of handles that allow interaction with the
        widget after it is drawn.
    button : `.MouseButton` or list of `.MouseButton`, default: all buttons
        The mouse buttons which activate the span selector.
    handle_props : dict, default: None
        Properties of the handle lines at the edges of the span. Only used
        when *interactive* is True. See `.Line2D` for valid properties.
    grab_range : float, default: 10
        Distance in pixels within which the interactive tool handles can be activated.
    state_modifier_keys : dict, optional
        Keyboard modifiers which affect the widget's behavior. Values
        amend the defaults, which are:

        - "clear": Clear the current shape, default: "escape".
    drag_from_anywhere : bool, default: False
        If `True`, the widget can be moved by clicking anywhere within its bounds.
    ignore_event_outside : bool, default: False
        If `True`, the event triggered outside the span selector will be ignored.
    snap_values : 1D array-like, optional
        Snap the selector edges to the given values.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import matplotlib.widgets as mwidgets
    >>> fig, ax = plt.subplots()
    >>> ax.plot([1, 2, 3], [10, 50, 100])
    >>> def onselect(vmin, vmax):
    ...     print(vmin, vmax)
    >>> span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
    ...                              props=dict(facecolor='blue', alpha=0.5))
    >>> fig.show()

    See also: :doc:`/gallery/widgets/span_selector`
    """

    def __init__(self, ax, onselect, direction, *, minspan=0, useblit=False,
                 props=None, onmove_callback=None, interactive=False,
                 button=None, handle_props=None, grab_range=10,
                 state_modifier_keys=None, drag_from_anywhere=False,
                 ignore_event_outside=False, snap_values=None):
        if state_modifier_keys is None:
            # Span selection is one-dimensional: square/center/rotate
            # modifiers do not apply.
            state_modifier_keys = dict(clear='escape',
                                       square='not-applicable',
                                       center='not-applicable',
                                       rotate='not-applicable')
        super().__init__(ax, onselect, useblit=useblit, button=button,
                         state_modifier_keys=state_modifier_keys)
        if props is None:
            props = dict(facecolor='red', alpha=0.5)
        # The span rectangle must be animated for blitting to work.
        props['animated'] = self.useblit
        self.direction = direction
        self._extents_on_press = None
        self.snap_values = snap_values
        self.onmove_callback = onmove_callback
        self.minspan = minspan
        self.grab_range = grab_range
        self._interactive = interactive
        self._edge_handles = None
        self.drag_from_anywhere = drag_from_anywhere
        self.ignore_event_outside = ignore_event_outside
        self.new_axes(ax, _props=props, _init=True)
        # Setup handles
        self._handle_props = {
            'color': props.get('facecolor', 'r'),
            **cbook.normalize_kwargs(handle_props, Line2D)}
        if self._interactive:
            self._edge_order = ['min', 'max']
            self._setup_edge_handles(self._handle_props)
        self._active_handle = None

    def new_axes(self, ax, *, _props=None, _init=False):
        """Set SpanSelector to operate on a new Axes."""
        reconnect = False
        # Rewire canvas events when moving to a different figure's canvas.
        if _init or self.canvas is not ax.get_figure(root=True).canvas:
            if self.canvas is not None:
                self.disconnect_events()
            reconnect = True
        self.ax = ax
        if reconnect:
            self.connect_default_events()
        # Reset
        self._selection_completed = False
        # The span occupies the full extent of the non-selection axis, via
        # the corresponding axis transform (data coords along the selection
        # axis, axes coords perpendicular to it).
        if self.direction == 'horizontal':
            trans = ax.get_xaxis_transform()
            w, h = 0, 1
        else:
            trans = ax.get_yaxis_transform()
            w, h = 1, 0
        rect_artist = Rectangle((0, 0), w, h, transform=trans, visible=False)
        if _props is not None:
            rect_artist.update(_props)
        elif self._selection_artist is not None:
            # Carry over the styling of the previous span rectangle.
            rect_artist.update_from(self._selection_artist)
        self.ax.add_patch(rect_artist)
        self._selection_artist = rect_artist

    def _setup_edge_handles(self, props):
        # Define initial position using the axis bounds to keep the same bounds
        if self.direction == 'horizontal':
            positions = self.ax.get_xbound()
        else:
            positions = self.ax.get_ybound()
        self._edge_handles = ToolLineHandles(self.ax, positions,
                                             direction=self.direction,
                                             line_props=props,
                                             useblit=self.useblit)

    @property
    def _handles_artists(self):
        if self._edge_handles is not None:
            return self._edge_handles.artists
        else:
            return ()

    def _set_cursor(self, enabled):
        """Update the canvas cursor based on direction of the selector."""
        if enabled:
            cursor = (backend_tools.Cursors.RESIZE_HORIZONTAL
                      if self.direction == 'horizontal' else
                      backend_tools.Cursors.RESIZE_VERTICAL)
        else:
            cursor = backend_tools.Cursors.POINTER
        self.ax.get_figure(root=True).canvas.set_cursor(cursor)

    def connect_default_events(self):
        # docstring inherited
        super().connect_default_events()
        # Interactive spans also track hover motion to update the cursor.
        if getattr(self, '_interactive', False):
            self.connect_event('motion_notify_event', self._hover)

    def _press(self, event):
        """Button press event handler."""
        self._set_cursor(True)
        if self._interactive and self._selection_artist.get_visible():
            self._set_active_handle(event)
        else:
            self._active_handle = None
        if self._active_handle is None or not self._interactive:
            # Clear previous rectangle before drawing new rectangle.
            self.update()
        xdata, ydata = self._get_data_coords(event)
        v = xdata if self.direction == 'horizontal' else ydata
        if self._active_handle is None and not self.ignore_event_outside:
            # when the press event outside the span, we initially set the
            # visibility to False and extents to (v, v)
            # update will be called when setting the extents
            self._visible = False
            self._set_extents((v, v))
            # We need to set the visibility back, so the span selector will be
            # drawn when necessary (span width > 0)
            self._visible = True
        else:
            self.set_visible(True)
        return False

    @property
    def direction(self):
        """Direction of the span selector: 'vertical' or 'horizontal'."""
        return self._direction

    @direction.setter
    def direction(self, direction):
        """Set the direction of the span selector."""
        _api.check_in_list(['horizontal', 'vertical'], direction=direction)
        if hasattr(self, '_direction') and direction != self._direction:
            # remove previous artists
            self._selection_artist.remove()
            if self._interactive:
                self._edge_handles.remove()
            self._direction = direction
            # Rebuild the span rectangle (and handles) for the new direction.
            self.new_axes(self.ax)
            if self._interactive:
                self._setup_edge_handles(self._handle_props)
        else:
            self._direction = direction

    def _release(self, event):
        """Button release event handler."""
        self._set_cursor(False)
        # Non-interactive spans disappear once the drag ends.
        if not self._interactive:
            self._selection_artist.set_visible(False)
        if (self._active_handle is None and self._selection_completed and
                self.ignore_event_outside):
            return
        vmin, vmax = self.extents
        span = vmax - vmin
        if span <= self.minspan:
            # Remove span and set self._selection_completed = False
            self.set_visible(False)
            if self._selection_completed:
                # Call onselect, only when the span is already existing
                self.onselect(vmin, vmax)
            self._selection_completed = False
        else:
            self.onselect(vmin, vmax)
            self._selection_completed = True
        self.update()
        self._active_handle = None
        return False

    def _hover(self, event):
        """Update the canvas cursor if it's over a handle."""
        if self.ignore(event):
            return
        if self._active_handle is not None or not self._selection_completed:
            # Do nothing if button is pressed and a handle is active, which may
            # occur with drag_from_anywhere=True.
            # Do nothing if selection is not completed, which occurs when
            # a selector has been cleared
            return
        _, e_dist = self._edge_handles.closest(event.x, event.y)
        self._set_cursor(e_dist <= self.grab_range)

    def _onmove(self, event):
        """Motion notify event handler."""
        xdata, ydata = self._get_data_coords(event)
        # Work in the 1-D coordinate along the selection axis.
        if self.direction == 'horizontal':
            v = xdata
            vpress = self._eventpress.xdata
        else:
            v = ydata
            vpress = self._eventpress.ydata
        # move existing span
        # When "dragging from anywhere", `self._active_handle` is set to 'C'
        # (match notation used in the RectangleSelector)
        if self._active_handle == 'C' and self._extents_on_press is not None:
            vmin, vmax = self._extents_on_press
            dv = v - vpress
            vmin += dv
            vmax += dv
        # resize an existing shape
        elif self._active_handle and self._active_handle != 'C':
            vmin, vmax = self._extents_on_press
            if self._active_handle == 'min':
                vmin = v
            else:
                vmax = v
        # new shape
        else:
            # Don't create a new span if there is already one when
            # ignore_event_outside=True
            if self.ignore_event_outside and self._selection_completed:
                return
            vmin, vmax = vpress, v
            if vmin > vmax:
                vmin, vmax = vmax, vmin
        self._set_extents((vmin, vmax))
        if self.onmove_callback is not None:
            self.onmove_callback(vmin, vmax)
        return False

    def _draw_shape(self, vmin, vmax):
        # Normalize so the rectangle always has non-negative width/height.
        if vmin > vmax:
            vmin, vmax = vmax, vmin
        if self.direction == 'horizontal':
            self._selection_artist.set_x(vmin)
            self._selection_artist.set_width(vmax - vmin)
        else:
            self._selection_artist.set_y(vmin)
            self._selection_artist.set_height(vmax - vmin)

    def _set_active_handle(self, event):
        """Set active handle based on the location of the mouse event."""
        # Note: event.xdata/ydata in data coordinates, event.x/y in pixels
        e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
        # Prioritise center handle over other handles
        # Use 'C' to match the notation used in the RectangleSelector
        if 'move' in self._state:
            self._active_handle = 'C'
        elif e_dist > self.grab_range:
            # Not close to any handles
            self._active_handle = None
            if self.drag_from_anywhere and self._contains(event):
                # Check if we've clicked inside the region
                self._active_handle = 'C'
                self._extents_on_press = self.extents
            else:
                self._active_handle = None
                return
        else:
            # Closest to an edge handle
            self._active_handle = self._edge_order[e_idx]
        # Save coordinates of rectangle at the start of handle movement.
        self._extents_on_press = self.extents

    def _contains(self, event):
        """Return True if event is within the patch."""
        return self._selection_artist.contains(event, radius=0)[0]

    @staticmethod
    def _snap(values, snap_values):
        """Snap values to a given array values (snap_values)."""
        # take into account machine precision
        eps = np.min(np.abs(np.diff(snap_values))) * 1e-12
        return tuple(
            snap_values[np.abs(snap_values - v + np.sign(v) * eps).argmin()]
            for v in values)

    @property
    def extents(self):
        """
        (float, float)
            The values, in data coordinates, for the start and end points of the current
            selection. If there is no selection then the start and end values will be
            the same.
        """
        if self.direction == 'horizontal':
            vmin = self._selection_artist.get_x()
            vmax = vmin + self._selection_artist.get_width()
        else:
            vmin = self._selection_artist.get_y()
            vmax = vmin + self._selection_artist.get_height()
        return vmin, vmax

    @extents.setter
    def extents(self, extents):
        self._set_extents(extents)
        # Setting extents programmatically counts as a completed selection.
        self._selection_completed = True

    def _set_extents(self, extents):
        # Update displayed shape
        if self.snap_values is not None:
            extents = tuple(self._snap(extents, self.snap_values))
        self._draw_shape(*extents)
        if self._interactive:
            # Update displayed handles
            self._edge_handles.set_data(self.extents)
        self.set_visible(self._visible)
        self.update()
class ToolLineHandles:
    """
    A set of parallel line handles used by canvas tools (e.g. `SpanSelector`).

    Each handle is an axis-spanning line (`~.Axes.axvline` for 'horizontal'
    direction, `~.Axes.axhline` for 'vertical') placed at one of *positions*.

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        Matplotlib Axes where tool handles are displayed.
    positions : 1D array
        Positions of handles in data coordinates.
    direction : {"horizontal", "vertical"}
        Direction of handles, either 'vertical' or 'horizontal'
    line_props : dict, optional
        Additional line properties. See `.Line2D`.
    useblit : bool, default: True
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting` for details.
    """

    def __init__(self, ax, positions, direction, *, line_props=None,
                 useblit=True):
        self.ax = ax
        _api.check_in_list(['horizontal', 'vertical'], direction=direction)
        self._direction = direction
        # Start hidden; 'animated' must match the blitting mode of the owner.
        props = dict(line_props) if line_props is not None else {}
        props['visible'] = False
        props['animated'] = useblit
        make_line = ax.axvline if self.direction == 'horizontal' else ax.axhline
        self._artists = [make_line(pos, **props) for pos in positions]

    @property
    def artists(self):
        """Tuple of the handle `.Line2D` artists."""
        return tuple(self._artists)

    @property
    def positions(self):
        """Positions of the handle in data coordinates."""
        if self.direction == 'horizontal':
            return [line.get_xdata()[0] for line in self.artists]
        return [line.get_ydata()[0] for line in self.artists]

    @property
    def direction(self):
        """Direction of the handle: 'vertical' or 'horizontal'."""
        return self._direction

    def set_data(self, positions):
        """
        Set x- or y-positions of handles, depending on if the lines are
        vertical or horizontal.

        Parameters
        ----------
        positions : tuple of length 2
            Set the positions of the handle in data coordinates
        """
        horizontal = self.direction == 'horizontal'
        for line, pos in zip(self.artists, positions):
            if horizontal:
                line.set_xdata([pos, pos])
            else:
                line.set_ydata([pos, pos])

    def set_visible(self, value):
        """Set the visibility state of the handles artist."""
        for line in self.artists:
            line.set_visible(value)

    def set_animated(self, value):
        """Set the animated state of the handles artist."""
        for line in self.artists:
            line.set_animated(value)

    def remove(self):
        """Remove the handles artist from the figure."""
        for line in self._artists:
            line.remove()

    def closest(self, x, y):
        """
        Return index and pixel distance to closest handle.

        Parameters
        ----------
        x, y : float
            x, y position from which the distance will be calculated to
            determinate the closest handle

        Returns
        -------
        index, distance : index of the handle and its distance from
            position x, y
        """
        # Project each handle position to pixels along the relevant axis and
        # measure the 1-D pixel distance to the query point.
        if self.direction == 'horizontal':
            pixels = np.asarray([self.ax.transData.transform((pos, 0))[0]
                                 for pos in self.positions])
            deltas = abs(pixels - x)
        else:
            pixels = np.asarray([self.ax.transData.transform((0, pos))[1]
                                 for pos in self.positions])
            deltas = abs(pixels - y)
        best = np.argmin(deltas)
        return best, deltas[best]
class ToolHandles:
    """
    A set of point markers used as control handles by canvas tools.

    All handles are stored in a single `.Line2D` artist drawn as markers only
    (no connecting line).

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        Matplotlib Axes where tool handles are displayed.
    x, y : 1D arrays
        Coordinates of control handles.
    marker : str, default: 'o'
        Shape of marker used to display handle. See `~.pyplot.plot`.
    marker_props : dict, optional
        Additional marker properties. See `.Line2D`.
    useblit : bool, default: True
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting` for details.
    """

    def __init__(self, ax, x, y, *, marker='o', marker_props=None, useblit=True):
        self.ax = ax
        # Defaults first; caller-supplied marker properties override them.
        props = {'marker': marker, 'markersize': 7, 'markerfacecolor': 'w',
                 'linestyle': 'none', 'alpha': 0.5, 'visible': False,
                 'label': '_nolegend_'}
        props.update(cbook.normalize_kwargs(marker_props, Line2D._alias_map))
        self._markers = Line2D(x, y, animated=useblit, **props)
        self.ax.add_line(self._markers)

    @property
    def x(self):
        """Handle x-coordinates, in data space."""
        return self._markers.get_xdata()

    @property
    def y(self):
        """Handle y-coordinates, in data space."""
        return self._markers.get_ydata()

    @property
    def artists(self):
        """One-element tuple holding the marker `.Line2D` artist."""
        return (self._markers, )

    def set_data(self, pts, y=None):
        """Set x and y positions of handles."""
        # Accept either a single (2, N) array or separate x and y sequences.
        if y is not None:
            pts = np.array([pts, y])
        self._markers.set_data(pts)

    def set_visible(self, val):
        """Show or hide the handle markers."""
        self._markers.set_visible(val)

    def set_animated(self, val):
        """Set the animated (blitting) state of the handle markers."""
        self._markers.set_animated(val)

    def closest(self, x, y):
        """Return index and pixel distance to closest index."""
        # Transform handle positions from data to pixel coordinates, then
        # take the Euclidean distance to the query point.
        pixel_pts = self.ax.transData.transform(np.column_stack([self.x, self.y]))
        dx, dy = (pixel_pts - [x, y]).T
        dist = np.hypot(dx, dy)
        best = np.argmin(dist)
        return best, dist[best]
# Shared parameter documentation interpolated (via _docstring.Substitution)
# into RectangleSelector and EllipseSelector docstrings; __ARTIST_NAME__ is
# replaced with the concrete artist name.
# Fix: the *minspany* entry previously said "an y-span ... *minspanx*" —
# wrong parameter name and grammar; it now correctly references *minspany*.
_RECTANGLESELECTOR_PARAMETERS_DOCSTRING = \
    r"""
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.

    onselect : function, optional
        A callback function that is called after a release event and the
        selection is created, changed or removed.
        It must have the signature::

            def onselect(eclick: MouseEvent, erelease: MouseEvent)

        where *eclick* and *erelease* are the mouse click and release
        `.MouseEvent`\s that start and complete the selection.

    minspanx : float, default: 0
        Selections with an x-span less than or equal to *minspanx* are removed
        (when already existing) or cancelled.

    minspany : float, default: 0
        Selections with a y-span less than or equal to *minspany* are removed
        (when already existing) or cancelled.

    useblit : bool, default: False
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting`
        for details.

    props : dict, optional
        Properties with which the __ARTIST_NAME__ is drawn. See
        `.Patch` for valid properties.
        Default:

        ``dict(facecolor='red', edgecolor='black', alpha=0.2, fill=True)``

    spancoords : {"data", "pixels"}, default: "data"
        Whether to interpret *minspanx* and *minspany* in data or in pixel
        coordinates.

    button : `.MouseButton`, list of `.MouseButton`, default: all buttons
        Button(s) that trigger rectangle selection.

    grab_range : float, default: 10
        Distance in pixels within which the interactive tool handles can be
        activated.

    handle_props : dict, optional
        Properties with which the interactive handles (marker artists) are
        drawn. See the marker arguments in `.Line2D` for valid
        properties. Default values are defined in ``mpl.rcParams`` except for
        the default value of ``markeredgecolor`` which will be the same as the
        ``edgecolor`` property in *props*.

    interactive : bool, default: False
        Whether to draw a set of handles that allow interaction with the
        widget after it is drawn.

    state_modifier_keys : dict, optional
        Keyboard modifiers which affect the widget's behavior. Values
        amend the defaults, which are:

        - "move": Move the existing shape, default: no modifier.
        - "clear": Clear the current shape, default: "escape".
        - "square": Make the shape square, default: "shift".
        - "center": change the shape around its center, default: "ctrl".
        - "rotate": Rotate the shape around its center between -45° and 45°,
          default: "r".

        "square" and "center" can be combined. The square shape can be defined
        in data or display coordinates as determined by the
        ``use_data_coordinates`` argument specified when creating the selector.

    drag_from_anywhere : bool, default: False
        If `True`, the widget can be moved by clicking anywhere within
        its bounds.

    ignore_event_outside : bool, default: False
        If `True`, the event triggered outside the span selector will be
        ignored.

    use_data_coordinates : bool, default: False
        If `True`, the "square" shape of the selector is defined in
        data coordinates instead of display coordinates.
    """
@_docstring.Substitution(_RECTANGLESELECTOR_PARAMETERS_DOCSTRING.replace(
'__ARTIST_NAME__', 'rectangle'))
class RectangleSelector(_SelectorWidget):
"""
Select a rectangular region of an Axes.
For the cursor to remain responsive you must keep a reference to it.
Press and release events triggered at the same coordinates outside the
selection will clear the selector, except when
``ignore_event_outside=True``.
%s
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.widgets as mwidgets
>>> fig, ax = plt.subplots()
>>> ax.plot([1, 2, 3], [10, 50, 100])
>>> def onselect(eclick, erelease):
... print(eclick.xdata, eclick.ydata)
... print(erelease.xdata, erelease.ydata)
>>> props = dict(facecolor='blue', alpha=0.5)
>>> rect = mwidgets.RectangleSelector(ax, onselect, interactive=True,
... props=props)
>>> fig.show()
>>> rect.add_state('square')
See also: :doc:`/gallery/widgets/rectangle_selector`
"""
def __init__(self, ax, onselect=None, *, minspanx=0,
minspany=0, useblit=False,
props=None, spancoords='data', button=None, grab_range=10,
handle_props=None, interactive=False,
state_modifier_keys=None, drag_from_anywhere=False,
ignore_event_outside=False, use_data_coordinates=False):
super().__init__(ax, onselect, useblit=useblit, button=button,
state_modifier_keys=state_modifier_keys,
use_data_coordinates=use_data_coordinates)
self._interactive = interactive
self.drag_from_anywhere = drag_from_anywhere
self.ignore_event_outside = ignore_event_outside
self._rotation = 0.0
self._aspect_ratio_correction = 1.0
# State to allow the option of an interactive selector that can't be
# interactively drawn. This is used in PolygonSelector as an
# interactive bounding box to allow the polygon to be easily resized
self._allow_creation = True
if props is None:
props = dict(facecolor='red', edgecolor='black',
alpha=0.2, fill=True)
props = {**props, 'animated': self.useblit}
self._visible = props.pop('visible', self._visible)
to_draw = self._init_shape(**props)
self.ax.add_patch(to_draw)
self._selection_artist = to_draw
self._set_aspect_ratio_correction()
self.minspanx = minspanx
self.minspany = minspany
_api.check_in_list(['data', 'pixels'], spancoords=spancoords)
self.spancoords = spancoords
self.grab_range = grab_range
if self._interactive:
self._handle_props = {
'markeredgecolor': (props or {}).get('edgecolor', 'black'),
**cbook.normalize_kwargs(handle_props, Line2D)}
self._corner_order = ['SW', 'SE', 'NE', 'NW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc,
marker_props=self._handle_props,
useblit=self.useblit)
self._edge_order = ['W', 'S', 'E', 'N']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=self._handle_props,
useblit=self.useblit)
xc, yc = self.center
self._center_handle = ToolHandles(self.ax, [xc], [yc], marker='s',
marker_props=self._handle_props,
useblit=self.useblit)
self._active_handle = None
self._extents_on_press = None
@property
def _handles_artists(self):
return (*self._center_handle.artists, *self._corner_handles.artists,
*self._edge_handles.artists)
def _init_shape(self, **props):
return Rectangle((0, 0), 0, 1, visible=False,
rotation_point='center', **props)
def _press(self, event):
"""Button press event handler."""
# make the drawn box/line visible get the click-coordinates, button, ...
if self._interactive and self._selection_artist.get_visible():
self._set_active_handle(event)
else:
self._active_handle = None
if ((self._active_handle is None or not self._interactive) and
self._allow_creation):
# Clear previous rectangle before drawing new rectangle.
self.update()
if (self._active_handle is None and not self.ignore_event_outside and
self._allow_creation):
x, y = self._get_data_coords(event)
self._visible = False
self.extents = x, x, y, y
self._visible = True
else:
self.set_visible(True)
self._extents_on_press = self.extents
self._rotation_on_press = self._rotation
self._set_aspect_ratio_correction()
return False
    def _release(self, event):
        """Button release event handler."""
        if not self._interactive:
            # Non-interactive selectors hide the shape once the drag ends.
            self._selection_artist.set_visible(False)
        if (self._active_handle is None and self._selection_completed and
                self.ignore_event_outside):
            # A click outside an existing selection is deliberately ignored.
            return
        # update the eventpress and eventrelease with the resulting extents
        x0, x1, y0, y1 = self.extents
        self._eventpress.xdata = x0
        self._eventpress.ydata = y0
        xy0 = self.ax.transData.transform([x0, y0])
        self._eventpress.x, self._eventpress.y = xy0
        self._eventrelease.xdata = x1
        self._eventrelease.ydata = y1
        xy1 = self.ax.transData.transform([x1, y1])
        self._eventrelease.x, self._eventrelease.y = xy1
        # calculate dimensions of box or line
        if self.spancoords == 'data':
            spanx = abs(self._eventpress.xdata - self._eventrelease.xdata)
            spany = abs(self._eventpress.ydata - self._eventrelease.ydata)
        elif self.spancoords == 'pixels':
            spanx = abs(self._eventpress.x - self._eventrelease.x)
            spany = abs(self._eventpress.y - self._eventrelease.y)
        else:
            # Unreachable if spancoords was validated in __init__; raises a
            # helpful error otherwise.
            _api.check_in_list(['data', 'pixels'],
                               spancoords=self.spancoords)
        # check if drawn distance (if it exists) is not too small in
        # either x or y-direction
        if spanx <= self.minspanx or spany <= self.minspany:
            if self._selection_completed:
                # Call onselect, only when the selection is already existing
                self.onselect(self._eventpress, self._eventrelease)
            # Too small: discard this selection without triggering a redraw.
            self._clear_without_update()
        else:
            self.onselect(self._eventpress, self._eventrelease)
            self._selection_completed = True
        self.update()
        self._active_handle = None
        self._extents_on_press = None
        return False
    def _onmove(self, event):
        """
        Motion notify event handler.

        This can do one of four things:
        - Translate
        - Rotate
        - Re-size
        - Continue the creation of a new shape
        """
        eventpress = self._eventpress
        # The calculations are done for rotation at zero: we apply inverse
        # transformation to events except when we rotate and move
        state = self._state
        # Mode selection: corner handle + 'rotate' state -> rotate;
        # center handle 'C' -> translate; any other handle -> resize;
        # no handle -> create a new shape.
        rotate = 'rotate' in state and self._active_handle in self._corner_order
        move = self._active_handle == 'C'
        resize = self._active_handle and not move
        xdata, ydata = self._get_data_coords(event)
        if resize:
            # Work in the unrotated frame: undo the current rotation on both
            # the cursor position and the press position.
            inv_tr = self._get_rotation_transform().inverted()
            xdata, ydata = inv_tr.transform([xdata, ydata])
            eventpress.xdata, eventpress.ydata = inv_tr.transform(
                (eventpress.xdata, eventpress.ydata))
        dx = xdata - eventpress.xdata
        dy = ydata - eventpress.ydata
        # refmax is used when moving the corner handle with the square state
        # and is the maximum between refx and refy
        refmax = None
        if self._use_data_coordinates:
            refx, refy = dx, dy
        else:
            # Get dx/dy in display coordinates
            refx = event.x - eventpress.x
            refy = event.y - eventpress.y
        x0, x1, y0, y1 = self._extents_on_press
        # rotate an existing shape
        if rotate:
            # calculate angle abc: a = press point, b = shape center,
            # c = current cursor position
            a = (eventpress.xdata, eventpress.ydata)
            b = self.center
            c = (xdata, ydata)
            angle = (np.arctan2(c[1]-b[1], c[0]-b[0]) -
                     np.arctan2(a[1]-b[1], a[0]-b[0]))
            self.rotation = np.rad2deg(self._rotation_on_press + angle)
        elif resize:
            size_on_press = [x1 - x0, y1 - y0]
            center = (x0 + size_on_press[0] / 2, y0 + size_on_press[1] / 2)
            # Keeping the center fixed
            if 'center' in state:
                # hh, hw are half-height and half-width
                if 'square' in state:
                    # when using a corner, find which reference to use
                    if self._active_handle in self._corner_order:
                        refmax = max(refx, refy, key=abs)
                    if self._active_handle in ['E', 'W'] or refmax == refx:
                        hw = xdata - center[0]
                        hh = hw / self._aspect_ratio_correction
                    else:
                        hh = ydata - center[1]
                        hw = hh * self._aspect_ratio_correction
                else:
                    hw = size_on_press[0] / 2
                    hh = size_on_press[1] / 2
                    # cancel changes in perpendicular direction
                    if self._active_handle in ['E', 'W'] + self._corner_order:
                        hw = abs(xdata - center[0])
                    if self._active_handle in ['N', 'S'] + self._corner_order:
                        hh = abs(ydata - center[1])
                x0, x1, y0, y1 = (center[0] - hw, center[0] + hw,
                                  center[1] - hh, center[1] + hh)
            else:
                # change sign of relative changes to simplify calculation
                # Switch variables so that x1 and/or y1 are updated on move
                if 'W' in self._active_handle:
                    x0 = x1
                if 'S' in self._active_handle:
                    y0 = y1
                if self._active_handle in ['E', 'W'] + self._corner_order:
                    x1 = xdata
                if self._active_handle in ['N', 'S'] + self._corner_order:
                    y1 = ydata
                if 'square' in state:
                    # when using a corner, find which reference to use
                    if self._active_handle in self._corner_order:
                        refmax = max(refx, refy, key=abs)
                    if self._active_handle in ['E', 'W'] or refmax == refx:
                        sign = np.sign(ydata - y0)
                        y1 = y0 + sign * abs(x1 - x0) / self._aspect_ratio_correction
                    else:
                        sign = np.sign(xdata - x0)
                        x1 = x0 + sign * abs(y1 - y0) * self._aspect_ratio_correction
        elif move:
            # Translate: shift the press-time extents by the drag delta.
            x0, x1, y0, y1 = self._extents_on_press
            dx = xdata - eventpress.xdata
            dy = ydata - eventpress.ydata
            x0 += dx
            x1 += dx
            y0 += dy
            y1 += dy
        else:
            # Create a new shape
            self._rotation = 0
            # Don't create a new rectangle if there is already one when
            # ignore_event_outside=True
            if ((self.ignore_event_outside and self._selection_completed) or
                    not self._allow_creation):
                return
            center = [eventpress.xdata, eventpress.ydata]
            dx = (xdata - center[0]) / 2
            dy = (ydata - center[1]) / 2
            # square shape
            if 'square' in state:
                refmax = max(refx, refy, key=abs)
                if refmax == refx:
                    dy = np.sign(dy) * abs(dx) / self._aspect_ratio_correction
                else:
                    dx = np.sign(dx) * abs(dy) * self._aspect_ratio_correction
            # from center
            if 'center' in state:
                dx *= 2
                dy *= 2
            # from corner
            else:
                center[0] += dx
                center[1] += dy
            x0, x1, y0, y1 = (center[0] - dx, center[0] + dx,
                              center[1] - dy, center[1] + dy)
        # The extents setter redraws the shape and handles.
        self.extents = x0, x1, y0, y1
@property
def _rect_bbox(self):
return self._selection_artist.get_bbox().bounds
def _set_aspect_ratio_correction(self):
aspect_ratio = self.ax._get_aspect_ratio()
self._selection_artist._aspect_ratio_correction = aspect_ratio
if self._use_data_coordinates:
self._aspect_ratio_correction = 1
else:
self._aspect_ratio_correction = aspect_ratio
def _get_rotation_transform(self):
aspect_ratio = self.ax._get_aspect_ratio()
return Affine2D().translate(-self.center[0], -self.center[1]) \
.scale(1, aspect_ratio) \
.rotate(self._rotation) \
.scale(1, 1 / aspect_ratio) \
.translate(*self.center)
@property
def corners(self):
"""
Corners of rectangle in data coordinates from lower left,
moving clockwise.
"""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
transform = self._get_rotation_transform()
coords = transform.transform(np.array([xc, yc]).T).T
return coords[0], coords[1]
@property
def edge_centers(self):
"""
Midpoint of rectangle edges in data coordinates from left,
moving anti-clockwise.
"""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
transform = self._get_rotation_transform()
coords = transform.transform(np.array([xe, ye]).T).T
return coords[0], coords[1]
@property
def center(self):
"""Center of rectangle in data coordinates."""
x0, y0, width, height = self._rect_bbox
return x0 + width / 2., y0 + height / 2.
@property
def extents(self):
"""
Return (xmin, xmax, ymin, ymax) in data coordinates as defined by the
bounding box before rotation.
"""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
# Update displayed shape
self._draw_shape(extents)
if self._interactive:
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
x, y = self.center
self._center_handle.set_data([x], [y])
self.set_visible(self._visible)
self.update()
@property
def rotation(self):
"""
Rotation in degree in interval [-45°, 45°]. The rotation is limited in
range to keep the implementation simple.
"""
return np.rad2deg(self._rotation)
@rotation.setter
def rotation(self, value):
# Restrict to a limited range of rotation [-45°, 45°] to avoid changing
# order of handles
if -45 <= value and value <= 45:
self._rotation = np.deg2rad(value)
# call extents setter to draw shape and update handles positions
self.extents = self.extents
def _draw_shape(self, extents):
x0, x1, y0, y1 = extents
xmin, xmax = sorted([x0, x1])
ymin, ymax = sorted([y0, y1])
xlim = sorted(self.ax.get_xlim())
ylim = sorted(self.ax.get_ylim())
xmin = max(xlim[0], xmin)
ymin = max(ylim[0], ymin)
xmax = min(xmax, xlim[1])
ymax = min(ymax, ylim[1])
self._selection_artist.set_x(xmin)
self._selection_artist.set_y(ymin)
self._selection_artist.set_width(xmax - xmin)
self._selection_artist.set_height(ymax - ymin)
self._selection_artist.set_angle(self.rotation)
    def _set_active_handle(self, event):
        """Set active handle based on the location of the mouse event."""
        # Note: event.xdata/ydata in data coordinates, event.x/y in pixels
        c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
        e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
        # m_idx is unused: the center handle is a single marker.
        m_idx, m_dist = self._center_handle.closest(event.x, event.y)
        if 'move' in self._state:
            # 'move' state forces a translation regardless of position.
            self._active_handle = 'C'
        # Set active handle as closest handle, if mouse click is close enough.
        elif m_dist < self.grab_range * 2:
            # Prioritise center handle over other handles
            self._active_handle = 'C'
        elif c_dist > self.grab_range and e_dist > self.grab_range:
            # Not close to any handles
            if self.drag_from_anywhere and self._contains(event):
                # Check if we've clicked inside the region
                self._active_handle = 'C'
            else:
                self._active_handle = None
                return
        elif c_dist < e_dist:
            # Closest to a corner handle
            self._active_handle = self._corner_order[c_idx]
        else:
            # Closest to an edge handle
            self._active_handle = self._edge_order[e_idx]
def _contains(self, event):
"""Return True if event is within the patch."""
return self._selection_artist.contains(event, radius=0)[0]
@property
def geometry(self):
"""
Return an array of shape (2, 5) containing the
x (``RectangleSelector.geometry[1, :]``) and
y (``RectangleSelector.geometry[0, :]``) data coordinates of the four
corners of the rectangle starting and ending in the top left corner.
"""
if hasattr(self._selection_artist, 'get_verts'):
xfm = self.ax.transData.inverted()
y, x = xfm.transform(self._selection_artist.get_verts()).T
return np.array([x, y])
else:
return np.array(self._selection_artist.get_data())
@_docstring.Substitution(_RECTANGLESELECTOR_PARAMETERS_DOCSTRING.replace(
    '__ARTIST_NAME__', 'ellipse'))
class EllipseSelector(RectangleSelector):
    """
    Select an elliptical region of an Axes.
    For the cursor to remain responsive you must keep a reference to it.
    Press and release events triggered at the same coordinates outside the
    selection will clear the selector, except when
    ``ignore_event_outside=True``.
    %s
    Examples
    --------
    :doc:`/gallery/widgets/rectangle_selector`
    """

    def _init_shape(self, **props):
        # Start from a degenerate, hidden ellipse; it is resized on draw.
        return Ellipse((0, 0), 0, 1, visible=False, **props)

    def _draw_shape(self, extents):
        """Recenter and resize the ellipse artist to match *extents*."""
        x0, x1, y0, y1 = extents
        xmin, xmax = sorted((x0, x1))
        ymin, ymax = sorted((y0, y1))
        center = [x0 + (x1 - x0) / 2., y0 + (y1 - y0) / 2.]
        # Semi-axes of the ellipse.
        a = (xmax - xmin) / 2.
        b = (ymax - ymin) / 2.
        artist = self._selection_artist
        artist.center = center
        artist.width = 2 * a
        artist.height = 2 * b
        artist.angle = self.rotation

    @property
    def _rect_bbox(self):
        """Bounds (x0, y0, width, height) of the ellipse's bounding box."""
        cx, cy = self._selection_artist.center
        width = self._selection_artist.width
        height = self._selection_artist.height
        return cx - width / 2., cy - height / 2., width, height
class LassoSelector(_SelectorWidget):
    """
    Selection curve of an arbitrary shape.
    For the selector to remain responsive you must keep a reference to it.
    The selected path can be used in conjunction with `~.Path.contains_point`
    to select data points from an image.
    In contrast to `Lasso`, `LassoSelector` is written with an interface
    similar to `RectangleSelector` and `SpanSelector`, and will continue to
    interact with the Axes until disconnected.
    Example usage::
        ax = plt.subplot()
        ax.plot(x, y)
        def onselect(verts):
            print(verts)
        lasso = LassoSelector(ax, onselect)
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    onselect : function, optional
        Whenever the lasso is released, the *onselect* function is called and
        passed the vertices of the selected path.
    useblit : bool, default: True
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting`
        for details.
    props : dict, optional
        Properties with which the line is drawn, see `.Line2D`
        for valid properties. Default values are defined in ``mpl.rcParams``.
    button : `.MouseButton` or list of `.MouseButton`, optional
        The mouse buttons used for rectangle selection. Default is ``None``,
        which corresponds to all buttons.
    """

    def __init__(self, ax, onselect=None, *, useblit=True, props=None, button=None):
        super().__init__(ax, onselect, useblit=useblit, button=button)
        self.verts = None
        line_props = dict(props) if props is not None else {}
        # Note that self.useblit may be != useblit, if the canvas doesn't
        # support blitting.
        line_props['animated'] = self.useblit
        line_props['visible'] = False
        line = Line2D([], [], **line_props)
        self.ax.add_line(line)
        self._selection_artist = line

    def _press(self, event):
        # Start a fresh vertex list at the press position and show the line.
        self.verts = [self._get_data(event)]
        self._selection_artist.set_visible(True)

    def _release(self, event):
        if self.verts is None:
            return
        self.verts.append(self._get_data(event))
        self.onselect(self.verts)
        # Reset the line for the next lasso.
        self._selection_artist.set_data([[], []])
        self._selection_artist.set_visible(False)
        self.verts = None

    def _onmove(self, event):
        if self.verts is None:
            return
        self.verts.append(self._get_data(event))
        xs, ys = zip(*self.verts)
        self._selection_artist.set_data(xs, ys)
        self.update()
class PolygonSelector(_SelectorWidget):
    """
    Select a polygon region of an Axes.
    Place vertices with each mouse click, and make the selection by completing
    the polygon (clicking on the first vertex). Once drawn individual vertices
    can be moved by clicking and dragging with the left mouse button, or
    removed by clicking the right mouse button.
    In addition, the following modifier keys can be used:
    - Hold *ctrl* and click and drag a vertex to reposition it before the
      polygon has been completed.
    - Hold the *shift* key and click and drag anywhere in the Axes to move
      all vertices.
    - Press the *esc* key to start a new polygon.
    For the selector to remain responsive you must keep a reference to it.
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    onselect : function, optional
        When a polygon is completed or modified after completion,
        the *onselect* function is called and passed a list of the vertices as
        ``(xdata, ydata)`` tuples.
    useblit : bool, default: False
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting`
        for details.
    props : dict, optional
        Properties with which the line is drawn, see `.Line2D` for valid properties.
        Default::
            dict(color='k', linestyle='-', linewidth=2, alpha=0.5)
    handle_props : dict, optional
        Artist properties for the markers drawn at the vertices of the polygon.
        See the marker arguments in `.Line2D` for valid
        properties. Default values are defined in ``mpl.rcParams`` except for
        the default value of ``markeredgecolor`` which will be the same as the
        ``color`` property in *props*.
    grab_range : float, default: 10
        A vertex is selected (to complete the polygon or to move a vertex) if
        the mouse click is within *grab_range* pixels of the vertex.
    draw_bounding_box : bool, optional
        If `True`, a bounding box will be drawn around the polygon selector
        once it is complete. This box can be used to move and resize the
        selector.
    box_handle_props : dict, optional
        Properties to set for the box handles. See the documentation for the
        *handle_props* argument to `RectangleSelector` for more info.
    box_props : dict, optional
        Properties to set for the box. See the documentation for the *props*
        argument to `RectangleSelector` for more info.
    Examples
    --------
    :doc:`/gallery/widgets/polygon_selector_simple`
    :doc:`/gallery/widgets/polygon_selector_demo`
    Notes
    -----
    If only one point remains after removing points, the selector reverts to an
    incomplete state and you can start drawing a new polygon from the existing
    point.
    """

    def __init__(self, ax, onselect=None, *, useblit=False,
                 props=None, handle_props=None, grab_range=10,
                 draw_bounding_box=False, box_handle_props=None,
                 box_props=None):
        # The state modifiers 'move', 'square', and 'center' are expected by
        # _SelectorWidget but are not supported by PolygonSelector
        # Note: could not use the existing 'move' state modifier in-place of
        # 'move_all' because _SelectorWidget automatically discards 'move'
        # from the state on button release.
        state_modifier_keys = dict(clear='escape', move_vertex='control',
                                   move_all='shift', move='not-applicable',
                                   square='not-applicable',
                                   center='not-applicable',
                                   rotate='not-applicable')
        super().__init__(ax, onselect, useblit=useblit,
                         state_modifier_keys=state_modifier_keys)
        # Vertex list; the last entry tracks the pending (cursor) vertex and
        # duplicates the first entry once the polygon is closed.
        self._xys = [(0, 0)]
        if props is None:
            props = dict(color='k', linestyle='-', linewidth=2, alpha=0.5)
        props = {**props, 'animated': self.useblit}
        self._selection_artist = line = Line2D([], [], **props)
        self.ax.add_line(line)
        if handle_props is None:
            handle_props = dict(markeredgecolor='k',
                                markerfacecolor=props.get('color', 'k'))
        self._handle_props = handle_props
        self._polygon_handles = ToolHandles(self.ax, [], [],
                                            useblit=self.useblit,
                                            marker_props=self._handle_props)
        self._active_handle_idx = -1
        self.grab_range = grab_range
        self.set_visible(True)
        self._draw_box = draw_bounding_box
        self._box = None
        if box_handle_props is None:
            box_handle_props = {}
        # BUGFIX: dict.update() returns None, so the previous
        # ``self._handle_props.update(box_handle_props)`` assignment stored
        # None here, silently discarding *box_handle_props* (and mutating
        # self._handle_props as a side effect).  Merge into a new dict
        # instead, with *box_handle_props* taking precedence.
        self._box_handle_props = {**self._handle_props, **box_handle_props}
        self._box_props = box_props

    def _get_bbox(self):
        """Return the bounding `.Bbox` of the polygon line."""
        return self._selection_artist.get_bbox()

    def _add_box(self):
        """Create the bounding-box RectangleSelector around the polygon."""
        self._box = RectangleSelector(self.ax,
                                      useblit=self.useblit,
                                      grab_range=self.grab_range,
                                      handle_props=self._box_handle_props,
                                      props=self._box_props,
                                      interactive=True)
        self._box._state_modifier_keys.pop('rotate')
        self._box.connect_event('motion_notify_event', self._scale_polygon)
        self._update_box()
        # Set state that prevents the RectangleSelector from being created
        # by the user
        self._box._allow_creation = False
        self._box._selection_completed = True
        self._draw_polygon()

    def _remove_box(self):
        """Hide and drop the bounding-box selector, if any."""
        if self._box is not None:
            self._box.set_visible(False)
            self._box = None

    def _update_box(self):
        # Update selection box extents to the extents of the polygon
        if self._box is not None:
            bbox = self._get_bbox()
            self._box.extents = [bbox.x0, bbox.x1, bbox.y0, bbox.y1]
            # Save a copy so _scale_polygon can detect box changes.
            self._old_box_extents = self._box.extents

    def _scale_polygon(self, event):
        """
        Scale the polygon selector points when the bounding box is moved or
        scaled.
        This is set as a callback on the bounding box RectangleSelector.
        """
        if not self._selection_completed:
            return
        if self._old_box_extents == self._box.extents:
            return
        # Create transform from old box to new box
        x1, y1, w1, h1 = self._box._rect_bbox
        old_bbox = self._get_bbox()
        t = (transforms.Affine2D()
             .translate(-old_bbox.x0, -old_bbox.y0)
             .scale(1 / old_bbox.width, 1 / old_bbox.height)
             .scale(w1, h1)
             .translate(x1, y1))
        # Update polygon verts. Must be a list of tuples for consistency.
        new_verts = [(x, y) for x, y in t.transform(np.array(self.verts))]
        self._xys = [*new_verts, new_verts[0]]
        self._draw_polygon()
        self._old_box_extents = self._box.extents

    @property
    def _handles_artists(self):
        return self._polygon_handles.artists

    def _remove_vertex(self, i):
        """Remove vertex with index i."""
        if (len(self._xys) > 2 and
                self._selection_completed and
                i in (0, len(self._xys) - 1)):
            # If selecting the first or final vertex, remove both first and
            # last vertex as they are the same for a closed polygon
            self._xys.pop(0)
            self._xys.pop(-1)
            # Close the polygon again by appending the new first vertex to the
            # end
            self._xys.append(self._xys[0])
        else:
            self._xys.pop(i)
        if len(self._xys) <= 2:
            # If only one point left, return to incomplete state to let user
            # start drawing again
            self._selection_completed = False
            self._remove_box()

    def _press(self, event):
        """Button press event handler."""
        # Check for selection of a tool handle.
        if ((self._selection_completed or 'move_vertex' in self._state)
                and len(self._xys) > 0):
            h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)
            if h_dist < self.grab_range:
                self._active_handle_idx = h_idx
        # Save the vertex positions at the time of the press event (needed to
        # support the 'move_all' state modifier).
        self._xys_at_press = self._xys.copy()

    def _release(self, event):
        """Button release event handler."""
        # Release active tool handle.
        if self._active_handle_idx >= 0:
            if event.button == 3:
                # Right click removes the grabbed vertex.
                self._remove_vertex(self._active_handle_idx)
                self._draw_polygon()
            self._active_handle_idx = -1
        # Complete the polygon.
        elif len(self._xys) > 3 and self._xys[-1] == self._xys[0]:
            self._selection_completed = True
            if self._draw_box and self._box is None:
                self._add_box()
        # Place new vertex.
        elif (not self._selection_completed
              and 'move_all' not in self._state
              and 'move_vertex' not in self._state):
            # Insert before the trailing pending/closing vertex.
            self._xys.insert(-1, self._get_data_coords(event))
        if self._selection_completed:
            self.onselect(self.verts)

    def onmove(self, event):
        """Cursor move event handler and validator."""
        # Method overrides _SelectorWidget.onmove because the polygon selector
        # needs to process the move callback even if there is no button press.
        # _SelectorWidget.onmove include logic to ignore move event if
        # _eventpress is None.
        if self.ignore(event):
            # Hide the cursor when interactive zoom/pan is active
            if not self.canvas.widgetlock.available(self) and self._xys:
                self._xys[-1] = (np.nan, np.nan)
                self._draw_polygon()
            return False
        else:
            event = self._clean_event(event)
            self._onmove(event)
            return True

    def _onmove(self, event):
        """Cursor move event handler."""
        # Move the active vertex (ToolHandle).
        if self._active_handle_idx >= 0:
            idx = self._active_handle_idx
            self._xys[idx] = self._get_data_coords(event)
            # Also update the end of the polygon line if the first vertex is
            # the active handle and the polygon is completed.
            if idx == 0 and self._selection_completed:
                self._xys[-1] = self._get_data_coords(event)
        # Move all vertices.
        elif 'move_all' in self._state and self._eventpress:
            xdata, ydata = self._get_data_coords(event)
            dx = xdata - self._eventpress.xdata
            dy = ydata - self._eventpress.ydata
            for k in range(len(self._xys)):
                x_at_press, y_at_press = self._xys_at_press[k]
                self._xys[k] = x_at_press + dx, y_at_press + dy
        # Do nothing if completed or waiting for a move.
        elif (self._selection_completed
              or 'move_vertex' in self._state or 'move_all' in self._state):
            return
        # Position pending vertex.
        else:
            # Calculate distance to the start vertex.
            x0, y0 = \
                self._selection_artist.get_transform().transform(self._xys[0])
            v0_dist = np.hypot(x0 - event.x, y0 - event.y)
            # Lock on to the start vertex if near it and ready to complete.
            if len(self._xys) > 3 and v0_dist < self.grab_range:
                self._xys[-1] = self._xys[0]
            else:
                self._xys[-1] = self._get_data_coords(event)
        self._draw_polygon()

    def _on_key_press(self, event):
        """Key press event handler."""
        # Remove the pending vertex if entering the 'move_vertex' or
        # 'move_all' mode
        if (not self._selection_completed
                and ('move_vertex' in self._state or
                     'move_all' in self._state)):
            self._xys.pop()
            self._draw_polygon()

    def _on_key_release(self, event):
        """Key release event handler."""
        # Add back the pending vertex if leaving the 'move_vertex' or
        # 'move_all' mode (by checking the released key)
        if (not self._selection_completed
                and
                (event.key == self._state_modifier_keys.get('move_vertex')
                 or event.key == self._state_modifier_keys.get('move_all'))):
            self._xys.append(self._get_data_coords(event))
            self._draw_polygon()
        # Reset the polygon if the released key is the 'clear' key.
        elif event.key == self._state_modifier_keys.get('clear'):
            event = self._clean_event(event)
            self._xys = [self._get_data_coords(event)]
            self._selection_completed = False
            self._remove_box()
            self.set_visible(True)

    def _draw_polygon_without_update(self):
        """Redraw the polygon based on new vertex positions, no update()."""
        xs, ys = zip(*self._xys) if self._xys else ([], [])
        self._selection_artist.set_data(xs, ys)
        self._update_box()
        # Only show one tool handle at the start and end vertex of the polygon
        # if the polygon is completed or the user is locked on to the start
        # vertex.
        if (self._selection_completed
                or (len(self._xys) > 3
                    and self._xys[-1] == self._xys[0])):
            self._polygon_handles.set_data(xs[:-1], ys[:-1])
        else:
            self._polygon_handles.set_data(xs, ys)

    def _draw_polygon(self):
        """Redraw the polygon based on the new vertex positions."""
        self._draw_polygon_without_update()
        self.update()

    @property
    def verts(self):
        """The polygon vertices, as a list of ``(x, y)`` pairs."""
        return self._xys[:-1]

    @verts.setter
    def verts(self, xys):
        """
        Set the polygon vertices.
        This will remove any preexisting vertices, creating a complete polygon
        with the new vertices.
        """
        self._xys = [*xys, xys[0]]
        self._selection_completed = True
        self.set_visible(True)
        if self._draw_box and self._box is None:
            self._add_box()
        self._draw_polygon()

    def _clear_without_update(self):
        """Reset to the initial, incomplete state without redrawing."""
        self._selection_completed = False
        self._xys = [(0, 0)]
        self._draw_polygon_without_update()
class Lasso(AxesWidget):
    """
    Selection curve of an arbitrary shape.
    The selected path can be used in conjunction with
    `~matplotlib.path.Path.contains_point` to select data points from an image.
    Unlike `LassoSelector`, this must be initialized with a starting
    point *xy*, and the `Lasso` events are destroyed upon release.
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    xy : (float, float)
        Coordinates of the start of the lasso.
    callback : callable
        Whenever the lasso is released, the *callback* function is called and
        passed the vertices of the selected path.
    useblit : bool, default: True
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting`
        for details.
    props: dict, optional
        Lasso line properties. See `.Line2D` for valid properties.
        Default *props* are::
            {'linestyle' : '-', 'color' : 'black', 'lw' : 2}
        .. versionadded:: 3.9
    """

    def __init__(self, ax, xy, callback, *, useblit=True, props=None):
        super().__init__(ax)
        self.useblit = useblit and self.canvas.supports_blit
        if self.useblit:
            # Snapshot the Axes so it can be restored on each motion event.
            self.background = self.canvas.copy_from_bbox(self.ax.bbox)
        style = {'linestyle': '-', 'color': 'black', 'lw': 2,
                 **(props or {})}
        start_x, start_y = xy
        self.verts = [(start_x, start_y)]
        self.line = Line2D([start_x], [start_y], **style)
        self.ax.add_line(self.line)
        self.callback = callback
        self.connect_event('button_release_event', self.onrelease)
        self.connect_event('motion_notify_event', self.onmove)

    def onrelease(self, event):
        if self.ignore(event) or self.verts is None:
            return
        self.verts.append(self._get_data_coords(event))
        # Only report paths with at least three points.
        if len(self.verts) > 2:
            self.callback(self.verts)
        self.line.remove()
        self.verts = None
        # One-shot widget: tear down all event handlers on release.
        self.disconnect_events()

    def onmove(self, event):
        if (self.ignore(event)
                or self.verts is None
                or event.button != 1
                or not self.ax.contains(event)[0]):
            return
        self.verts.append(self._get_data_coords(event))
        self.line.set_data(list(zip(*self.verts)))
        if not self.useblit:
            self.canvas.draw_idle()
            return
        self.canvas.restore_region(self.background)
        self.ax.draw_artist(self.line)
        self.canvas.blit(self.ax.bbox)
venv\Lib\site-packages\matplotlib\_afm.py
"""
A python interface to Adobe Font Metrics Files.
Although a number of other Python implementations exist, and may be more
complete than this, it was decided not to go with them because they were
either:
1) copyrighted or used a non-BSD compatible license
2) had too many dependencies and a free standing lib was needed
3) did more than needed and it was easier to write afresh rather than
figure out how to get just what was needed.
It is pretty easy to use, and has no external dependencies:
>>> import matplotlib as mpl
>>> from pathlib import Path
>>> afm_path = Path(mpl.get_data_path(), 'fonts', 'afm', 'ptmr8a.afm')
>>>
>>> from matplotlib.afm import AFM
>>> with afm_path.open('rb') as fh:
... afm = AFM(fh)
>>> afm.string_width_height('What the heck?')
(6220.0, 694)
>>> afm.get_fontname()
'Times-Roman'
>>> afm.get_kern_dist('A', 'f')
0
>>> afm.get_kern_dist('A', 'y')
-92.0
>>> afm.get_bbox_char('!')
[130, -9, 238, 676]
As in the Adobe Font Metrics File Format Specification, all dimensions
are given in units of 1/1000 of the scale factor (point size) of the font
being used.
"""
from collections import namedtuple
import logging
import re
from ._mathtext_data import uni2type1
_log = logging.getLogger(__name__)
def _to_int(x):
# Some AFM files have floats where we are expecting ints -- there is
# probably a better way to handle this (support floats, round rather than
# truncate). But I don't know what the best approach is now and this
# change to _to_int should at least prevent Matplotlib from crashing on
# these. JDH (2009-11-06)
return int(float(x))
def _to_float(x):
# Some AFM files use "," instead of "." as decimal separator -- this
# shouldn't be ambiguous (unless someone is wicked enough to use "," as
# thousands separator...).
if isinstance(x, bytes):
# Encoding doesn't really matter -- if we have codepoints >127 the call
# to float() will error anyways.
x = x.decode('latin-1')
return float(x.replace(',', '.'))
def _to_str(x):
return x.decode('utf8')
def _to_list_of_ints(s):
    """Parse bytes of comma- and/or space-separated numbers into ints."""
    tokens = s.replace(b',', b' ').split()
    return [_to_int(token) for token in tokens]
def _to_list_of_floats(s):
    """Parse whitespace-separated numbers into a list of floats."""
    return list(map(_to_float, s.split()))
def _to_bool(s):
if s.lower().strip() in (b'false', b'0', b'no'):
return False
else:
return True
def _parse_header(fh):
    """
    Read the font metrics header (up to the char metrics) and returns
    a dictionary mapping *key* to *val*. *val* will be converted to the
    appropriate python type as necessary; e.g.:
    * 'False'->False
    * '0'->0
    * '-168 -218 1000 898'-> [-168, -218, 1000, 898]
    Dictionary keys are
    StartFontMetrics, FontName, FullName, FamilyName, Weight,
    ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
    UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
    XHeight, Ascender, Descender, StartCharMetrics
    """
    # Maps each recognized header keyword to the converter for its value.
    header_converters = {
        b'StartFontMetrics': _to_float,
        b'FontName': _to_str,
        b'FullName': _to_str,
        b'FamilyName': _to_str,
        b'Weight': _to_str,
        b'ItalicAngle': _to_float,
        b'IsFixedPitch': _to_bool,
        b'FontBBox': _to_list_of_ints,
        b'UnderlinePosition': _to_float,
        b'UnderlineThickness': _to_float,
        b'Version': _to_str,
        # Some AFM files have non-ASCII characters (which are not allowed by
        # the spec). Given that there is actually no public API to even access
        # this field, just return it as straight bytes.
        b'Notice': lambda x: x,
        b'EncodingScheme': _to_str,
        b'CapHeight': _to_float,  # Is the second version a mistake, or
        b'Capheight': _to_float,  # do some AFM files contain 'Capheight'? -JKS
        b'XHeight': _to_float,
        b'Ascender': _to_float,
        b'Descender': _to_float,
        b'StdHW': _to_float,
        b'StdVW': _to_float,
        b'StartCharMetrics': _to_int,
        b'CharacterSet': _to_str,
        b'Characters': _to_int,
    }
    d = {}
    first_line = True
    for line in fh:
        line = line.rstrip()
        if line.startswith(b'Comment'):
            continue
        # Each header line is "Keyword value...": split once on the first space.
        lst = line.split(b' ', 1)
        key = lst[0]
        if first_line:
            # AFM spec, Section 4: The StartFontMetrics keyword
            # [followed by a version number] must be the first line in
            # the file, and the EndFontMetrics keyword must be the
            # last non-empty line in the file. We just check the
            # first header entry.
            if key != b'StartFontMetrics':
                raise RuntimeError('Not an AFM file')
            first_line = False
        if len(lst) == 2:
            val = lst[1]
        else:
            # Keyword with no value (e.g. a bare flag line).
            val = b''
        try:
            converter = header_converters[key]
        except KeyError:
            # Unknown keywords are logged and skipped, not fatal.
            _log.error("Found an unknown keyword in AFM header (was %r)", key)
            continue
        try:
            d[key] = converter(val)
        except ValueError:
            _log.error('Value error parsing header in AFM: %s, %s', key, val)
            continue
        if key == b'StartCharMetrics':
            # End of header; the caller continues with the char metrics
            # section, leaving the file cursor just past this line.
            break
    else:
        # The file ended without a StartCharMetrics line.
        raise RuntimeError('Bad parse')
    return d
# Lightweight record for a single character's metrics; documentation is
# attached below via __doc__ assignments (namedtuple fields accept them).
CharMetrics = namedtuple('CharMetrics', 'width, name, bbox')
CharMetrics.__doc__ = """
Represents the character metrics of a single character.
Notes
-----
The fields do currently only describe a subset of character metrics
information defined in the AFM standard.
"""
CharMetrics.width.__doc__ = """The character width (WX)."""
CharMetrics.name.__doc__ = """The character name (N)."""
CharMetrics.bbox.__doc__ = """
The bbox of the character (B) as a tuple (*llx*, *lly*, *urx*, *ury*)."""
def _parse_char_metrics(fh):
    """
    Parse the given filehandle for character metrics information and return
    the information as dicts.
    It is assumed that the file cursor is on the line behind
    'StartCharMetrics'.
    Returns
    -------
    ascii_d : dict
        A mapping "ASCII num of the character" to `.CharMetrics`.
    name_d : dict
        A mapping "character name" to `.CharMetrics`.
    Notes
    -----
    This function is incomplete per the standard, but thus far parses
    all the sample afm files tried.
    """
    needed = {'C', 'WX', 'N', 'B'}
    by_code = {}
    by_name = {}
    for raw in fh:
        # Decode defensively as utf8; the spec requires ASCII, but
        # non-compliant fonts exist in circulation.
        text = _to_str(raw.rstrip())
        if text.startswith('EndCharMetrics'):
            return by_code, by_name
        # Each ';'-separated field is "<key> <value>"; collect into a dict.
        fields = dict(part.strip().split(' ', 1)
                      for part in text.split(';') if part)
        # Other metrics may be present, but only these are needed.
        if not needed.issubset(fields):
            raise RuntimeError('Bad char metrics line: %s' % text)
        code = _to_int(fields['C'])
        glyph_name = fields['N']
        bbox = [int(v) for v in _to_list_of_floats(fields['B'])]
        metrics = CharMetrics(_to_float(fields['WX']), glyph_name, bbox)
        # Workaround: give 'Euro' its WinAnsiEncoding character code (see
        # PDF Reference), and map 'minus' to U+2212.
        if glyph_name == 'Euro':
            code = 128
        elif glyph_name == 'minus':
            code = ord("\N{MINUS SIGN}")  # 0x2212
        if code != -1:
            by_code[code] = metrics
        by_name[glyph_name] = metrics
    raise RuntimeError('Bad parse')
def _parse_kern_pairs(fh):
    """
    Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and
    values are the kern pair value. For example, a kern pairs line like
    ``KPX A y -50``
    will be represented as::
        d[ ('A', 'y') ] = -50
    """
    first = next(fh)
    if not first.startswith(b'StartKernPairs'):
        raise RuntimeError('Bad start of kern pairs data: %s' % first)
    pairs = {}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        if raw.startswith(b'EndKernPairs'):
            next(fh)  # Consume the trailing EndKernData line.
            return pairs
        tokens = raw.split()
        if len(tokens) != 4 or tokens[0] != b'KPX':
            raise RuntimeError('Bad kern pairs line: %s' % raw)
        pairs[_to_str(tokens[1]), _to_str(tokens[2])] = _to_float(tokens[3])
    raise RuntimeError('Bad kern pairs parse')
# Lightweight record for one "PCC" entry of a composite-character ("CC")
# definition in an AFM file; built by _parse_composites.
CompositePart = namedtuple('CompositePart', 'name, dx, dy')
CompositePart.__doc__ = """
Represents the information on a composite element of a composite char."""
CompositePart.name.__doc__ = """Name of the part, e.g. 'acute'."""
CompositePart.dx.__doc__ = """x-displacement of the part from the origin."""
CompositePart.dy.__doc__ = """y-displacement of the part from the origin."""
def _parse_composites(fh):
    """
    Parse the given filehandle for composites information return them as a
    dict.
    It is assumed that the file cursor is on the line behind 'StartComposites'.
    Returns
    -------
    dict
        A dict mapping composite character names to a parts list. The parts
        list is a list of `.CompositePart` entries describing the parts of
        the composite.
    Examples
    --------
    A composite definition line::
        CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;
    will be represented as::
        composites['Aacute'] = [CompositePart(name='A', dx=0, dy=0),
                                CompositePart(name='acute', dx=160, dy=170)]
    """
    result = {}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        if raw.startswith(b'EndComposites'):
            return result
        chunks = raw.split(b';')
        # First chunk is "CC <name> <num_parts>"; the declared part count is
        # validated as an int but otherwise unused -- the PCC entries are
        # parsed directly.
        header = chunks[0].split()
        glyph = header[1]
        _to_int(header[2])
        # Middle chunks are "PCC <name> <dx> <dy>"; the final chunk is the
        # empty remainder after the trailing ';'.
        parts = [CompositePart(fields[1], _to_float(fields[2]),
                               _to_float(fields[3]))
                 for fields in (chunk.split() for chunk in chunks[1:-1])]
        result[glyph] = parts
    raise RuntimeError('Bad composites parse')
def _parse_optional(fh):
    """
    Parse the optional fields for kern pair data and composites.
    Returns
    -------
    kern_data : dict
        A dict containing kerning information. May be empty.
        See `._parse_kern_pairs`.
    composites : dict
        A dict containing composite information. May be empty.
        See `._parse_composites`.
    """
    parsers = {
        b'StartKernData': _parse_kern_pairs,
        b'StartComposites': _parse_composites,
    }
    # Default to empty results for both optional sections.
    results = {b'StartKernData': {}, b'StartComposites': {}}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        keyword = raw.split()[0]
        if keyword in parsers:
            results[keyword] = parsers[keyword](fh)
    return results[b'StartKernData'], results[b'StartComposites']
class AFM:
    """
    A parsed Adobe Font Metrics (AFM) file.

    The constructor consumes *fh* via the module-level ``_parse_*`` helpers.
    Header fields are keyed by the raw AFM keyword as *bytes*
    (e.g. ``self._header[b'FontName']``).
    """
    def __init__(self, fh):
        """Parse the AFM file in file object *fh*."""
        self._header = _parse_header(fh)
        self._metrics, self._metrics_by_name = _parse_char_metrics(fh)
        self._kern, self._composite = _parse_optional(fh)

    def get_bbox_char(self, c, isord=False):
        """Return the bbox of *c* (a character, or an ordinal if *isord*)."""
        if not isord:
            c = ord(c)
        return self._metrics[c].bbox

    def string_width_height(self, s):
        """
        Return the string width (including kerning) and string height
        as a (*w*, *h*) tuple.
        """
        if not len(s):
            return 0, 0
        total_width = 0
        namelast = None
        miny = 1e9  # sentinel: any real glyph bottom is smaller
        maxy = 0
        for c in s:
            if c == '\n':
                continue
            wx, name, bbox = self._metrics[ord(c)]
            # Advance width plus kerning against the previous glyph.
            total_width += wx + self._kern.get((namelast, name), 0)
            l, b, w, h = bbox
            miny = min(miny, b)
            maxy = max(maxy, b + h)
            namelast = name
        return total_width, maxy - miny

    def get_str_bbox_and_descent(self, s):
        """Return the string bounding box and the maximal descent."""
        if not len(s):
            return 0, 0, 0, 0, 0
        total_width = 0
        namelast = None
        miny = 1e9
        maxy = 0
        left = 0
        if not isinstance(s, str):
            s = _to_str(s)
        for c in s:
            if c == '\n':
                continue
            # Map the codepoint to a type1 glyph name, falling back to the
            # uniXXXX naming scheme.
            name = uni2type1.get(ord(c), f"uni{ord(c):04X}")
            try:
                wx, _, bbox = self._metrics_by_name[name]
            except KeyError:
                # Unknown glyph: substitute 'question'.
                name = 'question'
                wx, _, bbox = self._metrics_by_name[name]
            total_width += wx + self._kern.get((namelast, name), 0)
            l, b, w, h = bbox
            left = min(left, l)
            miny = min(miny, b)
            maxy = max(maxy, b + h)
            namelast = name
        return left, miny, total_width, maxy - miny, -miny

    def get_str_bbox(self, s):
        """Return the string bounding box."""
        return self.get_str_bbox_and_descent(s)[:4]

    def get_name_char(self, c, isord=False):
        """Get the name of the character, i.e., ';' is 'semicolon'."""
        if not isord:
            c = ord(c)
        return self._metrics[c].name

    def get_width_char(self, c, isord=False):
        """
        Get the width of the character from the character metric WX field.
        """
        if not isord:
            c = ord(c)
        return self._metrics[c].width

    def get_width_from_char_name(self, name):
        """Get the width of the character from a type1 character name."""
        return self._metrics_by_name[name].width

    def get_height_char(self, c, isord=False):
        """Get the bounding box (ink) height of character *c* (space is 0)."""
        if not isord:
            c = ord(c)
        return self._metrics[c].bbox[-1]

    def get_kern_dist(self, c1, c2):
        """
        Return the kerning pair distance (possibly 0) for chars *c1* and *c2*.
        """
        name1, name2 = self.get_name_char(c1), self.get_name_char(c2)
        return self.get_kern_dist_from_name(name1, name2)

    def get_kern_dist_from_name(self, name1, name2):
        """
        Return the kerning pair distance (possibly 0) for chars
        *name1* and *name2*.
        """
        return self._kern.get((name1, name2), 0)

    def get_fontname(self):
        """Return the font name, e.g., 'Times-Roman'."""
        return self._header[b'FontName']

    @property
    def postscript_name(self):  # For consistency with FT2Font.
        return self.get_fontname()

    def get_fullname(self):
        """Return the font full name, e.g., 'Times-Roman'."""
        name = self._header.get(b'FullName')
        if name is None:  # use FontName as a substitute
            name = self._header[b'FontName']
        return name

    def get_familyname(self):
        """Return the font family name, e.g., 'Times'."""
        name = self._header.get(b'FamilyName')
        if name is not None:
            return name
        # FamilyName not specified so we'll make a guess: strip trailing
        # style modifiers from the full name.
        name = self.get_fullname()
        extras = (r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|'
                  r'light|ultralight|extra|condensed))+$')
        return re.sub(extras, '', name)

    @property
    def family_name(self):
        """The font family name, e.g., 'Times'."""
        return self.get_familyname()

    def get_weight(self):
        """Return the font weight, e.g., 'Bold' or 'Roman'."""
        return self._header[b'Weight']

    def get_angle(self):
        """Return the fontangle as float."""
        return self._header[b'ItalicAngle']

    def get_capheight(self):
        """Return the cap height as float."""
        return self._header[b'CapHeight']

    def get_xheight(self):
        """Return the xheight as float."""
        return self._header[b'XHeight']

    def get_underline_thickness(self):
        """Return the underline thickness as float."""
        return self._header[b'UnderlineThickness']

    def get_horizontal_stem_width(self):
        """
        Return the standard horizontal stem width as float, or *None* if
        not specified in AFM file.
        """
        return self._header.get(b'StdHW', None)

    def get_vertical_stem_width(self):
        """
        Return the standard vertical stem width as float, or *None* if
        not specified in AFM file.
        """
        return self._header.get(b'StdVW', None)
# JavaScript template for HTMLWriter
# NOTE(review): the template bodies appear to have been lost in extraction --
# JS_INCLUDE and STYLE_INCLUDE are empty strings here, and the
# DISPLAY_TEMPLATE literal below is unterminated in this view; confirm
# against the original source file.
JS_INCLUDE = """
"""
# Style definitions for the HTML template
STYLE_INCLUDE = """
"""
# HTML template for HTMLWriter
DISPLAY_TEMPLATE = """
def blocking_input_loop(figure, event_names, timeout, handler):
    """
    Run *figure*'s event loop while listening to interactive events.
    The events listed in *event_names* are passed to *handler*.
    This function is used to implement `.Figure.waitforbuttonpress`,
    `.Figure.ginput`, and `.Axes.clabel`.
    Parameters
    ----------
    figure : `~matplotlib.figure.Figure`
    event_names : list of str
        The names of the events passed to *handler*.
    timeout : float
        If positive, the event loop is stopped after *timeout* seconds.
    handler : Callable[[Event], Any]
        Function called for each event; it can force an early exit of the event
        loop by calling ``canvas.stop_event_loop()``.
    """
    canvas = figure.canvas
    # Show managed figures so the user can actually interact with them.
    if canvas.manager:
        figure.show()
    # Register *handler* for every requested event type.
    connection_ids = [canvas.mpl_connect(name, handler)
                      for name in event_names]
    try:
        canvas.start_event_loop(timeout)
    finally:
        # Always disconnect the callbacks, even on exceptions like ctrl-c.
        for connection_id in connection_ids:
            canvas.mpl_disconnect(connection_id)
"""
Adjust subplot layouts so that there are no overlapping Axes or Axes
decorations. All Axes decorations are dealt with (labels, ticks, titles,
ticklabels) and some dependent artists are also dealt with (colorbar,
suptitle).
Layout is done via `~matplotlib.gridspec`, with one constraint per gridspec,
so it is possible to have overlapping Axes if the gridspecs overlap (i.e.
using `~matplotlib.gridspec.GridSpecFromSubplotSpec`). Axes placed using
``figure.subplots()`` or ``figure.add_subplot()`` will participate in the
layout. Axes manually placed via ``figure.add_axes()`` will not.
See Tutorial: :ref:`constrainedlayout_guide`
General idea:
-------------
First, a figure has a gridspec that divides the figure into nrows and ncols,
with heights and widths set by ``height_ratios`` and ``width_ratios``,
often just set to 1 for an equal grid.
Subplotspecs that are derived from this gridspec can contain either a
``SubPanel``, a ``GridSpecFromSubplotSpec``, or an ``Axes``. The ``SubPanel``
and ``GridSpecFromSubplotSpec`` are dealt with recursively and each contain an
analogous layout.
Each ``GridSpec`` has a ``_layoutgrid`` attached to it. The ``_layoutgrid``
has the same logical layout as the ``GridSpec``. Each row of the grid spec
has a top and bottom "margin" and each column has a left and right "margin".
The "inner" height of each row is constrained to be the same (or as modified
by ``height_ratio``), and the "inner" width of each column is
constrained to be the same (as modified by ``width_ratio``), where "inner"
is the width or height of each column/row minus the size of the margins.
Then the size of the margins for each row and column are determined as the
max width of the decorators on each Axes that has decorators in that margin.
For instance, a normal Axes would have a left margin that includes the
left ticklabels, and the ylabel if it exists. The right margin may include a
colorbar, the bottom margin the xaxis decorations, and the top margin the
title.
With these constraints, the solver then finds appropriate bounds for the
columns and rows. It's possible that the margins take up the whole figure,
in which case the algorithm is not applied and a warning is raised.
See the tutorial :ref:`constrainedlayout_guide`
for more discussion of the algorithm with examples.
"""
import logging
import numpy as np
from matplotlib import _api, artist as martist
import matplotlib.transforms as mtransforms
import matplotlib._layoutgrid as mlayoutgrid
_log = logging.getLogger(__name__)
######################################################
def do_constrained_layout(fig, h_pad, w_pad,
                          hspace=None, wspace=None, rect=(0, 0, 1, 1),
                          compress=False):
    """
    Do the constrained_layout. Called at draw time in
    ``figure.constrained_layout()``
    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
        `.Figure` instance to do the layout in.
    h_pad, w_pad : float
        Padding around the Axes elements in figure-normalized units.
    hspace, wspace : float
        Fraction of the figure to dedicate to space between the
        Axes. These are evenly spread between the gaps between the Axes.
        A value of 0.2 for a three-column layout would have a space
        of 0.1 of the figure width between each column.
        If h/wspace < h/w_pad, then the pads are used instead.
    rect : tuple of 4 floats
        Rectangle in figure coordinates to perform constrained layout in
        [left, bottom, width, height], each from 0-1.
    compress : bool
        Whether to shift Axes so that white space in between them is
        removed. This is useful for simple grids of fixed-aspect Axes (e.g.
        a grid of images).
    Returns
    -------
    layoutgrid : private debugging structure
    """
    renderer = fig._get_renderer()
    # make layoutgrid tree...
    layoutgrids = make_layoutgrids(fig, None, rect=rect)
    if not layoutgrids['hasgrids']:
        _api.warn_external('There are no gridspecs with layoutgrids. '
                           'Possibly did not call parent GridSpec with the'
                           ' "figure" keyword')
        return
    for _ in range(2):
        # do the algorithm twice. This has to be done because decorations
        # change size after the first re-position (i.e. x/yticklabels get
        # larger/smaller). This second reposition tends to be much milder,
        # so doing twice makes things work OK.
        # make margins for all the Axes and subfigures in the
        # figure. Add margins for colorbars...
        make_layout_margins(layoutgrids, fig, renderer, h_pad=h_pad,
                            w_pad=w_pad, hspace=hspace, wspace=wspace)
        make_margin_suptitles(layoutgrids, fig, renderer, h_pad=h_pad,
                              w_pad=w_pad)
        # if a layout is such that a column's (or row's) margin has no
        # constraints, we need to make all such instances in the grid
        # match in margin size.
        match_submerged_margins(layoutgrids, fig)
        # update all the variables in the layout.
        layoutgrids[fig].update_variables()
        warn_collapsed = ('constrained_layout not applied because '
                          'axes sizes collapsed to zero. Try making '
                          'figure larger or Axes decorations smaller.')
        if check_no_collapsed_axes(layoutgrids, fig):
            reposition_axes(layoutgrids, fig, renderer, h_pad=h_pad,
                            w_pad=w_pad, hspace=hspace, wspace=wspace)
            if compress:
                # Optionally squeeze out the whitespace left behind by
                # fixed-aspect Axes, then re-solve and re-place.
                layoutgrids = compress_fixed_aspect(layoutgrids, fig)
                layoutgrids[fig].update_variables()
                if check_no_collapsed_axes(layoutgrids, fig):
                    reposition_axes(layoutgrids, fig, renderer, h_pad=h_pad,
                                    w_pad=w_pad, hspace=hspace, wspace=wspace)
                else:
                    _api.warn_external(warn_collapsed)
            # An auto-positioned suptitle sits just above the solved grid.
            if ((suptitle := fig._suptitle) is not None and
                    suptitle.get_in_layout() and suptitle._autopos):
                x, _ = suptitle.get_position()
                suptitle.set_position(
                    (x, layoutgrids[fig].get_inner_bbox().y1 + h_pad))
                suptitle.set_verticalalignment('bottom')
        else:
            _api.warn_external(warn_collapsed)
        reset_margins(layoutgrids, fig)
    return layoutgrids
def make_layoutgrids(fig, layoutgrids, rect=(0, 0, 1, 1)):
    """
    Make the layoutgrid tree.
    (Sub)Figures get a layoutgrid so we can have figure margins.
    Gridspecs that are attached to Axes get a layoutgrid so Axes
    can have margins.
    """
    if layoutgrids is None:
        layoutgrids = {'hasgrids': False}
    if not hasattr(fig, '_parent'):
        # Top-level figure: the parent is the user-supplied rect, which
        # allows user-specified outer margins.
        layoutgrids[fig] = mlayoutgrid.LayoutGrid(parent=rect, name='figlb')
    else:
        # Subfigure: the gridspec containing it may not be in the tree yet,
        # so register it first, then hang the subfigure's grid off of it.
        gs = fig._subplotspec.get_gridspec()
        layoutgrids = make_layoutgrids_gs(layoutgrids, gs)
        layoutgrids[fig] = mlayoutgrid.LayoutGrid(
            parent=layoutgrids[gs],
            name='panellb',
            parent_inner=True,
            nrows=1, ncols=1,
            parent_pos=(fig._subplotspec.rowspan,
                        fig._subplotspec.colspan))
    # Recurse into nested subfigures...
    for subfig in fig.subfigs:
        layoutgrids = make_layoutgrids(subfig, layoutgrids)
    # ...and register the gridspec of every Axes at this level.
    for ax in fig._localaxes:
        gs = ax.get_gridspec()
        if gs is not None:
            layoutgrids = make_layoutgrids_gs(layoutgrids, gs)
    return layoutgrids
def make_layoutgrids_gs(layoutgrids, gs):
    """
    Make the layoutgrid for a gridspec (and anything nested in the gridspec)

    Returns the (possibly extended) *layoutgrids* dict; no-op if *gs* is
    already registered or is not attached to a figure.
    """
    if gs in layoutgrids or gs.figure is None:
        return layoutgrids
    # in order to do constrained_layout there has to be at least *one*
    # gridspec in the tree:
    layoutgrids['hasgrids'] = True
    if not hasattr(gs, '_subplot_spec'):
        # normal gridspec
        parent = layoutgrids[gs.figure]
        layoutgrids[gs] = mlayoutgrid.LayoutGrid(
            parent=parent,
            parent_inner=True,
            name='gridspec',
            ncols=gs._ncols, nrows=gs._nrows,
            width_ratios=gs.get_width_ratios(),
            height_ratios=gs.get_height_ratios())
    else:
        # this is a gridspecfromsubplotspec:
        subplot_spec = gs._subplot_spec
        parentgs = subplot_spec.get_gridspec()
        # if a nested gridspec it is possible the parent is not in there yet:
        if parentgs not in layoutgrids:
            layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs)
        subspeclb = layoutgrids[parentgs]
        # gridspecfromsubplotspec need an outer container:
        # get a unique representation:
        rep = (gs, 'top')
        if rep not in layoutgrids:
            layoutgrids[rep] = mlayoutgrid.LayoutGrid(
                parent=subspeclb,
                name='top',
                nrows=1, ncols=1,
                parent_pos=(subplot_spec.rowspan, subplot_spec.colspan))
        layoutgrids[gs] = mlayoutgrid.LayoutGrid(
            parent=layoutgrids[rep],
            name='gridspec',
            nrows=gs._nrows, ncols=gs._ncols,
            width_ratios=gs.get_width_ratios(),
            height_ratios=gs.get_height_ratios())
    return layoutgrids
def check_no_collapsed_axes(layoutgrids, fig):
    """
    Check that no Axes have collapsed to zero size.

    Returns False as soon as any cell spanned by a laid-out Axes (including
    those in subfigures) has non-positive width or height, True otherwise.
    """
    # Recurse into subfigures first; a collapsed child fails the whole check.
    if not all(check_no_collapsed_axes(layoutgrids, subfig)
               for subfig in fig.subfigs):
        return False
    for ax in fig.axes:
        gs = ax.get_gridspec()
        if gs not in layoutgrids:  # also covers gs being None
            continue
        lg = layoutgrids[gs]
        for row in range(gs.nrows):
            for col in range(gs.ncols):
                cell = lg.get_inner_bbox(row, col)
                if cell.width <= 0 or cell.height <= 0:
                    return False
    return True
def compress_fixed_aspect(layoutgrids, fig):
    """
    Shrink whitespace between fixed-aspect Axes by growing figure margins.

    Compares each Axes' original (gridspec-assigned) position with its
    actual (aspect-adjusted) position and converts the accumulated slack
    into extra left/right/top/bottom margins on the figure's layoutgrid,
    so a grid of fixed-aspect Axes (e.g. images) packs tightly.

    Parameters
    ----------
    layoutgrids : dict
        Mapping of figures/gridspecs to their `.LayoutGrid`.
    fig : `~matplotlib.figure.Figure`
        Figure whose Axes are inspected.

    Returns
    -------
    dict
        The (mutated) *layoutgrids*.

    Raises
    ------
    ValueError
        If no Axes belong to a gridspec, or the Axes do not all share a
        single gridspec.
    """
    gs = None
    for ax in fig.axes:
        if ax.get_subplotspec() is None:
            continue
        ax.apply_aspect()
        sub = ax.get_subplotspec()
        _gs = sub.get_gridspec()
        if gs is None:
            gs = _gs
            extraw = np.zeros(gs.ncols)
            extrah = np.zeros(gs.nrows)
        elif _gs != gs:
            # Bug fix: the two implicitly-concatenated fragments previously
            # produced "...Axes are notall from..." (missing space).
            raise ValueError('Cannot do compressed layout if Axes are not '
                             'all from the same gridspec')
        orig = ax.get_position(original=True)
        actual = ax.get_position(original=False)
        # Slack that apply_aspect introduced in each direction:
        dw = orig.width - actual.width
        if dw > 0:
            extraw[sub.colspan] = np.maximum(extraw[sub.colspan], dw)
        dh = orig.height - actual.height
        if dh > 0:
            extrah[sub.rowspan] = np.maximum(extrah[sub.rowspan], dh)
    if gs is None:
        raise ValueError('Cannot do compressed layout if no Axes '
                         'are part of a gridspec.')
    # Split the total slack evenly between the two opposing margins.
    w = np.sum(extraw) / 2
    layoutgrids[fig].edit_margin_min('left', w)
    layoutgrids[fig].edit_margin_min('right', w)
    h = np.sum(extrah) / 2
    layoutgrids[fig].edit_margin_min('top', h)
    layoutgrids[fig].edit_margin_min('bottom', h)
    return layoutgrids
def get_margin_from_padding(obj, *, w_pad=0, h_pad=0,
                            hspace=0, wspace=0):
    """
    Compute the baseline margin dict for *obj* (an Axes or SubFigure with a
    ``_subplotspec``) from the pads and the gridspec's h/wspace.
    """
    ss = obj._subplotspec
    gs = ss.get_gridspec()
    # A plain GridSpec exposes hspace/wspace publicly; a
    # GridSpecFromSubplotSpec stores them as _hspace/_wspace.
    if hasattr(gs, 'hspace'):
        gs_hspace, gs_wspace = gs.hspace, gs.wspace
    else:
        gs_hspace, gs_wspace = gs._hspace, gs._wspace
    # Prefer the gridspec's own values over the figure-level ones, and halve
    # because each gap is shared between two neighboring cells.
    half_hspace = (gs_hspace if gs_hspace is not None else hspace) / 2
    half_wspace = (gs_wspace if gs_wspace is not None else wspace) / 2
    nrows, ncols = gs.get_geometry()
    # Two margins per side: the "cb" margins hold pads and colorbars, the
    # plain ones hold the Axes decorations (labels etc).
    margin = {'leftcb': w_pad, 'rightcb': w_pad,
              'bottomcb': h_pad, 'topcb': h_pad,
              'left': 0, 'right': 0,
              'top': 0, 'bottom': 0}
    if half_wspace / ncols > w_pad:
        # Interior edges get the (larger) spacing instead of the pad.
        if ss.colspan.start > 0:
            margin['leftcb'] = half_wspace / ncols
        if ss.colspan.stop < ncols:
            margin['rightcb'] = half_wspace / ncols
    if half_hspace / nrows > h_pad:
        if ss.rowspan.stop < nrows:
            margin['bottomcb'] = half_hspace / nrows
        if ss.rowspan.start > 0:
            margin['topcb'] = half_hspace / nrows
    return margin
def make_layout_margins(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0,
                        hspace=0, wspace=0):
    """
    For each Axes, make a margin between the *pos* layoutbox and the
    *axes* layoutbox be a minimum size that can accommodate the
    decorations on the axis.
    Then make room for colorbars.
    Parameters
    ----------
    layoutgrids : dict
    fig : `~matplotlib.figure.Figure`
        `.Figure` instance to do the layout in.
    renderer : `~matplotlib.backend_bases.RendererBase` subclass.
        The renderer to use.
    w_pad, h_pad : float, default: 0
        Width and height padding (in fraction of figure).
    hspace, wspace : float, default: 0
        Width and height padding as fraction of figure size divided by
        number of columns or rows.
    """
    for sfig in fig.subfigs:  # recursively make child panel margins
        ss = sfig._subplotspec
        gs = ss.get_gridspec()
        make_layout_margins(layoutgrids, sfig, renderer,
                            w_pad=w_pad, h_pad=h_pad,
                            hspace=hspace, wspace=wspace)
        margins = get_margin_from_padding(sfig, w_pad=0, h_pad=0,
                                          hspace=hspace, wspace=wspace)
        layoutgrids[gs].edit_outer_margin_mins(margins, ss)
    for ax in fig._localaxes:
        if not ax.get_subplotspec() or not ax.get_in_layout():
            continue
        ss = ax.get_subplotspec()
        gs = ss.get_gridspec()
        if gs not in layoutgrids:
            # NOTE(review): this returns (rather than continues) on the
            # first Axes whose gridspec is unregistered, skipping remaining
            # Axes and the figure-legend margins below -- confirm intended.
            return
        margin = get_margin_from_padding(ax, w_pad=w_pad, h_pad=h_pad,
                                         hspace=hspace, wspace=wspace)
        pos, bbox = get_pos_and_bbox(ax, renderer)
        # the margin is the distance between the bounding box of the Axes
        # and its position (plus the padding from above)
        margin['left'] += pos.x0 - bbox.x0
        margin['right'] += bbox.x1 - pos.x1
        # remember that rows are ordered from top:
        margin['bottom'] += pos.y0 - bbox.y0
        margin['top'] += bbox.y1 - pos.y1
        # make margin for colorbars. These margins go in the
        # padding margin, versus the margin for Axes decorators.
        for cbax in ax._colorbars:
            # note pad is a fraction of the parent width...
            pad = colorbar_get_pad(layoutgrids, cbax)
            # colorbars can be child of more than one subplot spec:
            cbp_rspan, cbp_cspan = get_cb_parent_spans(cbax)
            loc = cbax._colorbar_info['location']
            cbpos, cbbbox = get_pos_and_bbox(cbax, renderer)
            if loc == 'right':
                if cbp_cspan.stop == ss.colspan.stop:
                    # only increase if the colorbar is on the right edge
                    margin['rightcb'] += cbbbox.width + pad
            elif loc == 'left':
                if cbp_cspan.start == ss.colspan.start:
                    # only increase if the colorbar is on the left edge
                    margin['leftcb'] += cbbbox.width + pad
            elif loc == 'top':
                if cbp_rspan.start == ss.rowspan.start:
                    margin['topcb'] += cbbbox.height + pad
            else:
                # 'bottom' (the only remaining location)
                if cbp_rspan.stop == ss.rowspan.stop:
                    margin['bottomcb'] += cbbbox.height + pad
            # If the colorbars are wider than the parent box in the
            # cross direction
            if loc in ['top', 'bottom']:
                if (cbp_cspan.start == ss.colspan.start and
                        cbbbox.x0 < bbox.x0):
                    margin['left'] += bbox.x0 - cbbbox.x0
                if (cbp_cspan.stop == ss.colspan.stop and
                        cbbbox.x1 > bbox.x1):
                    margin['right'] += cbbbox.x1 - bbox.x1
            # or taller:
            if loc in ['left', 'right']:
                if (cbp_rspan.stop == ss.rowspan.stop and
                        cbbbox.y0 < bbox.y0):
                    margin['bottom'] += bbox.y0 - cbbbox.y0
                if (cbp_rspan.start == ss.rowspan.start and
                        cbbbox.y1 > bbox.y1):
                    margin['top'] += cbbbox.y1 - bbox.y1
        # pass the new margins down to the layout grid for the solution...
        layoutgrids[gs].edit_outer_margin_mins(margin, ss)
    # make margins for figure-level legends:
    for leg in fig.legends:
        inv_trans_fig = None
        if leg._outside_loc and leg._bbox_to_anchor is None:
            if inv_trans_fig is None:
                inv_trans_fig = fig.transFigure.inverted().transform_bbox
            bbox = inv_trans_fig(leg.get_tightbbox(renderer))
            w = bbox.width + 2 * w_pad
            h = bbox.height + 2 * h_pad
            legendloc = leg._outside_loc
            if legendloc == 'lower':
                layoutgrids[fig].edit_margin_min('bottom', h)
            elif legendloc == 'upper':
                layoutgrids[fig].edit_margin_min('top', h)
            if legendloc == 'right':
                layoutgrids[fig].edit_margin_min('right', w)
            elif legendloc == 'left':
                layoutgrids[fig].edit_margin_min('left', w)
def make_margin_suptitles(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0):
    """
    Grow figure margins to make room for the suptitle, supxlabel and
    supylabel (when they participate in the layout), recursing into
    subfigures.
    """
    # Figure out how large the suptitle is and make the
    # top level figure margin larger.
    inv_trans_fig = fig.transFigure.inverted().transform_bbox
    # get the h_pad and w_pad as distances in the local subfigure coordinates:
    padbox = mtransforms.Bbox([[0, 0], [w_pad, h_pad]])
    padbox = (fig.transFigure -
              fig.transSubfigure).transform_bbox(padbox)
    h_pad_local = padbox.height
    w_pad_local = padbox.width
    for sfig in fig.subfigs:
        make_margin_suptitles(layoutgrids, sfig, renderer,
                              w_pad=w_pad, h_pad=h_pad)
    if fig._suptitle is not None and fig._suptitle.get_in_layout():
        p = fig._suptitle.get_position()
        if getattr(fig._suptitle, '_autopos', False):
            # Auto-positioned: pin just inside the top pad.
            fig._suptitle.set_position((p[0], 1 - h_pad_local))
        bbox = inv_trans_fig(fig._suptitle.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('top', bbox.height + 2 * h_pad)
    if fig._supxlabel is not None and fig._supxlabel.get_in_layout():
        p = fig._supxlabel.get_position()
        if getattr(fig._supxlabel, '_autopos', False):
            fig._supxlabel.set_position((p[0], h_pad_local))
        bbox = inv_trans_fig(fig._supxlabel.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('bottom',
                                         bbox.height + 2 * h_pad)
    if fig._supylabel is not None and fig._supylabel.get_in_layout():
        p = fig._supylabel.get_position()
        if getattr(fig._supylabel, '_autopos', False):
            fig._supylabel.set_position((w_pad_local, p[1]))
        bbox = inv_trans_fig(fig._supylabel.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('left', bbox.width + 2 * w_pad)
def match_submerged_margins(layoutgrids, fig):
    """
    Make the margins that are submerged inside an Axes the same size.
    This allows Axes that span two columns (or rows) that are offset
    from one another to have the same size.
    This gives the proper layout for something like::
        fig = plt.figure(constrained_layout=True)
        axs = fig.subplot_mosaic("AAAB\nCCDD")
    Without this routine, the Axes D will be wider than C, because the
    margin width between the two columns in C has no width by default,
    whereas the margins between the two columns of D are set by the
    width of the margin between A and B. However, obviously the user would
    like C and D to be the same size, so we need to add constraints to these
    "submerged" margins.
    This routine makes all the interior margins the same, and the spacing
    between the three columns in A and the two columns in C are all set to
    the margins between the two columns of D.
    See test_constrained_layout::test_constrained_layout12 for an example.
    """
    axsdone = []
    for sfig in fig.subfigs:
        axsdone += match_submerged_margins(layoutgrids, sfig)
    axs = [a for a in fig.get_axes()
           if (a.get_subplotspec() is not None and a.get_in_layout() and
               a not in axsdone)]
    for ax1 in axs:
        ss1 = ax1.get_subplotspec()
        if ss1.get_gridspec() not in layoutgrids:
            # NOTE(review): removing from *axs* while iterating over it can
            # skip the element following each removal -- confirm intended.
            axs.remove(ax1)
            continue
        lg1 = layoutgrids[ss1.get_gridspec()]
        # interior columns:
        if len(ss1.colspan) > 1:
            # Largest submerged left/right margins within ax1's own span.
            maxsubl = np.max(
                lg1.margin_vals['left'][ss1.colspan[1:]] +
                lg1.margin_vals['leftcb'][ss1.colspan[1:]]
            )
            maxsubr = np.max(
                lg1.margin_vals['right'][ss1.colspan[:-1]] +
                lg1.margin_vals['rightcb'][ss1.colspan[:-1]]
            )
            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None and len(ss2.colspan) > 1:
                    maxsubl2 = np.max(
                        lg2.margin_vals['left'][ss2.colspan[1:]] +
                        lg2.margin_vals['leftcb'][ss2.colspan[1:]])
                    if maxsubl2 > maxsubl:
                        maxsubl = maxsubl2
                    maxsubr2 = np.max(
                        lg2.margin_vals['right'][ss2.colspan[:-1]] +
                        lg2.margin_vals['rightcb'][ss2.colspan[:-1]])
                    if maxsubr2 > maxsubr:
                        maxsubr = maxsubr2
            for i in ss1.colspan[1:]:
                lg1.edit_margin_min('left', maxsubl, cell=i)
            for i in ss1.colspan[:-1]:
                lg1.edit_margin_min('right', maxsubr, cell=i)
        # interior rows:
        if len(ss1.rowspan) > 1:
            maxsubt = np.max(
                lg1.margin_vals['top'][ss1.rowspan[1:]] +
                lg1.margin_vals['topcb'][ss1.rowspan[1:]]
            )
            maxsubb = np.max(
                lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +
                lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]
            )
            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None:
                    if len(ss2.rowspan) > 1:
                        maxsubt = np.max([np.max(
                            lg2.margin_vals['top'][ss2.rowspan[1:]] +
                            lg2.margin_vals['topcb'][ss2.rowspan[1:]]
                        ), maxsubt])
                        maxsubb = np.max([np.max(
                            lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +
                            lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]
                        ), maxsubb])
            for i in ss1.rowspan[1:]:
                lg1.edit_margin_min('top', maxsubt, cell=i)
            for i in ss1.rowspan[:-1]:
                lg1.edit_margin_min('bottom', maxsubb, cell=i)
    return axs
def get_cb_parent_spans(cbax):
    """
    Figure out which subplotspecs this colorbar belongs to.

    Returns the ``(rowspan, colspan)`` ranges covering the union of the
    subplotspecs of all parent Axes.

    Parameters
    ----------
    cbax : `~matplotlib.axes.Axes`
        Axes for the colorbar.
    """
    row_lo, row_hi = np.inf, -np.inf
    col_lo, col_hi = np.inf, -np.inf
    for parent in cbax._colorbar_info['parents']:
        ss = parent.get_subplotspec()
        row_lo = min(row_lo, ss.rowspan.start)
        row_hi = max(row_hi, ss.rowspan.stop)
        col_lo = min(col_lo, ss.colspan.start)
        col_hi = max(col_hi, ss.colspan.stop)
    return range(row_lo, row_hi), range(col_lo, col_hi)
def get_pos_and_bbox(ax, renderer):
    """
    Get the position and the bbox for the Axes.
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
    renderer : `~matplotlib.backend_bases.RendererBase` subclass.
    Returns
    -------
    pos : `~matplotlib.transforms.Bbox`
        Position in figure coordinates.
    bbox : `~matplotlib.transforms.Bbox`
        Tight bounding box in figure coordinates.
    """
    fig = ax.get_figure(root=False)
    # The position is stored in (sub)panel coordinates, but the layout works
    # in outer-figure coordinates.
    pos = ax.get_position(original=True).transformed(
        fig.transSubfigure - fig.transFigure)
    tight = martist._get_tightbbox_for_layout_only(ax, renderer)
    if tight is None:
        # No drawable decorations: the position is the tight bbox.
        return pos, pos
    return pos, tight.transformed(fig.transFigure.inverted())
def reposition_axes(layoutgrids, fig, renderer, *,
                    w_pad=0, h_pad=0, hspace=0, wspace=0):
    """
    Reposition all the Axes based on the new inner bounding box.
    """
    trans_fig_to_subfig = fig.transFigure - fig.transSubfigure
    for sfig in fig.subfigs:
        bbox = layoutgrids[sfig].get_outer_bbox()
        sfig._redo_transform_rel_fig(
            bbox=bbox.transformed(trans_fig_to_subfig))
        reposition_axes(layoutgrids, sfig, renderer,
                        w_pad=w_pad, h_pad=h_pad,
                        wspace=wspace, hspace=hspace)
    for ax in fig._localaxes:
        if ax.get_subplotspec() is None or not ax.get_in_layout():
            continue
        # grid bbox is in Figure coordinates, but we specify in panel
        # coordinates...
        ss = ax.get_subplotspec()
        gs = ss.get_gridspec()
        if gs not in layoutgrids:
            # NOTE(review): returns rather than continues, leaving any
            # remaining Axes unpositioned -- confirm this is intended.
            return
        bbox = layoutgrids[gs].get_inner_bbox(rows=ss.rowspan,
                                              cols=ss.colspan)
        # transform from figure to panel for set_position:
        newbbox = trans_fig_to_subfig.transform_bbox(bbox)
        ax._set_position(newbbox)
        # move the colorbars:
        # we need to keep track of oldw and oldh if there is more than
        # one colorbar:
        offset = {'left': 0, 'right': 0, 'bottom': 0, 'top': 0}
        for nn, cbax in enumerate(ax._colorbars[::-1]):
            # Only reposition via the first parent to avoid double-moves.
            if ax == cbax._colorbar_info['parents'][0]:
                reposition_colorbar(layoutgrids, cbax, renderer,
                                    offset=offset)
def reposition_colorbar(layoutgrids, cbax, renderer, *, offset=None):
    """
    Place the colorbar in its new place.

    Parameters
    ----------
    layoutgrids : dict
    cbax : `~matplotlib.axes.Axes`
        Axes for the colorbar.
    renderer : `~matplotlib.backend_bases.RendererBase` subclass.
        The renderer to use.
    offset : array-like
        Offset the colorbar needs to be pushed to in order to
        account for multiple colorbars.

    Returns
    -------
    dict
        The *offset* mapping, updated in place with the space this colorbar
        consumed on its side.
    """
    parents = cbax._colorbar_info['parents']
    gs = parents[0].get_gridspec()

    fig = cbax.get_figure(root=False)
    trans_fig_to_subfig = fig.transFigure - fig.transSubfigure

    cb_rspans, cb_cspans = get_cb_parent_spans(cbax)
    # bbox of the parent cells including decorations but not the colorbar,
    # and the inner bbox the parent Axes occupy.
    bboxparent = layoutgrids[gs].get_bbox_for_cb(rows=cb_rspans,
                                                 cols=cb_cspans)
    pb = layoutgrids[gs].get_inner_bbox(rows=cb_rspans, cols=cb_cspans)

    location = cbax._colorbar_info['location']
    anchor = cbax._colorbar_info['anchor']
    fraction = cbax._colorbar_info['fraction']
    aspect = cbax._colorbar_info['aspect']
    shrink = cbax._colorbar_info['shrink']

    cbpos, cbbbox = get_pos_and_bbox(cbax, renderer)

    # Colorbar gets put at extreme edge of outer bbox of the subplotspec
    # It needs to be moved in by: 1) a pad 2) its "margin" 3) by
    # any colorbars already added at this location:
    cbpad = colorbar_get_pad(layoutgrids, cbax)
    if location in ('left', 'right'):
        # fraction and shrink are fractions of parent
        pbcb = pb.shrunk(fraction, shrink).anchored(anchor, pb)
        # The colorbar is at the left side of the parent.  Need
        # to translate to right (or left)
        if location == 'right':
            # lmargin: decorations (e.g. tick labels) left of the bar itself.
            lmargin = cbpos.x0 - cbbbox.x0
            dx = bboxparent.x1 - pbcb.x0 + offset['right']
            dx += cbpad + lmargin
            offset['right'] += cbbbox.width + cbpad
            pbcb = pbcb.translated(dx, 0)
        else:
            lmargin = cbpos.x0 - cbbbox.x0
            dx = bboxparent.x0 - pbcb.x0  # edge of parent
            dx += -cbbbox.width - cbpad + lmargin - offset['left']
            offset['left'] += cbbbox.width + cbpad
            pbcb = pbcb.translated(dx, 0)
    else:  # horizontal axes:
        pbcb = pb.shrunk(shrink, fraction).anchored(anchor, pb)
        if location == 'top':
            bmargin = cbpos.y0 - cbbbox.y0
            dy = bboxparent.y1 - pbcb.y0 + offset['top']
            dy += cbpad + bmargin
            offset['top'] += cbbbox.height + cbpad
            pbcb = pbcb.translated(0, dy)
        else:
            bmargin = cbpos.y0 - cbbbox.y0
            dy = bboxparent.y0 - pbcb.y0
            dy += -cbbbox.height - cbpad + bmargin - offset['bottom']
            offset['bottom'] += cbbbox.height + cbpad
            pbcb = pbcb.translated(0, dy)

    # Convert to subfigure coordinates and apply the new position.
    pbcb = trans_fig_to_subfig.transform_bbox(pbcb)
    cbax.set_transform(fig.transSubfigure)
    cbax._set_position(pbcb)
    cbax.set_anchor(anchor)
    if location in ['bottom', 'top']:
        # Horizontal colorbars use the inverse aspect ratio.
        aspect = 1 / aspect
    cbax.set_box_aspect(aspect)
    cbax.set_aspect('auto')
    return offset
def reset_margins(layoutgrids, fig):
    """
    Reset the margins in the layoutboxes of *fig*.

    Margins are usually set as a minimum, so if the figure gets smaller
    the minimum needs to be zero in order for it to grow again.
    """
    # Clear nested subfigures first.
    for subfig in fig.subfigs:
        reset_margins(layoutgrids, subfig)
    # Then every gridspec that participates in this figure's layout...
    for ax in fig.axes:
        if not ax.get_in_layout():
            continue
        gs = ax.get_gridspec()
        if gs in layoutgrids:  # also implies gs is not None.
            layoutgrids[gs].reset_margins()
    # ...and finally the figure's own layout grid.
    layoutgrids[fig].reset_margins()
def colorbar_get_pad(layoutgrids, cax):
    """Return the padding for colorbar *cax* as a fraction of figure size."""
    info = cax._colorbar_info
    gs = info['parents'][0].get_gridspec()

    cb_rspans, cb_cspans = get_cb_parent_spans(cax)
    inner = layoutgrids[gs].get_inner_bbox(rows=cb_rspans, cols=cb_cspans)

    # Pad is a fraction of the parent extent along the colorbar's offset axis.
    if info['location'] in ['right', 'left']:
        size = inner.width
    else:
        size = inner.height
    return info['pad'] * size
venv\Lib\site-packages\matplotlib\_docstring.py
import inspect
from . import _api
def kwarg_doc(text):
    """
    Decorator for defining the kwdoc documentation of artist properties.

    This decorator can be applied to artist property setter methods.
    The given text is stored in a private attribute ``_kwarg_doc`` on
    the method.  It is used to overwrite auto-generated documentation
    in the *kwdoc list* for artists.  The kwdoc list is used to document
    ``**kwargs`` when they are properties of an artist.  See e.g. the
    ``**kwargs`` section in `.Axes.text`.

    The text should contain the supported types, as well as the default
    value if applicable, e.g.:

        @_docstring.kwarg_doc("bool, default: :rc:`text.usetex`")
        def set_usetex(self, usetex):

    See Also
    --------
    matplotlib.artist.kwdoc
    """
    def attach(setter):
        # Stash the override text on the setter itself.
        setter._kwarg_doc = text
        return setter
    return attach
class Substitution:
    """
    A decorator that performs %-substitution on an object's docstring.

    This decorator should be robust even if ``obj.__doc__`` is None (for
    example, if -OO was passed to the interpreter).

    Usage: construct a docstring.Substitution with a sequence or dictionary
    suitable for performing substitution; then decorate a suitable function
    with the constructed object, e.g.::

        sub_author_name = Substitution(author='Jason')

        @sub_author_name
        def some_function(x):
            "%(author)s wrote this function"

        # note that some_function.__doc__ is now "Jason wrote this function"

    One can also use positional arguments::

        sub_first_last_names = Substitution('Edgar Allen', 'Poe')

        @sub_first_last_names
        def some_function(x):
            "%s %s wrote the Raven"
    """

    def __init__(self, *args, **kwargs):
        # Mixing positional and keyword operands would make the %-format
        # ambiguous, so forbid it.
        if args and kwargs:
            raise TypeError("Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        doc = func.__doc__
        if doc:
            # Normalize indentation before substituting, so the placeholder
            # values line up regardless of where the docstring was written.
            func.__doc__ = inspect.cleandoc(doc) % self.params
        return func
class _ArtistKwdocLoader(dict):
def __missing__(self, key):
if not key.endswith(":kwdoc"):
raise KeyError(key)
name = key[:-len(":kwdoc")]
from matplotlib.artist import Artist, kwdoc
try:
cls, = (cls for cls in _api.recursive_subclasses(Artist)
if cls.__name__ == name)
except ValueError as e:
raise KeyError(key) from e
return self.setdefault(key, kwdoc(cls))
class _ArtistPropertiesSubstitution:
"""
A class to substitute formatted placeholders in docstrings.
This is realized in a single instance ``_docstring.interpd``.
Use `~._ArtistPropertiesSubstition.register` to define placeholders and
their substitution, e.g. ``_docstring.interpd.register(name="some value")``.
Use this as a decorator to apply the substitution::
@_docstring.interpd
def some_func():
'''Replace %(name)s.'''
Decorating a class triggers substitution both on the class docstring and
on the class' ``__init__`` docstring (which is a commonly required
pattern for Artist subclasses).
Substitutions of the form ``%(classname:kwdoc)s`` (ending with the
literal ":kwdoc" suffix) trigger lookup of an Artist subclass with the
given *classname*, and are substituted with the `.kwdoc` of that class.
"""
def __init__(self):
self.params = _ArtistKwdocLoader()
def register(self, **kwargs):
"""
Register substitutions.
``_docstring.interpd.register(name="some value")`` makes "name" available
as a named parameter that will be replaced by "some value".
"""
self.params.update(**kwargs)
def __call__(self, obj):
if obj.__doc__:
obj.__doc__ = inspect.cleandoc(obj.__doc__) % self.params
if isinstance(obj, type) and obj.__init__ != object.__init__:
self(obj.__init__)
return obj
def copy(source):
    """Copy a docstring from another source function (if present).

    If *source* has no docstring (e.g. under ``-OO``), the target is
    returned unchanged.
    """
    def transfer(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return transfer
# Create a decorator that will house the various docstring snippets reused
# throughout Matplotlib.  This is the single shared instance referred to as
# ``_docstring.interpd`` elsewhere.
interpd = _ArtistPropertiesSubstitution()
venv\Lib\site-packages\matplotlib\_enums.py
"""
Enums representing sets of strings that Matplotlib uses as input parameters.
Matplotlib often uses simple data types like strings or tuples to define a
concept; e.g. the line capstyle can be specified as one of 'butt', 'round',
or 'projecting'. The classes in this module are used internally and serve to
document these concepts formally.
As an end-user you will not use these classes directly, but only the values
they define.
"""
from enum import Enum
from matplotlib import _docstring
class JoinStyle(str, Enum):
    """
    Define how the connection between two line segments is drawn.

    For a visual impression of each *JoinStyle*, `view these docs online
    <JoinStyle>`, or run `JoinStyle.demo`.

    Lines in Matplotlib are typically defined by a 1D `~.path.Path` and a
    finite ``linewidth``, where the underlying 1D `~.path.Path` represents the
    center of the stroked line.

    By default, `~.backend_bases.GraphicsContextBase` defines the boundaries of
    a stroked line to simply be every point within some radius,
    ``linewidth/2``, away from any point of the center line.  However, this
    results in corners appearing "rounded", which may not be the desired
    behavior if you are drawing, for example, a polygon or pointed star.

    **Supported values:**

    .. rst-class:: value-list

        'miter'
            the "arrow-tip" style.  Each boundary of the filled-in area will
            extend in a straight line parallel to the tangent vector of the
            centerline at the point it meets the corner, until they meet in a
            sharp point.
        'round'
            strokes every point within a radius of ``linewidth/2`` of the
            center lines.
        'bevel'
            the "squared-off" style.  It can be thought of as a rounded corner
            where the "circular" part of the corner has been cut off.

    .. note::

        Very long miter tips are cut off (to form a *bevel*) after a
        backend-dependent limit called the "miter limit", which specifies the
        maximum allowed ratio of miter length to line width. For example, the
        PDF backend uses the default value of 10 specified by the PDF standard,
        while the SVG backend does not even specify the miter limit, resulting
        in a default value of 4 per the SVG specification. Matplotlib does not
        currently allow the user to adjust this parameter.

        A more detailed description of the effect of a miter limit can be found
        in the `Mozilla Developer Docs
        <https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-miterlimit>`_

    .. plot::
        :alt: Demo of possible JoinStyle's

        from matplotlib._enums import JoinStyle
        JoinStyle.demo()
    """

    miter = "miter"
    round = "round"
    bevel = "bevel"

    @staticmethod
    def demo():
        """Demonstrate how each JoinStyle looks for various join angles."""
        import numpy as np
        import matplotlib.pyplot as plt

        def plot_angle(ax, x, y, angle, style):
            # Draw a bent segment of the given join *style* at (x, y).
            phi = np.radians(angle)
            xx = [x + .5, x, x + .5*np.cos(phi)]
            yy = [y, y, y + .5*np.sin(phi)]
            ax.plot(xx, yy, lw=12, color='tab:blue', solid_joinstyle=style)
            ax.plot(xx, yy, lw=1, color='black')
            ax.plot(xx[1], yy[1], 'o', color='tab:red', markersize=3)

        fig, ax = plt.subplots(figsize=(5, 4), constrained_layout=True)
        ax.set_title('Join style')
        # One column per style, one row per join angle.
        for x, style in enumerate(['miter', 'round', 'bevel']):
            ax.text(x, 5, style)
            for y, angle in enumerate([20, 45, 60, 90, 120]):
                plot_angle(ax, x, y, angle, style)
                if x == 0:
                    ax.text(-1.3, y, f'{angle} degrees')
        ax.set_xlim(-1.5, 2.75)
        ax.set_ylim(-.5, 5.5)
        ax.set_axis_off()
        fig.show()
# Docstring fragment listing the valid join styles, e.g.
# "{'miter', 'round', 'bevel'}"; registered with _docstring.interpd below.
JoinStyle.input_description = "{" \
    + ", ".join([f"'{js.name}'" for js in JoinStyle]) \
    + "}"
class CapStyle(str, Enum):
    r"""
    Define how the two endpoints (caps) of an unclosed line are drawn.

    How to draw the start and end points of lines that represent a closed curve
    (i.e. that end in a `~.path.Path.CLOSEPOLY`) is controlled by the line's
    `JoinStyle`. For all other lines, how the start and end points are drawn is
    controlled by the *CapStyle*.

    For a visual impression of each *CapStyle*, `view these docs online
    <CapStyle>` or run `CapStyle.demo`.

    By default, `~.backend_bases.GraphicsContextBase` draws a stroked line as
    squared off at its endpoints.

    **Supported values:**

    .. rst-class:: value-list

        'butt'
            the line is squared off at its endpoint.
        'projecting'
            the line is squared off as in *butt*, but the filled in area
            extends beyond the endpoint a distance of ``linewidth/2``.
        'round'
            like *butt*, but a semicircular cap is added to the end of the
            line, of radius ``linewidth/2``.

    .. plot::
        :alt: Demo of possible CapStyle's

        from matplotlib._enums import CapStyle
        CapStyle.demo()
    """

    butt = "butt"
    projecting = "projecting"
    round = "round"

    @staticmethod
    def demo():
        """Demonstrate how each CapStyle looks for a thick line segment."""
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(4, 1.2))
        ax = fig.add_axes([0, 0, 1, 0.8])
        ax.set_title('Cap style')

        # One short horizontal segment per style.
        for x, style in enumerate(['butt', 'round', 'projecting']):
            ax.text(x+0.25, 0.85, style, ha='center')
            xx = [x, x+0.5]
            yy = [0, 0]
            ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style)
            ax.plot(xx, yy, lw=1, color='black')
            ax.plot(xx, yy, 'o', color='tab:red', markersize=3)

        ax.set_ylim(-.5, 1.5)
        ax.set_axis_off()
        fig.show()
# Docstring fragment listing the valid cap styles, e.g.
# "{'butt', 'projecting', 'round'}".
CapStyle.input_description = "{" \
    + ", ".join([f"'{cs.name}'" for cs in CapStyle]) \
    + "}"

# Make the value lists available as %(JoinStyle)s / %(CapStyle)s
# substitutions in docstrings.
_docstring.interpd.register(
    JoinStyle=JoinStyle.input_description,
    CapStyle=CapStyle.input_description,
)
"""
A module for parsing and generating `fontconfig patterns`_.
.. _fontconfig patterns:
https://www.freedesktop.org/software/fontconfig/fontconfig-user.html
"""
# This class logically belongs in `matplotlib.font_manager`, but placing it
# there would have created cyclical dependency problems, because it also needs
# to be available from `matplotlib.rcsetup` (for parsing matplotlibrc files).
from functools import lru_cache, partial
import re
from pyparsing import (
Group, Optional, ParseException, Regex, StringEnd, Suppress, ZeroOrMore, oneOf)
# Characters that must be backslash-escaped inside family names and property
# values respectively, plus helpers that strip (*_unescape) or insert
# (*_escape) those backslashes via lookahead substitution.
_family_punc = r'\\\-:,'
_family_unescape = partial(re.compile(r'\\(?=[%s])' % _family_punc).sub, '')
_family_escape = partial(re.compile(r'(?=[%s])' % _family_punc).sub, r'\\')
_value_punc = r'\\=_:,'
_value_unescape = partial(re.compile(r'\\(?=[%s])' % _value_punc).sub, '')
_value_escape = partial(re.compile(r'(?=[%s])' % _value_punc).sub, r'\\')
# Shorthand tokens recognized in fontconfig patterns: each bare constant maps
# to a (property, value) pair.  Note that several weight aliases ('thin',
# 'extralight', 'ultralight') all collapse to 'light' here.
_CONSTANTS = {
    'thin': ('weight', 'light'),
    'extralight': ('weight', 'light'),
    'ultralight': ('weight', 'light'),
    'light': ('weight', 'light'),
    'book': ('weight', 'book'),
    'regular': ('weight', 'regular'),
    'normal': ('weight', 'normal'),
    'medium': ('weight', 'medium'),
    'demibold': ('weight', 'demibold'),
    'semibold': ('weight', 'semibold'),
    'bold': ('weight', 'bold'),
    'extrabold': ('weight', 'extra bold'),
    'black': ('weight', 'black'),
    'heavy': ('weight', 'heavy'),
    'roman': ('slant', 'normal'),
    'italic': ('slant', 'italic'),
    'oblique': ('slant', 'oblique'),
    'ultracondensed': ('width', 'ultra-condensed'),
    'extracondensed': ('width', 'extra-condensed'),
    'condensed': ('width', 'condensed'),
    'semicondensed': ('width', 'semi-condensed'),
    'expanded': ('width', 'expanded'),
    'extraexpanded': ('width', 'extra-expanded'),
    'ultraexpanded': ('width', 'ultra-expanded'),
}
@lru_cache  # The parser instance is a singleton.
def _make_fontconfig_parser():
    """Build the pyparsing grammar for fontconfig patterns.

    The grammar accepts an optional comma-separated family list, an optional
    ``-``-prefixed comma-separated size list, and any number of
    ``:name=value,...`` or ``:constant`` properties.
    """
    def comma_separated(elem):
        # elem (, elem)* — one or more comma-separated occurrences.
        return elem + ZeroOrMore(Suppress(",") + elem)

    family = Regex(fr"([^{_family_punc}]|(\\[{_family_punc}]))*")
    size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)")
    name = Regex(r"[a-z]+")
    value = Regex(fr"([^{_value_punc}]|(\\[{_value_punc}]))*")
    # Either an explicit name=value(,value...) setting or one of the
    # shorthand constants from _CONSTANTS.
    prop = Group((name + Suppress("=") + comma_separated(value)) | oneOf(_CONSTANTS))
    return (
        Optional(comma_separated(family)("families"))
        + Optional("-" + comma_separated(size)("sizes"))
        + ZeroOrMore(":" + prop("properties*"))
        + StringEnd()
    )
# `parse_fontconfig_pattern` is a bottleneck during the tests because it is
# repeatedly called when the rcParams are reset (to validate the default
# fonts).  In practice, the cache size doesn't grow beyond a few dozen entries
# during the test suite.
@lru_cache
def parse_fontconfig_pattern(pattern):
    """
    Parse a fontconfig *pattern* into a dict that can initialize a
    `.font_manager.FontProperties` object.

    Raises
    ------
    ValueError
        If *pattern* does not match the fontconfig grammar.
    """
    parser = _make_fontconfig_parser()
    try:
        parse = parser.parseString(pattern)
    except ParseException as err:
        # explain becomes a plain method on pyparsing 3 (err.explain(0)).
        raise ValueError("\n" + ParseException.explain(err, 0)) from None
    parser.resetCache()
    props = {}
    if "families" in parse:
        # Strip the backslash escapes from each family name.
        props["family"] = [*map(_family_unescape, parse["families"])]
    if "sizes" in parse:
        props["size"] = [*parse["sizes"]]
    for prop in parse.get("properties", []):
        if len(prop) == 1:
            # A single token is a shorthand constant, e.g. "bold";
            # expand it to its (property, value) pair.
            prop = _CONSTANTS[prop[0]]
        k, *v = prop
        props.setdefault(k, []).extend(map(_value_unescape, v))
    return props
def generate_fontconfig_pattern(d):
    """Convert a `.FontProperties` to a fontconfig pattern string."""
    # Families come first, without a leading keyword, comma-separated and
    # escaped.
    parts = [",".join(_family_escape(fam) for fam in d.get_family())]
    # The remaining (necessarily scalar) entries are appended as
    # ":key=value"; None values are skipped.
    for key in ["style", "variant", "weight", "stretch", "file", "size"]:
        val = getattr(d, f"get_{key}")()
        if val is not None:
            parts.append(f":{key}={_value_escape(str(val))}")
    return "".join(parts)
"""
Internal debugging utilities, that are not expected to be used in the rest of
the codebase.
WARNING: Code in this module may change without prior notice!
"""
from io import StringIO
from pathlib import Path
import subprocess
from matplotlib.transforms import TransformNode
def graphviz_dump_transform(transform, dest, *, highlight=None):
    """
    Generate a graphical representation of the transform tree for *transform*
    using the :program:`dot` program (which this function depends on).  The
    output format (png, dot, etc.) is determined from the suffix of *dest*.

    Parameters
    ----------
    transform : `~matplotlib.transform.Transform`
        The represented transform.
    dest : str
        Output filename.  The extension must be one of the formats supported
        by :program:`dot`, e.g. png, svg, dot, ...
        (see https://www.graphviz.org/doc/info/output.html).
    highlight : list of `~matplotlib.transform.Transform` or None
        The transforms in the tree to be drawn in bold.
        If *None*, *transform* is highlighted.
    """
    if highlight is None:
        highlight = [transform]

    seen = set()  # ids of nodes already emitted, to avoid revisiting.

    def recurse(root, buf):
        # Emit a dot node for *root*, then an edge plus recursion for every
        # attribute of *root* that is a child TransformNode.
        if id(root) in seen:
            return
        seen.add(id(root))
        props = {}
        label = type(root).__name__
        if root._invalid:
            # Bracket the label of invalidated transforms.
            label = f'[{label}]'
        if root in highlight:
            props['style'] = 'bold'
        props['shape'] = 'box'
        props['label'] = '"%s"' % label
        props = ' '.join(map('{0[0]}={0[1]}'.format, props.items()))
        buf.write(f'{id(root)} [{props}];\n')
        for key, val in vars(root).items():
            # Only follow attributes that are genuine children, i.e. nodes
            # that list *root* among their parents.
            if isinstance(val, TransformNode) and id(root) in val._parents:
                buf.write(f'"{id(root)}" -> "{id(val)}" '
                          f'[label="{key}", fontsize=10];\n')
                recurse(val, buf)

    buf = StringIO()
    buf.write('digraph G {\n')
    recurse(transform, buf)
    buf.write('}\n')
    # Pipe the generated dot source through the external dot program.
    subprocess.run(
        ['dot', '-T', Path(dest).suffix[1:], '-o', dest],
        input=buf.getvalue().encode('utf-8'), check=True)
venv\Lib\site-packages\matplotlib\_layoutgrid.py
"""
A layoutgrid is a nrows by ncols set of boxes, meant to be used by
`._constrained_layout`, each box is analogous to a subplotspec element of
a gridspec.
Each box is defined by left[ncols], right[ncols], bottom[nrows] and top[nrows],
and by two editable margins for each side. The main margin gets its value
set by the size of ticklabels, titles, etc on each Axes that is in the figure.
The outer margin is the padding around the Axes, and space for any
colorbars.
The "inner" widths and heights of these boxes are then constrained to be the
same (relative the values of `width_ratios[ncols]` and `height_ratios[nrows]`).
The layoutgrid is then constrained to be contained within a parent layoutgrid,
its column(s) and row(s) specified when it is created.
"""
import itertools
import kiwisolver as kiwi
import logging
import numpy as np
import matplotlib as mpl
import matplotlib.patches as mpatches
from matplotlib.transforms import Bbox
_log = logging.getLogger(__name__)
class LayoutGrid:
"""
Analogous to a gridspec, and contained in another LayoutGrid.
"""
def __init__(self, parent=None, parent_pos=(0, 0),
parent_inner=False, name='', ncols=1, nrows=1,
h_pad=None, w_pad=None, width_ratios=None,
height_ratios=None):
Variable = kiwi.Variable
self.parent_pos = parent_pos
self.parent_inner = parent_inner
self.name = name + seq_id()
if isinstance(parent, LayoutGrid):
self.name = f'{parent.name}.{self.name}'
self.nrows = nrows
self.ncols = ncols
self.height_ratios = np.atleast_1d(height_ratios)
if height_ratios is None:
self.height_ratios = np.ones(nrows)
self.width_ratios = np.atleast_1d(width_ratios)
if width_ratios is None:
self.width_ratios = np.ones(ncols)
sn = self.name + '_'
if not isinstance(parent, LayoutGrid):
# parent can be a rect if not a LayoutGrid
# allows specifying a rectangle to contain the layout.
self.solver = kiwi.Solver()
else:
parent.add_child(self, *parent_pos)
self.solver = parent.solver
# keep track of artist associated w/ this layout. Can be none
self.artists = np.empty((nrows, ncols), dtype=object)
self.children = np.empty((nrows, ncols), dtype=object)
self.margins = {}
self.margin_vals = {}
# all the boxes in each column share the same left/right margins:
for todo in ['left', 'right', 'leftcb', 'rightcb']:
# track the value so we can change only if a margin is larger
# than the current value
self.margin_vals[todo] = np.zeros(ncols)
sol = self.solver
self.lefts = [Variable(f'{sn}lefts[{i}]') for i in range(ncols)]
self.rights = [Variable(f'{sn}rights[{i}]') for i in range(ncols)]
for todo in ['left', 'right', 'leftcb', 'rightcb']:
self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
for i in range(ncols)]
for i in range(ncols):
sol.addEditVariable(self.margins[todo][i], 'strong')
for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
self.margins[todo] = np.empty((nrows), dtype=object)
self.margin_vals[todo] = np.zeros(nrows)
self.bottoms = [Variable(f'{sn}bottoms[{i}]') for i in range(nrows)]
self.tops = [Variable(f'{sn}tops[{i}]') for i in range(nrows)]
for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
for i in range(nrows)]
for i in range(nrows):
sol.addEditVariable(self.margins[todo][i], 'strong')
# set these margins to zero by default. They will be edited as
# children are filled.
self.reset_margins()
self.add_constraints(parent)
self.h_pad = h_pad
self.w_pad = w_pad
def __repr__(self):
str = f'LayoutBox: {self.name:25s} {self.nrows}x{self.ncols},\n'
for i in range(self.nrows):
for j in range(self.ncols):
str += f'{i}, {j}: '\
f'L{self.lefts[j].value():1.3f}, ' \
f'B{self.bottoms[i].value():1.3f}, ' \
f'R{self.rights[j].value():1.3f}, ' \
f'T{self.tops[i].value():1.3f}, ' \
f'ML{self.margins["left"][j].value():1.3f}, ' \
f'MR{self.margins["right"][j].value():1.3f}, ' \
f'MB{self.margins["bottom"][i].value():1.3f}, ' \
f'MT{self.margins["top"][i].value():1.3f}, \n'
return str
def reset_margins(self):
"""
Reset all the margins to zero. Must do this after changing
figure size, for instance, because the relative size of the
axes labels etc changes.
"""
for todo in ['left', 'right', 'bottom', 'top',
'leftcb', 'rightcb', 'bottomcb', 'topcb']:
self.edit_margins(todo, 0.0)
def add_constraints(self, parent):
# define self-consistent constraints
self.hard_constraints()
# define relationship with parent layoutgrid:
self.parent_constraints(parent)
# define relative widths of the grid cells to each other
# and stack horizontally and vertically.
self.grid_constraints()
def hard_constraints(self):
"""
These are the redundant constraints, plus ones that make the
rest of the code easier.
"""
for i in range(self.ncols):
hc = [self.rights[i] >= self.lefts[i],
(self.rights[i] - self.margins['right'][i] -
self.margins['rightcb'][i] >=
self.lefts[i] - self.margins['left'][i] -
self.margins['leftcb'][i])
]
for c in hc:
self.solver.addConstraint(c | 'required')
for i in range(self.nrows):
hc = [self.tops[i] >= self.bottoms[i],
(self.tops[i] - self.margins['top'][i] -
self.margins['topcb'][i] >=
self.bottoms[i] - self.margins['bottom'][i] -
self.margins['bottomcb'][i])
]
for c in hc:
self.solver.addConstraint(c | 'required')
def add_child(self, child, i=0, j=0):
# np.ix_ returns the cross product of i and j indices
self.children[np.ix_(np.atleast_1d(i), np.atleast_1d(j))] = child
def parent_constraints(self, parent):
# constraints that are due to the parent...
# i.e. the first column's left is equal to the
# parent's left, the last column right equal to the
# parent's right...
if not isinstance(parent, LayoutGrid):
# specify a rectangle in figure coordinates
hc = [self.lefts[0] == parent[0],
self.rights[-1] == parent[0] + parent[2],
# top and bottom reversed order...
self.tops[0] == parent[1] + parent[3],
self.bottoms[-1] == parent[1]]
else:
rows, cols = self.parent_pos
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
left = parent.lefts[cols[0]]
right = parent.rights[cols[-1]]
top = parent.tops[rows[0]]
bottom = parent.bottoms[rows[-1]]
if self.parent_inner:
# the layout grid is contained inside the inner
# grid of the parent.
left += parent.margins['left'][cols[0]]
left += parent.margins['leftcb'][cols[0]]
right -= parent.margins['right'][cols[-1]]
right -= parent.margins['rightcb'][cols[-1]]
top -= parent.margins['top'][rows[0]]
top -= parent.margins['topcb'][rows[0]]
bottom += parent.margins['bottom'][rows[-1]]
bottom += parent.margins['bottomcb'][rows[-1]]
hc = [self.lefts[0] == left,
self.rights[-1] == right,
# from top to bottom
self.tops[0] == top,
self.bottoms[-1] == bottom]
for c in hc:
self.solver.addConstraint(c | 'required')
def grid_constraints(self):
# constrain the ratio of the inner part of the grids
# to be the same (relative to width_ratios)
# constrain widths:
w = (self.rights[0] - self.margins['right'][0] -
self.margins['rightcb'][0])
w = (w - self.lefts[0] - self.margins['left'][0] -
self.margins['leftcb'][0])
w0 = w / self.width_ratios[0]
# from left to right
for i in range(1, self.ncols):
w = (self.rights[i] - self.margins['right'][i] -
self.margins['rightcb'][i])
w = (w - self.lefts[i] - self.margins['left'][i] -
self.margins['leftcb'][i])
c = (w == w0 * self.width_ratios[i])
self.solver.addConstraint(c | 'strong')
# constrain the grid cells to be directly next to each other.
c = (self.rights[i - 1] == self.lefts[i])
self.solver.addConstraint(c | 'strong')
# constrain heights:
h = self.tops[0] - self.margins['top'][0] - self.margins['topcb'][0]
h = (h - self.bottoms[0] - self.margins['bottom'][0] -
self.margins['bottomcb'][0])
h0 = h / self.height_ratios[0]
# from top to bottom:
for i in range(1, self.nrows):
h = (self.tops[i] - self.margins['top'][i] -
self.margins['topcb'][i])
h = (h - self.bottoms[i] - self.margins['bottom'][i] -
self.margins['bottomcb'][i])
c = (h == h0 * self.height_ratios[i])
self.solver.addConstraint(c | 'strong')
# constrain the grid cells to be directly above each other.
c = (self.bottoms[i - 1] == self.tops[i])
self.solver.addConstraint(c | 'strong')
# Margin editing: The margins are variable and meant to
# contain things of a fixed size like axes labels, tick labels, titles
# etc
def edit_margin(self, todo, size, cell):
"""
Change the size of the margin for one cell.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Size of the margin. If it is larger than the existing minimum it
updates the margin size. Fraction of figure size.
cell : int
Cell column or row to edit.
"""
self.solver.suggestValue(self.margins[todo][cell], size)
self.margin_vals[todo][cell] = size
def edit_margin_min(self, todo, size, cell=0):
"""
Change the minimum size of the margin for one cell.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Minimum size of the margin . If it is larger than the
existing minimum it updates the margin size. Fraction of
figure size.
cell : int
Cell column or row to edit.
"""
if size > self.margin_vals[todo][cell]:
self.edit_margin(todo, size, cell)
def edit_margins(self, todo, size):
"""
Change the size of all the margin of all the cells in the layout grid.
Parameters
----------
todo : string (one of 'left', 'right', 'bottom', 'top')
margin to alter.
size : float
Size to set the margins. Fraction of figure size.
"""
for i in range(len(self.margin_vals[todo])):
self.edit_margin(todo, size, i)
def edit_all_margins_min(self, todo, size):
"""
Change the minimum size of all the margin of all
the cells in the layout grid.
Parameters
----------
todo : {'left', 'right', 'bottom', 'top'}
The margin to alter.
size : float
Minimum size of the margin. If it is larger than the
existing minimum it updates the margin size. Fraction of
figure size.
"""
for i in range(len(self.margin_vals[todo])):
self.edit_margin_min(todo, size, i)
def edit_outer_margin_mins(self, margin, ss):
"""
Edit all four margin minimums in one statement.
Parameters
----------
margin : dict
size of margins in a dict with keys 'left', 'right', 'bottom',
'top'
ss : SubplotSpec
defines the subplotspec these margins should be applied to
"""
self.edit_margin_min('left', margin['left'], ss.colspan.start)
self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start)
self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1)
self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1)
# rows are from the top down:
self.edit_margin_min('top', margin['top'], ss.rowspan.start)
self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start)
self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1)
self.edit_margin_min('bottomcb', margin['bottomcb'],
ss.rowspan.stop - 1)
def get_margins(self, todo, col):
"""Return the margin at this position"""
return self.margin_vals[todo][col]
def get_outer_bbox(self, rows=0, cols=0):
"""
Return the outer bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
self.lefts[cols[0]].value(),
self.bottoms[rows[-1]].value(),
self.rights[cols[-1]].value(),
self.tops[rows[0]].value())
return bbox
def get_inner_bbox(self, rows=0, cols=0):
"""
Return the inner bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['left'][cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottom'][rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['right'][cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value() -
self.margins['top'][rows[0]].value() -
self.margins['topcb'][rows[0]].value())
)
return bbox
def get_bbox_for_cb(self, rows=0, cols=0):
"""
Return the bounding box that includes the
decorations but, *not* the colorbar...
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value() -
self.margins['topcb'][rows[0]].value())
)
return bbox
def get_left_margin_bbox(self, rows=0, cols=0):
"""
Return the left margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value()),
(self.bottoms[rows[-1]].value()),
(self.lefts[cols[0]].value() +
self.margins['leftcb'][cols[0]].value() +
self.margins['left'][cols[0]].value()),
(self.tops[rows[0]].value()))
return bbox
def get_bottom_margin_bbox(self, rows=0, cols=0):
"""
Return the left margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.lefts[cols[0]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()),
(self.rights[cols[-1]].value()),
(self.bottoms[rows[-1]].value() +
self.margins['bottom'][rows[-1]].value() +
self.margins['bottomcb'][rows[-1]].value()
))
return bbox
def get_right_margin_bbox(self, rows=0, cols=0):
"""
Return the left margin bounding box of the subplot specs
given by rows and cols. rows and cols can be spans.
"""
rows = np.atleast_1d(rows)
cols = np.atleast_1d(cols)
bbox = Bbox.from_extents(
(self.rights[cols[-1]].value() -
self.margins['right'][cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.bottoms[rows[-1]].value()),
(self.rights[cols[-1]].value() -
self.margins['rightcb'][cols[-1]].value()),
(self.tops[rows[0]].value()))
return bbox
    def get_top_margin_bbox(self, rows=0, cols=0):
        """
        Return the top margin bounding box of the subplot specs
        given by rows and cols. rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)
        # Spans the top decoration margin, just below the top-colorbar margin.
        # Note the two y extents are given top-first; Bbox.from_extents
        # accepts inverted extents.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value()),
            (self.rights[cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value() -
             self.margins['top'][rows[0]].value()))
        return bbox
    def update_variables(self):
        """
        Update the variables for the solver attached to this layoutgrid.
        """
        # Ask the constraint solver to recompute all variable values in place.
        self.solver.updateVariables()
# Module-wide counter backing seq_id().
_layoutboxobjnum = itertools.count()


def seq_id():
    """Generate a short sequential id for layoutbox objects."""
    # Zero-padded to six digits so ids sort lexicographically.
    return format(next(_layoutboxobjnum), '06d')
def plot_children(fig, lg=None, level=0):
    """Simple plotting to show where boxes are."""
    if lg is None:
        _layoutgrids = fig.get_layout_engine().execute(fig)
        lg = _layoutgrids[fig]
    colors = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
    col = colors[level]
    # The four margin overlays differ only in which bbox getter is used and
    # the fill color; drawn in this fixed order: left, right, bottom, top.
    margin_specs = [
        (lg.get_left_margin_bbox, [0.5, 0.7, 0.5]),
        (lg.get_right_margin_bbox, [0.7, 0.5, 0.5]),
        (lg.get_bottom_margin_bbox, [0.5, 0.5, 0.7]),
        (lg.get_top_margin_bbox, [0.7, 0.2, 0.7]),
    ]
    for i in range(lg.nrows):
        for j in range(lg.ncols):
            # Grey translucent rectangle for the outer box.
            outer = lg.get_outer_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(outer.p0, outer.width, outer.height,
                                   linewidth=1, edgecolor='0.7',
                                   facecolor='0.7', alpha=0.2,
                                   transform=fig.transFigure, zorder=-3))
            # Colored outline for the inner box, cycling color per level.
            inner = lg.get_inner_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(inner.p0, inner.width, inner.height,
                                   linewidth=2, edgecolor=col,
                                   facecolor='none',
                                   transform=fig.transFigure, zorder=-2))
            # Translucent fills for each of the four margin regions.
            for getter, face in margin_specs:
                mb = getter(rows=i, cols=j)
                fig.add_artist(
                    mpatches.Rectangle(mb.p0, mb.width, mb.height,
                                       linewidth=0, edgecolor='none',
                                       alpha=0.2, facecolor=face,
                                       transform=fig.transFigure, zorder=-2))
    # Recurse into nested layout grids, bumping the outline color.
    for child in lg.children.flat:
        if child is not None:
            plot_children(fig, child, level=level + 1)
venv\Lib\site-packages\matplotlib\_mathtext.py
"""
Implementation details for :mod:`.mathtext`.
"""
from __future__ import annotations
import abc
import copy
import enum
import functools
import logging
import os
import re
import types
import unicodedata
import string
import typing as T
from typing import NamedTuple
import numpy as np
from pyparsing import (
Empty, Forward, Literal, NotAny, oneOf, OneOrMore, Optional,
ParseBaseException, ParseException, ParseExpression, ParseFatalException,
ParserElement, ParseResults, QuotedString, Regex, StringEnd, ZeroOrMore,
pyparsing_common, Group)
import matplotlib as mpl
from . import cbook
from ._mathtext_data import (
latex_to_bakoma, stix_glyph_fixes, stix_virtual_fonts, tex2uni)
from .font_manager import FontProperties, findfont, get_font
from .ft2font import FT2Font, FT2Image, Kerning, LoadFlags
from packaging.version import parse as parse_version
from pyparsing import __version__ as pyparsing_version
if parse_version(pyparsing_version).major < 3:
from pyparsing import nestedExpr as nested_expr
else:
from pyparsing import nested_expr
if T.TYPE_CHECKING:
from collections.abc import Iterable
from .ft2font import Glyph
# Enable pyparsing's packrat (memoizing) parsing to speed up the grammar.
ParserElement.enablePackrat()
_log = logging.getLogger("matplotlib.mathtext")
##############################################################################
# FONTS
def get_unicode_index(symbol: str) -> int:  # Publicly exported.
    r"""
    Return the integer index (from the Unicode table) of *symbol*.

    Parameters
    ----------
    symbol : str
        A single (Unicode) character, a TeX command (e.g. r'\pi') or a Type1
        symbol name (e.g. 'phi').
    """
    # A single Unicode character maps straight to its code point.
    try:
        return ord(symbol)
    except TypeError:
        pass
    # Otherwise treat it as a TeX command / Type1 symbol name.
    name = symbol.strip("\\")
    try:
        return tex2uni[name]
    except KeyError as err:
        raise ValueError(
            f"{symbol!r} is not a valid Unicode character or TeX/Type1 symbol"
        ) from err
class VectorParse(NamedTuple):
    """
    The namedtuple type returned by ``MathTextParser("path").parse(...)``.

    Attributes
    ----------
    width, height, depth : float
        The global metrics.
    glyphs : list
        The glyphs including their positions.
    rects : list
        The list of rectangles.
    """
    width: float
    height: float
    depth: float
    glyphs: list[tuple[FT2Font, float, int, float, float]]  # (font, size, num, x, y)
    rects: list[tuple[float, float, float, float]]  # (x, y, width, height)
# Present the type as part of the public matplotlib.mathtext namespace.
VectorParse.__module__ = "matplotlib.mathtext"
class RasterParse(NamedTuple):
    """
    The namedtuple type returned by ``MathTextParser("agg").parse(...)``.

    Attributes
    ----------
    ox, oy : float
        The offsets are always zero.
    width, height, depth : float
        The global metrics.
    image : FT2Image
        A raster image.
    """
    ox: float
    oy: float
    width: float
    height: float
    depth: float
    image: FT2Image
# Present the type as part of the public matplotlib.mathtext namespace.
RasterParse.__module__ = "matplotlib.mathtext"
class Output:
    r"""
    Result of `ship`\ping a box: lists of positioned glyphs and rectangles.
    This class is not exposed to end users, but converted to a `VectorParse` or
    a `RasterParse` by `.MathTextParser.parse`.
    """
    def __init__(self, box: Box):
        self.box = box
        self.glyphs: list[tuple[float, float, FontInfo]] = []  # (ox, oy, info)
        self.rects: list[tuple[float, float, float, float]] = []  # (x1, y1, x2, y2)
    def to_vector(self) -> VectorParse:
        """Convert to a `VectorParse`, with y flipped to grow upward."""
        # Round the box metrics up to whole units.
        w, h, d = map(
            np.ceil, [self.box.width, self.box.height, self.box.depth])
        # Mathtext y grows downward, output y upward: hence h - oy.
        gs = [(info.font, info.fontsize, info.num, ox, h - oy + info.offset)
              for ox, oy, info in self.glyphs]
        # Rectangles become (x, y, width, height) in the flipped frame.
        rs = [(x1, h - y2, x2 - x1, y2 - y1)
              for x1, y1, x2, y2 in self.rects]
        return VectorParse(w, h + d, d, gs, rs)
    def to_raster(self, *, antialiased: bool) -> RasterParse:
        """Rasterize into an `FT2Image` and return a `RasterParse`."""
        # Metrics y's and mathtext y's are oriented in opposite directions,
        # hence the switch between ymin and ymax.
        # Ink extent over all glyphs and rects, padded by one unit per side.
        xmin = min([*[ox + info.metrics.xmin for ox, oy, info in self.glyphs],
                    *[x1 for x1, y1, x2, y2 in self.rects], 0]) - 1
        ymin = min([*[oy - info.metrics.ymax for ox, oy, info in self.glyphs],
                    *[y1 for x1, y1, x2, y2 in self.rects], 0]) - 1
        xmax = max([*[ox + info.metrics.xmax for ox, oy, info in self.glyphs],
                    *[x2 for x1, y1, x2, y2 in self.rects], 0]) + 1
        ymax = max([*[oy - info.metrics.ymin for ox, oy, info in self.glyphs],
                    *[y2 for x1, y1, x2, y2 in self.rects], 0]) + 1
        w = xmax - xmin
        h = ymax - ymin - self.box.depth
        d = ymax - ymin - self.box.height
        image = FT2Image(int(np.ceil(w)), int(np.ceil(h + max(d, 0))))
        # Ideally, we could just use self.glyphs and self.rects here, shifting
        # their coordinates by (-xmin, -ymin), but this yields slightly
        # different results due to floating point slop; shipping twice is the
        # old approach and keeps baseline images backcompat.
        shifted = ship(self.box, (-xmin, -ymin))
        for ox, oy, info in shifted.glyphs:
            info.font.draw_glyph_to_bitmap(
                image, int(ox), int(oy - info.metrics.iceberg), info.glyph,
                antialiased=antialiased)
        for x1, y1, x2, y2 in shifted.rects:
            # A rule thinner than one unit is drawn centered on its span so
            # it stays visible.
            height = max(int(y2 - y1) - 1, 0)
            if height == 0:
                center = (y2 + y1) / 2
                y = int(center - (height + 1) / 2)
            else:
                y = int(y1)
            image.draw_rect_filled(int(x1), y, int(np.ceil(x2)), y + height)
        return RasterParse(0, 0, w, h + d, d, image)
class FontMetrics(NamedTuple):
    """
    Metrics of a font, as returned by `Fonts.get_metrics`.

    Attributes
    ----------
    advance : float
        The advance distance (in points) of the glyph.
    height : float
        The height of the glyph in points.
    width : float
        The width of the glyph in points.
    xmin, xmax, ymin, ymax : float
        The ink rectangle of the glyph.
    iceberg : float
        The distance from the baseline to the top of the glyph. (This corresponds to
        TeX's definition of "height".)
    slanted : bool
        Whether the glyph should be considered as "slanted" (currently used for kerning
        sub/superscripts).
    """
    advance: float
    height: float
    width: float
    xmin: float
    xmax: float
    ymin: float
    ymax: float
    iceberg: float
    slanted: bool
class FontInfo(NamedTuple):
    """
    Everything needed to position and draw a single glyph.

    Built by ``TruetypeFonts._get_info`` and consumed by `Output`.
    """
    font: FT2Font  # The font object providing the glyph.
    fontsize: float  # Font size in points.
    postscript_name: str  # PostScript name of *font*.
    metrics: FontMetrics  # The glyph's metrics (see `FontMetrics`).
    num: int  # The character code loaded from the font.
    glyph: Glyph  # The loaded glyph object.
    offset: float  # Extra vertical offset applied to the glyph.
class Fonts(abc.ABC):
    """
    An abstract base class for a system of fonts to use for mathtext.

    Concrete subclasses translate symbol keys and font file names into
    character metrics, and delegate the actual drawing to a backend class.
    """

    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        """
        Parameters
        ----------
        default_font_prop : `~.font_manager.FontProperties`
            The default non-math font, or the base font for Unicode (generic)
            font rendering.
        load_glyph_flags : `.ft2font.LoadFlags`
            Flags passed to the glyph loader (e.g. ``FT_Load_Glyph`` and
            ``FT_Load_Char`` for FreeType-based fonts).
        """
        self.default_font_prop = default_font_prop
        self.load_glyph_flags = load_glyph_flags

    def get_kern(self, font1: str, fontclass1: str, sym1: str, fontsize1: float,
                 font2: str, fontclass2: str, sym2: str, fontsize2: float,
                 dpi: float) -> float:
        """
        Get the kerning distance for font between *sym1* and *sym2*.

        See `~.Fonts.get_metrics` for a detailed description of the parameters.
        """
        # No kerning information by default; subclasses may override.
        return 0.0

    def _get_font(self, font: str) -> FT2Font:
        raise NotImplementedError

    def _get_info(self, font: str, font_class: str, sym: str, fontsize: float,
                  dpi: float) -> FontInfo:
        raise NotImplementedError

    def get_metrics(self, font: str, font_class: str, sym: str, fontsize: float,
                    dpi: float) -> FontMetrics:
        r"""
        Return the metrics for a glyph.

        Parameters
        ----------
        font : str
            One of the TeX font names: "tt", "it", "rm", "cal", "sf", "bf",
            "default", "regular", "bb", "frak", "scr". "default" and "regular"
            are synonyms and use the non-math font.
        font_class : str
            One of the TeX font names (as for *font*), but **not** "bb",
            "frak", or "scr". This is used to combine two font classes. The
            only supported combination currently is ``get_metrics("frak", "bf",
            ...)``.
        sym : str
            A symbol in raw TeX form, e.g., "1", "x", or "\sigma".
        fontsize : float
            Font size in points.
        dpi : float
            Rendering dots-per-inch.

        Returns
        -------
        FontMetrics
        """
        info = self._get_info(font, font_class, sym, fontsize, dpi)
        return info.metrics

    def render_glyph(self, output: Output, ox: float, oy: float, font: str,
                     font_class: str, sym: str, fontsize: float, dpi: float) -> None:
        """
        At position (*ox*, *oy*), draw the glyph specified by the remaining
        parameters (see `get_metrics` for their detailed description).
        """
        info = self._get_info(font, font_class, sym, fontsize, dpi)
        output.glyphs.append((ox, oy, info))

    def render_rect_filled(self, output: Output,
                           x1: float, y1: float, x2: float, y2: float) -> None:
        """Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*)."""
        output.rects.append((x1, y1, x2, y2))

    def get_xheight(self, font: str, fontsize: float, dpi: float) -> float:
        """Get the xheight for the given *font* and *fontsize*."""
        raise NotImplementedError

    def get_underline_thickness(self, font: str, fontsize: float, dpi: float) -> float:
        """
        Get the line thickness that matches the given font.  Used as a base
        unit for drawing lines such as in a fraction or radical.
        """
        raise NotImplementedError

    def get_sized_alternatives_for_symbol(self, fontname: str,
                                          sym: str) -> list[tuple[str, str]]:
        """
        Override if your font provides multiple sizes of the same symbol.
        Should return a list of symbols matching *sym* in various sizes.  The
        expression renderer will select the most appropriate size for a given
        situation from this list.
        """
        return [(fontname, sym)]
class TruetypeFonts(Fonts, metaclass=abc.ABCMeta):
    """
    A generic base class for all font setups that use Truetype fonts
    (through FT2Font).
    """
    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        super().__init__(default_font_prop, load_glyph_flags)
        # Per-instance cache (dies with the instance, unlike a class-level one).
        self._get_info = functools.cache(self._get_info)  # type: ignore[method-assign]
        self._fonts = {}  # Cache of FT2Font objects, keyed by several aliases.
        self.fontmap: dict[str | int, str] = {}  # Font key -> font file path.
        filename = findfont(self.default_font_prop)
        default_font = get_font(filename)
        self._fonts['default'] = default_font
        self._fonts['regular'] = default_font
    def _get_font(self, font: str | int) -> FT2Font:
        """Return (and cache) the FT2Font for *font* (a fontmap key or path)."""
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            # NOTE: An int is only passed by subclasses which have placed int keys into
            # `self.fontmap`, so we must cast this to confirm it to typing.
            basename = T.cast(str, font)
        cached_font = self._fonts.get(basename)
        if cached_font is None and os.path.exists(basename):
            cached_font = get_font(basename)
            # Register under the path and both spellings of the PostScript
            # name so later lookups hit the cache regardless of the key used.
            self._fonts[basename] = cached_font
            self._fonts[cached_font.postscript_name] = cached_font
            self._fonts[cached_font.postscript_name.lower()] = cached_font
        return T.cast(FT2Font, cached_font)  # FIXME: Not sure this is guaranteed.
    def _get_offset(self, font: FT2Font, glyph: Glyph, fontsize: float,
                    dpi: float) -> float:
        # Cmex10 (the TeX extension font) glyphs need re-centering vertically.
        if font.postscript_name == 'Cmex10':
            return (glyph.height / 64 / 2) + (fontsize/3 * dpi/72)
        return 0.
    def _get_glyph(self, fontname: str, font_class: str,
                   sym: str) -> tuple[FT2Font, int, bool]:
        raise NotImplementedError
    # The return value of _get_info is cached per-instance.
    def _get_info(self, fontname: str, font_class: str, sym: str, fontsize: float,
                  dpi: float) -> FontInfo:
        font, num, slanted = self._get_glyph(fontname, font_class, sym)
        font.set_size(fontsize, dpi)
        glyph = font.load_char(num, flags=self.load_glyph_flags)
        # FreeType bbox values are scaled by 64 (26.6 fixed point).
        xmin, ymin, xmax, ymax = (val / 64 for val in glyph.bbox)
        offset = self._get_offset(font, glyph, fontsize, dpi)
        metrics = FontMetrics(
            advance=glyph.linearHoriAdvance / 65536,  # 16.16 fixed point.
            height=glyph.height / 64,
            width=glyph.width / 64,
            xmin=xmin,
            xmax=xmax,
            ymin=ymin + offset,
            ymax=ymax + offset,
            # iceberg is the equivalent of TeX's "height"
            iceberg=glyph.horiBearingY / 64 + offset,
            slanted=slanted
        )
        return FontInfo(
            font=font,
            fontsize=fontsize,
            postscript_name=font.postscript_name,
            metrics=metrics,
            num=num,
            glyph=glyph,
            offset=offset
        )
    def get_xheight(self, fontname: str, fontsize: float, dpi: float) -> float:
        font = self._get_font(fontname)
        font.set_size(fontsize, dpi)
        pclt = font.get_sfnt_table('pclt')
        if pclt is None:
            # Some fonts don't store the xHeight, so we do a poor man's xHeight
            metrics = self.get_metrics(
                fontname, mpl.rcParams['mathtext.default'], 'x', fontsize, dpi)
            return metrics.iceberg
        xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
        return xHeight
    def get_underline_thickness(self, font: str, fontsize: float, dpi: float) -> float:
        # This function used to grab underline thickness from the font
        # metrics, but that information is just too un-reliable, so it
        # is now hardcoded.
        return ((0.75 / 12.0) * fontsize * dpi) / 72.0
    def get_kern(self, font1: str, fontclass1: str, sym1: str, fontsize1: float,
                 font2: str, fontclass2: str, sym2: str, fontsize2: float,
                 dpi: float) -> float:
        # Real kerning is only available between glyphs of the same font at
        # the same size; otherwise fall back to the base (zero) behavior.
        if font1 == font2 and fontsize1 == fontsize2:
            info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
            info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
            font = info1.font
            return font.get_kerning(info1.num, info2.num, Kerning.DEFAULT) / 64
        return super().get_kern(font1, fontclass1, sym1, fontsize1,
                                font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
    """
    Use the Bakoma TrueType fonts for rendering.

    Symbols are strewn about a number of font files, each of which has
    its own proprietary 8-bit encoding.
    """
    # TeX font class -> Bakoma font file basename.
    _fontmap = {
        'cal': 'cmsy10',
        'rm': 'cmr10',
        'tt': 'cmtt10',
        'it': 'cmmi10',
        'bf': 'cmb10',
        'sf': 'cmss10',
        'ex': 'cmex10',
    }
    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        # Symbols missing from the Bakoma fonts are taken from STIX.
        self._stix_fallback = StixFonts(default_font_prop, load_glyph_flags)
        super().__init__(default_font_prop, load_glyph_flags)
        # Register each font under both its TeX key and its basename.
        for key, val in self._fontmap.items():
            fullpath = findfont(val)
            self.fontmap[key] = fullpath
            self.fontmap[val] = fullpath
    _slanted_symbols = set(r"\int \oint".split())
    def _get_glyph(self, fontname: str, font_class: str,
                   sym: str) -> tuple[FT2Font, int, bool]:
        font = None
        if fontname in self.fontmap and sym in latex_to_bakoma:
            # TeX symbol with a known (font, index) in the Bakoma encoding.
            basename, num = latex_to_bakoma[sym]
            slanted = (basename == "cmmi10") or sym in self._slanted_symbols
            font = self._get_font(basename)
        elif len(sym) == 1:
            # Plain character: use its ordinal directly.
            slanted = (fontname == "it")
            font = self._get_font(fontname)
            if font is not None:
                num = ord(sym)
        if font is not None and font.get_char_index(num) != 0:
            return font, num, slanted
        else:
            return self._stix_fallback._get_glyph(fontname, font_class, sym)
    # The Bakoma fonts contain many pre-sized alternatives for the
    # delimiters.  The AutoSizedChar class will use these alternatives
    # and select the best (closest sized) glyph.
    _size_alternatives = {
        '(': [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
              ('ex', '\xb5'), ('ex', '\xc3')],
        ')': [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
              ('ex', '\xb6'), ('ex', '\x21')],
        '{': [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
              ('ex', '\xbd'), ('ex', '\x28')],
        '}': [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
              ('ex', '\xbe'), ('ex', '\x29')],
        # The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
        '[': [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
              ('ex', '\x22')],
        ']': [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
              ('ex', '\x23')],
        r'\lfloor': [('ex', '\xa5'), ('ex', '\x6a'),
                     ('ex', '\xb9'), ('ex', '\x24')],
        r'\rfloor': [('ex', '\xa6'), ('ex', '\x6b'),
                     ('ex', '\xba'), ('ex', '\x25')],
        r'\lceil': [('ex', '\xa7'), ('ex', '\x6c'),
                    ('ex', '\xbb'), ('ex', '\x26')],
        r'\rceil': [('ex', '\xa8'), ('ex', '\x6d'),
                    ('ex', '\xbc'), ('ex', '\x27')],
        r'\langle': [('ex', '\xad'), ('ex', '\x44'),
                     ('ex', '\xbf'), ('ex', '\x2a')],
        r'\rangle': [('ex', '\xae'), ('ex', '\x45'),
                     ('ex', '\xc0'), ('ex', '\x2b')],
        r'\__sqrt__': [('ex', '\x70'), ('ex', '\x71'),
                       ('ex', '\x72'), ('ex', '\x73')],
        r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
                        ('ex', '\xc2'), ('ex', '\x2d')],
        r'/': [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
               ('ex', '\xcb'), ('ex', '\x2c')],
        r'\widehat': [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
                      ('ex', '\x64')],
        r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
                        ('ex', '\x67')],
        r'<': [('cal', 'h'), ('ex', 'D')],
        r'>': [('cal', 'i'), ('ex', 'E')]
    }
    # Aliases share the alternatives of the delimiter they resolve to.
    for alias, target in [(r'\leftparen', '('),
                          (r'\rightparen', ')'),
                          (r'\leftbrace', '{'),
                          (r'\rightbrace', '}'),
                          (r'\leftbracket', '['),
                          (r'\rightbracket', ']'),
                          (r'\{', '{'),
                          (r'\}', '}'),
                          (r'\[', '['),
                          (r'\]', ']')]:
        _size_alternatives[alias] = _size_alternatives[target]
    def get_sized_alternatives_for_symbol(self, fontname: str,
                                          sym: str) -> list[tuple[str, str]]:
        return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
    """
    An abstract base class for handling Unicode fonts.

    While some reasonably complete Unicode fonts (such as DejaVu) may
    work in some situations, the only Unicode font I'm aware of with a
    complete set of math symbols is STIX.

    This class will "fallback" on the Bakoma fonts when a required
    symbol cannot be found in the font.
    """
    # Some glyphs are not present in the `cmr10` font, and must be brought in
    # from `cmsy10`. Map the Unicode indices of those glyphs to the indices at
    # which they are found in `cmsy10`.
    _cmr10_substitutions = {
        0x00D7: 0x00A3,  # Multiplication sign.
        0x2212: 0x00A1,  # Minus sign.
    }
    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        # This must come first so the backend's owner is set correctly
        fallback_rc = mpl.rcParams['mathtext.fallback']
        font_cls: type[TruetypeFonts] | None = {
            'stix': StixFonts,
            'stixsans': StixSansFonts,
            'cm': BakomaFonts
        }.get(fallback_rc)
        self._fallback_font = (font_cls(default_font_prop, load_glyph_flags)
                               if font_cls else None)
        super().__init__(default_font_prop, load_glyph_flags)
        # Map each TeX font class to the user-configured font.
        for texfont in "cal rm tt it bf sf bfit".split():
            prop = mpl.rcParams['mathtext.' + texfont]
            font = findfont(prop)
            self.fontmap[texfont] = font
        prop = FontProperties('cmex10')
        font = findfont(prop)
        self.fontmap['ex'] = font
        # include STIX sized alternatives for glyphs if fallback is STIX
        if isinstance(self._fallback_font, StixFonts):
            stixsizedaltfonts = {
                0: 'STIXGeneral',
                1: 'STIXSizeOneSym',
                2: 'STIXSizeTwoSym',
                3: 'STIXSizeThreeSym',
                4: 'STIXSizeFourSym',
                5: 'STIXSizeFiveSym'}
            for size, name in stixsizedaltfonts.items():
                fullpath = findfont(name)
                self.fontmap[size] = fullpath
                self.fontmap[name] = fullpath
    _slanted_symbols = set(r"\int \oint".split())
    def _map_virtual_font(self, fontname: str, font_class: str,
                          uniindex: int) -> tuple[str, int]:
        # Identity mapping; subclasses (e.g. StixFonts) remap virtual fonts.
        return fontname, uniindex
    def _get_glyph(self, fontname: str, font_class: str,
                   sym: str) -> tuple[FT2Font, int, bool]:
        try:
            uniindex = get_unicode_index(sym)
            found_symbol = True
        except ValueError:
            # Unknown symbol: fall through and render a '?' (or fallback).
            uniindex = ord('?')
            found_symbol = False
            _log.warning("No TeX to Unicode mapping for %a.", sym)
        fontname, uniindex = self._map_virtual_font(
            fontname, font_class, uniindex)
        new_fontname = fontname
        # Only characters in the "Letter" class should be italicized in 'it'
        # mode.  Greek capital letters should be Roman.
        if found_symbol:
            if fontname == 'it' and uniindex < 0x10000:
                char = chr(uniindex)
                if (unicodedata.category(char)[0] != "L"
                        or unicodedata.name(char).startswith("GREEK CAPITAL")):
                    new_fontname = 'rm'
            slanted = (new_fontname == 'it') or sym in self._slanted_symbols
            # Reset: we still have to confirm the chosen font actually has a
            # glyph at this index before declaring success.
            found_symbol = False
            font = self._get_font(new_fontname)
            if font is not None:
                if (uniindex in self._cmr10_substitutions
                        and font.family_name == "cmr10"):
                    # Pull the substitute glyph from cmsy10 instead.
                    font = get_font(
                        cbook._get_data_path("fonts/ttf/cmsy10.ttf"))
                    uniindex = self._cmr10_substitutions[uniindex]
                glyphindex = font.get_char_index(uniindex)
                if glyphindex != 0:
                    found_symbol = True
        if not found_symbol:
            if self._fallback_font:
                if (fontname in ('it', 'regular')
                        and isinstance(self._fallback_font, StixFonts)):
                    fontname = 'rm'
                g = self._fallback_font._get_glyph(fontname, font_class, sym)
                family = g[0].family_name
                if family in list(BakomaFonts._fontmap.values()):
                    family = "Computer Modern"
                _log.info("Substituting symbol %s from %s", sym, family)
                return g
            else:
                if (fontname in ('it', 'regular')
                        and isinstance(self, StixFonts)):
                    return self._get_glyph('rm', font_class, sym)
                # No fallback configured: warn and draw a placeholder glyph.
                _log.warning("Font %r does not have a glyph for %a [U+%x], "
                             "substituting with a dummy symbol.",
                             new_fontname, sym, uniindex)
                font = self._get_font('rm')
                uniindex = 0xA4  # currency char, for lack of anything better
                slanted = False
        return font, uniindex, slanted
    def get_sized_alternatives_for_symbol(self, fontname: str,
                                          sym: str) -> list[tuple[str, str]]:
        # The fallback font (when present) holds the sized variants.
        if self._fallback_font:
            return self._fallback_font.get_sized_alternatives_for_symbol(
                fontname, sym)
        return [(fontname, sym)]
class DejaVuFonts(UnicodeFonts, metaclass=abc.ABCMeta):
    """
    Shared base for the DejaVu font sets; concrete subclasses (serif/sans)
    provide `_fontmap` and select the matching STIX fallback.
    """
    # Overridden by subclasses; NOTE(review): __init__ mutates this
    # class-level dict in place via update() below.
    _fontmap: dict[str | int, str] = {}
    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        # This must come first so the backend's owner is set correctly
        if isinstance(self, DejaVuSerifFonts):
            self._fallback_font = StixFonts(default_font_prop, load_glyph_flags)
        else:
            self._fallback_font = StixSansFonts(default_font_prop, load_glyph_flags)
        # Bakoma is used for the \prime glyph (see _get_glyph below).
        self.bakoma = BakomaFonts(default_font_prop, load_glyph_flags)
        TruetypeFonts.__init__(self, default_font_prop, load_glyph_flags)
        # Include Stix sized alternatives for glyphs
        self._fontmap.update({
            1: 'STIXSizeOneSym',
            2: 'STIXSizeTwoSym',
            3: 'STIXSizeThreeSym',
            4: 'STIXSizeFourSym',
            5: 'STIXSizeFiveSym',
        })
        # Register each font under both its key and its name.
        for key, name in self._fontmap.items():
            fullpath = findfont(name)
            self.fontmap[key] = fullpath
            self.fontmap[name] = fullpath
    def _get_glyph(self, fontname: str, font_class: str,
                   sym: str) -> tuple[FT2Font, int, bool]:
        # Override prime symbol to use Bakoma.
        if sym == r'\prime':
            return self.bakoma._get_glyph(fontname, font_class, sym)
        else:
            # check whether the glyph is available in the display font
            uniindex = get_unicode_index(sym)
            font = self._get_font('ex')
            if font is not None:
                glyphindex = font.get_char_index(uniindex)
                if glyphindex != 0:
                    return super()._get_glyph('ex', font_class, sym)
            # otherwise return regular glyph
            return super()._get_glyph(fontname, font_class, sym)
class DejaVuSerifFonts(DejaVuFonts):
    """
    A font handling class for the DejaVu Serif fonts.

    If a glyph is not found, it will fall back to Stix Serif.
    """
    # TeX font class -> fontconfig-style font specification.
    _fontmap = {
        'rm': 'DejaVu Serif',
        'it': 'DejaVu Serif:italic',
        'bf': 'DejaVu Serif:weight=bold',
        'bfit': 'DejaVu Serif:italic:bold',
        'sf': 'DejaVu Sans',
        'tt': 'DejaVu Sans Mono',
        'ex': 'DejaVu Serif Display',
        0: 'DejaVu Serif',
    }
class DejaVuSansFonts(DejaVuFonts):
    """
    A font handling class for the DejaVu Sans fonts.

    If a glyph is not found, it will fall back to Stix Sans.
    """
    # TeX font class -> fontconfig-style font specification.
    _fontmap = {
        'rm': 'DejaVu Sans',
        'it': 'DejaVu Sans:italic',
        'bf': 'DejaVu Sans:weight=bold',
        'bfit': 'DejaVu Sans:italic:bold',
        'sf': 'DejaVu Sans',
        'tt': 'DejaVu Sans Mono',
        'ex': 'DejaVu Sans Display',
        0: 'DejaVu Sans',
    }
class StixFonts(UnicodeFonts):
    """
    A font handling class for the STIX fonts.

    In addition to what UnicodeFonts provides, this class:

    - supports "virtual fonts" which are complete alpha numeric
      character sets with different font styles at special Unicode
      code points, such as "Blackboard".
    - handles sized alternative characters for the STIXSizeX fonts.
    """
    _fontmap: dict[str | int, str] = {
        'rm': 'STIXGeneral',
        'it': 'STIXGeneral:italic',
        'bf': 'STIXGeneral:weight=bold',
        'bfit': 'STIXGeneral:italic:bold',
        'nonunirm': 'STIXNonUnicode',
        'nonuniit': 'STIXNonUnicode:italic',
        'nonunibf': 'STIXNonUnicode:weight=bold',
        0: 'STIXGeneral',
        1: 'STIXSizeOneSym',
        2: 'STIXSizeTwoSym',
        3: 'STIXSizeThreeSym',
        4: 'STIXSizeFourSym',
        5: 'STIXSizeFiveSym',
    }
    # STIX is the end of the fallback chain.
    _fallback_font = None
    _sans = False  # Flipped by StixSansFonts to route through the 'sf' table.

    def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
        TruetypeFonts.__init__(self, default_font_prop, load_glyph_flags)
        # Register each font under both its key and its name.
        for key, name in self._fontmap.items():
            fullpath = findfont(name)
            self.fontmap[key] = fullpath
            self.fontmap[name] = fullpath

    def _map_virtual_font(self, fontname: str, font_class: str,
                          uniindex: int) -> tuple[str, int]:
        """
        Map *uniindex* in virtual font *fontname* to the real font and code
        point providing that glyph: some "fonts" are actually embedded as
        code-point ranges inside other fonts.
        """
        font_mapping = stix_virtual_fonts.get(fontname)
        if (self._sans and font_mapping is None
                and fontname not in ('regular', 'default')):
            font_mapping = stix_virtual_fonts['sf']
            doing_sans_conversion = True
        else:
            doing_sans_conversion = False
        if isinstance(font_mapping, dict):
            try:
                mapping = font_mapping[font_class]
            except KeyError:
                mapping = font_mapping['rm']
        elif isinstance(font_mapping, list):
            mapping = font_mapping
        else:
            mapping = None
        if mapping is not None:
            # Binary search for the (start, end, font, offset) entry whose
            # range contains uniindex.  (This previously used a loop variable
            # named `range`, shadowing the builtin and crashing confusingly
            # on an empty mapping; `entry` stays None in that case.)
            lo, hi = 0, len(mapping)
            entry = None
            while lo < hi:
                mid = (lo + hi) // 2
                entry = mapping[mid]
                if uniindex < entry[0]:
                    hi = mid
                elif uniindex <= entry[1]:
                    break
                else:
                    lo = mid + 1
            if entry is not None and entry[0] <= uniindex <= entry[1]:
                uniindex = uniindex - entry[0] + entry[3]
                fontname = entry[2]
            elif not doing_sans_conversion:
                # This will generate a dummy character
                uniindex = 0x1
                fontname = mpl.rcParams['mathtext.default']
        # Fix some incorrect glyphs.
        if fontname in ('rm', 'it'):
            uniindex = stix_glyph_fixes.get(uniindex, uniindex)
        # Handle private use area glyphs
        if fontname in ('it', 'rm', 'bf', 'bfit') and 0xe000 <= uniindex <= 0xf8ff:
            fontname = 'nonuni' + fontname
        return fontname, uniindex

    @functools.cache
    def get_sized_alternatives_for_symbol(  # type: ignore[override]
            self,
            fontname: str,
            sym: str) -> list[tuple[str, str]] | list[tuple[int, str]]:
        # NOTE(review): caching on a method keys on *self* and keeps each
        # instance alive for the cache's lifetime; presumably acceptable here
        # because font-set objects are long-lived — confirm before reuse.
        fixes = {
            '\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']',
            '<': '\N{MATHEMATICAL LEFT ANGLE BRACKET}',
            '>': '\N{MATHEMATICAL RIGHT ANGLE BRACKET}',
        }
        sym = fixes.get(sym, sym)
        try:
            uniindex = get_unicode_index(sym)
        except ValueError:
            return [(fontname, sym)]
        # Offer the symbol from every STIX size font that provides it.
        alternatives = [(i, chr(uniindex)) for i in range(6)
                        if self._get_font(i).get_char_index(uniindex) != 0]
        # The largest size of the radical symbol in STIX has incorrect
        # metrics that cause it to be disconnected from the stem.
        if sym == r'\__sqrt__':
            alternatives = alternatives[:-1]
        return alternatives
class StixSansFonts(StixFonts):
    """
    A font handling class for the STIX fonts (that uses sans-serif
    characters by default).
    """
    # Makes StixFonts._map_virtual_font route non-virtual font names
    # through the 'sf' (sans-serif) virtual font table.
    _sans = True
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (ship())
# Packaging (hpack() and vpack())
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g., node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level.
SHRINK_FACTOR = 0.7
# The number of different sizes of chars to use, beyond which they will not
# get any smaller.
NUM_SIZE_LEVELS = 6
class FontConstantsBase:
    """
    A set of constants that controls how certain things, such as sub-
    and superscripts are laid out.  These are all metrics that can't
    be reliably retrieved from the font metrics in the font itself.

    Subclasses override individual values to tune layout for specific
    font families (see ``_font_constant_mapping``).
    """
    # Percentage of x-height of additional horiz. space after sub/superscripts
    script_space: T.ClassVar[float] = 0.05
    # Percentage of x-height that sub/superscripts drop below the baseline
    subdrop: T.ClassVar[float] = 0.4
    # Percentage of x-height that superscripts are raised from the baseline
    sup1: T.ClassVar[float] = 0.7
    # Percentage of x-height that subscripts drop below the baseline
    sub1: T.ClassVar[float] = 0.3
    # Percentage of x-height that subscripts drop below the baseline when a
    # superscript is present
    sub2: T.ClassVar[float] = 0.5
    # Percentage of x-height that sub/superscripts are offset relative to the
    # nucleus edge for non-slanted nuclei
    delta: T.ClassVar[float] = 0.025
    # Additional percentage of last character height above 2/3 of the
    # x-height that superscripts are offset relative to the subscript
    # for slanted nuclei
    delta_slanted: T.ClassVar[float] = 0.2
    # Percentage of x-height that superscripts and subscripts are offset for
    # integrals
    delta_integral: T.ClassVar[float] = 0.1
class ComputerModernFontConstants(FontConstantsBase):
    """Layout constants tuned for the Computer Modern (cm*) fonts."""
    script_space = 0.075
    subdrop = 0.2
    sup1 = 0.45
    sub1 = 0.2
    sub2 = 0.3
    delta = 0.075
    delta_slanted = 0.3
    delta_integral = 0.3
class STIXFontConstants(FontConstantsBase):
    """Layout constants tuned for the STIX fonts."""
    script_space = 0.1
    sup1 = 0.8
    sub2 = 0.6
    delta = 0.05
    delta_slanted = 0.3
    delta_integral = 0.3
class STIXSansFontConstants(FontConstantsBase):
    """Layout constants tuned for the STIX sans-serif code points."""
    script_space = 0.05
    sup1 = 0.8
    delta_slanted = 0.6
    delta_integral = 0.3
class DejaVuSerifFontConstants(FontConstantsBase):
    """DejaVu Serif uses the base layout constants unchanged."""
    pass
class DejaVuSansFontConstants(FontConstantsBase):
    """DejaVu Sans uses the base layout constants unchanged."""
    pass
# Maps font family names to the FontConstantsBase subclass to use.
_font_constant_mapping = {
    'DejaVu Sans': DejaVuSansFontConstants,
    'DejaVu Sans Mono': DejaVuSansFontConstants,
    'DejaVu Serif': DejaVuSerifFontConstants,
    'cmb10': ComputerModernFontConstants,
    'cmex10': ComputerModernFontConstants,
    'cmmi10': ComputerModernFontConstants,
    'cmr10': ComputerModernFontConstants,
    'cmss10': ComputerModernFontConstants,
    'cmsy10': ComputerModernFontConstants,
    'cmtt10': ComputerModernFontConstants,
    'STIXGeneral': STIXFontConstants,
    'STIXNonUnicode': STIXFontConstants,
    'STIXSizeFiveSym': STIXFontConstants,
    'STIXSizeFourSym': STIXFontConstants,
    'STIXSizeThreeSym': STIXFontConstants,
    'STIXSizeTwoSym': STIXFontConstants,
    'STIXSizeOneSym': STIXFontConstants,
    # Map the fonts we used to ship, just for good measure
    'Bitstream Vera Sans': DejaVuSansFontConstants,
    'Bitstream Vera': DejaVuSansFontConstants,
}
def _get_font_constant_set(state: ParserState) -> type[FontConstantsBase]:
    """Return the FontConstantsBase subclass for the font used by *state*."""
    family = state.fontset._get_font(state.font).family_name
    constants = _font_constant_mapping.get(family, FontConstantsBase)
    # STIX sans isn't really its own fonts, just different code points
    # in the STIX fonts, so we have to detect this one separately.
    if constants is STIXFontConstants and isinstance(state.fontset,
                                                     StixSansFonts):
        return STIXSansFontConstants
    return constants
class Node:
    """A node in the TeX box model."""

    def __init__(self) -> None:
        # Current size level; bumped by one on every shrink().
        self.size = 0

    def __repr__(self) -> str:
        return type(self).__name__

    def get_kerning(self, next: Node | None) -> float:
        # Plain nodes never kern; `Char` overrides this with real metrics.
        return 0.0

    def shrink(self) -> None:
        """
        Shrinks one level smaller. There are only three levels of
        sizes, after which things will no longer get smaller.
        """
        self.size += 1

    def render(self, output: Output, x: float, y: float) -> None:
        """Render this node."""
class Box(Node):
    """A node with a physical location (width, height, depth)."""

    def __init__(self, width: float, height: float, depth: float) -> None:
        super().__init__()
        self.width = width
        self.height = height
        self.depth = depth

    def shrink(self) -> None:
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            # Scale every dimension down one size level.
            for attr in ("width", "height", "depth"):
                setattr(self, attr, getattr(self, attr) * SHRINK_FACTOR)

    def render(self, output: Output,  # type: ignore[override]
               x1: float, y1: float, x2: float, y2: float) -> None:
        # Plain boxes draw nothing; subclasses such as `Rule` override this.
        pass
class Vbox(Box):
    """A box with only height (zero width)."""

    def __init__(self, height: float, depth: float):
        # Vertical extent only; width is fixed at zero.
        super().__init__(0., height, depth)
class Hbox(Box):
    """A box with only width (zero height and depth)."""

    def __init__(self, width: float):
        # Horizontal extent only; height and depth are fixed at zero.
        super().__init__(width, 0., 0.)
class Char(Node):
    """
    A single character.

    Unlike TeX, the font information and metrics are stored with each `Char`
    to make it easier to lookup the font metrics when needed.  Note that TeX
    boxes have a width, height, and depth, unlike Type1 and TrueType which use
    a full bounding box and an advance in the x-direction.  The metrics must
    be converted to the TeX model, and the advance (if different from width)
    must be converted into a `Kern` node when the `Char` is added to its parent
    `Hlist`.
    """

    def __init__(self, c: str, state: ParserState):
        super().__init__()
        self.c = c
        self.fontset = state.fontset
        self.font = state.font
        self.font_class = state.font_class
        self.fontsize = state.fontsize
        self.dpi = state.dpi
        # The real width, height and depth will be set during the
        # pack phase, after we know the real fontsize.
        self._update_metrics()

    def __repr__(self) -> str:
        return '`%s`' % self.c

    def _update_metrics(self) -> None:
        metrics = self.fontset.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        self._metrics = metrics
        # A space carries no ink, so its advance is the honest width.
        self.width = metrics.advance if self.c == ' ' else metrics.width
        self.height = metrics.iceberg
        self.depth = -(metrics.iceberg - metrics.height)

    def is_slanted(self) -> bool:
        return self._metrics.slanted

    def get_kerning(self, next: Node | None) -> float:
        """
        Return the amount of kerning between this and the given character.

        This method is called when characters are strung together into `Hlist`
        to create `Kern` nodes.
        """
        advance = self._metrics.advance - self.width
        if not isinstance(next, Char):
            return advance
        return advance + self.fontset.get_kern(
            self.font, self.font_class, self.c, self.fontsize,
            next.font, next.font_class, next.c, next.fontsize,
            self.dpi)

    def render(self, output: Output, x: float, y: float) -> None:
        self.fontset.render_glyph(
            output, x, y,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)

    def shrink(self) -> None:
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.fontsize *= SHRINK_FACTOR
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR
class Accent(Char):
    """
    The font metrics need to be dealt with differently for accents,
    since they are already offset correctly from the baseline in
    TrueType fonts.
    """

    def _update_metrics(self) -> None:
        metrics = self._metrics = self.fontset.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        # Use the glyph bounding box instead of the typographic metrics.
        self.width = metrics.xmax - metrics.xmin
        self.height = metrics.ymax - metrics.ymin
        self.depth = 0

    def shrink(self) -> None:
        super().shrink()
        # Recompute the bbox-derived metrics at the reduced fontsize.
        self._update_metrics()

    def render(self, output: Output, x: float, y: float) -> None:
        # Shift so the glyph's bounding-box origin lands at (x, y).
        self.fontset.render_glyph(
            output, x - self._metrics.xmin, y + self._metrics.ymin,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
    """A list of nodes (either horizontal or vertical)."""

    def __init__(self, elements: T.Sequence[Node]):
        super().__init__(0., 0., 0.)
        self.shift_amount = 0.   # An arbitrary offset
        self.children = [*elements]  # The child nodes of this list
        # The following parameters are set in the vpack and hpack functions
        self.glue_set = 0.   # The glue setting of this list
        self.glue_sign = 0   # 0: normal, -1: shrinking, 1: stretching
        self.glue_order = 0  # The order of infinity (0 - 3) for the glue

    def __repr__(self) -> str:
        # Bug fix: the previous format string had only two '{}' placeholders
        # for six arguments, so width/height/depth/shift_amount and the
        # children were silently dropped from the repr.  Show them all.
        return '{}<w={:.02f} h={:.02f} d={:.02f} s={:.02f}>[{}]'.format(
            super().__repr__(),
            self.width, self.height,
            self.depth, self.shift_amount,
            ', '.join([repr(x) for x in self.children]))

    def _set_glue(self, x: float, sign: int, totals: list[float],
                  error_type: str) -> None:
        """
        Distribute *x* of stretch (sign=1) or shrink (sign=-1) over the glue
        of the highest order present in *totals*, recording the order, sign
        and per-unit setting on this list.
        """
        self.glue_order = o = next(
            # Highest order of glue used by the members of this list.
            (i for i in range(len(totals))[::-1] if totals[i] != 0), 0)
        self.glue_sign = sign
        if totals[o] != 0.:
            self.glue_set = x / totals[o]
        else:
            # No glue of that order available: the box stays over/underfull.
            self.glue_sign = 0
            self.glue_ratio = 0.
        if o == 0:
            if len(self.children):
                _log.warning("%s %s: %r",
                             error_type, type(self).__name__, self)

    def shrink(self) -> None:
        # Shrink children first, then this box's own dimensions.
        for child in self.children:
            child.shrink()
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.shift_amount *= SHRINK_FACTOR
            self.glue_set *= SHRINK_FACTOR
class Hlist(List):
    """A horizontal list of boxes."""

    def __init__(self, elements: T.Sequence[Node], w: float = 0.0,
                 m: T.Literal['additional', 'exactly'] = 'additional',
                 do_kern: bool = True):
        super().__init__(elements)
        if do_kern:
            self.kern()
        self.hpack(w=w, m=m)

    def kern(self) -> None:
        """
        Insert `Kern` nodes between `Char` nodes to set kerning.

        The `Char` nodes themselves determine the amount of kerning they need
        (in `~Char.get_kerning`), and this function just creates the correct
        linked list.
        """
        new_children = []
        num_children = len(self.children)
        if num_children:
            for i in range(num_children):
                elem = self.children[i]
                # Each element is kerned against its successor (None at end).
                if i < num_children - 1:
                    next = self.children[i + 1]
                else:
                    next = None
                new_children.append(elem)
                kerning_distance = elem.get_kerning(next)
                if kerning_distance != 0.:
                    kern = Kern(kerning_distance)
                    new_children.append(kern)
            self.children = new_children

    def hpack(self, w: float = 0.0,
              m: T.Literal['additional', 'exactly'] = 'additional') -> None:
        r"""
        Compute the dimensions of the resulting boxes, and adjust the glue if
        one of those dimensions is pre-specified.  The computed sizes normally
        enclose all of the material inside the new box; but some items may
        stick out if negative glue is used, if the box is overfull, or if a
        ``\vbox`` includes other boxes that have been shifted left.

        Parameters
        ----------
        w : float, default: 0
            A width.
        m : {'exactly', 'additional'}, default: 'additional'
            Whether to produce a box whose width is 'exactly' *w*; or a box
            with the natural width of the contents, plus *w* ('additional').

        Notes
        -----
        The defaults produce a box with the natural width of the contents.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        h = 0.  # max height above the baseline
        d = 0.  # max depth below the baseline
        x = 0.  # natural width accumulated so far
        total_stretch = [0.] * 4  # stretchability per glue order 0-3
        total_shrink = [0.] * 4   # shrinkability per glue order 0-3
        for p in self.children:
            if isinstance(p, Char):
                x += p.width
                h = max(h, p.height)
                d = max(d, p.depth)
            elif isinstance(p, Box):
                x += p.width
                # Running (infinite) dimensions don't contribute to extent.
                if not np.isinf(p.height) and not np.isinf(p.depth):
                    s = getattr(p, 'shift_amount', 0.)
                    h = max(h, p.height - s)
                    d = max(d, p.depth + s)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += p.width
        self.height = h
        self.depth = d
        if m == 'additional':
            w += x
        self.width = w
        # Remaining slack: positive -> stretch glue, negative -> shrink glue.
        x = w - x
        if x == 0.:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overful")
        else:
            self._set_glue(x, -1, total_shrink, "Underful")
class Vlist(List):
    """A vertical list of boxes."""

    def __init__(self, elements: T.Sequence[Node], h: float = 0.0,
                 m: T.Literal['additional', 'exactly'] = 'additional'):
        super().__init__(elements)
        self.vpack(h=h, m=m)

    def vpack(self, h: float = 0.0,
              m: T.Literal['additional', 'exactly'] = 'additional',
              l: float = np.inf) -> None:
        """
        Compute the dimensions of the resulting boxes, and to adjust the glue
        if one of those dimensions is pre-specified.

        Parameters
        ----------
        h : float, default: 0
            A height.
        m : {'exactly', 'additional'}, default: 'additional'
            Whether to produce a box whose height is 'exactly' *h*; or a box
            with the natural height of the contents, plus *h* ('additional').
        l : float, default: np.inf
            The maximum height.

        Notes
        -----
        The defaults produce a box with the natural height of the contents.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        w = 0.  # max width of any child
        d = 0.  # depth of the most recent box (pending until next item)
        x = 0.  # natural height accumulated so far
        total_stretch = [0.] * 4  # stretchability per glue order 0-3
        total_shrink = [0.] * 4   # shrinkability per glue order 0-3
        for p in self.children:
            if isinstance(p, Box):
                # The previous box's depth becomes interior height.
                x += d + p.height
                d = p.depth
                if not np.isinf(p.width):
                    s = getattr(p, 'shift_amount', 0.)
                    w = max(w, p.width + s)
            elif isinstance(p, Glue):
                x += d
                d = 0.
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += d + p.width
                d = 0.
            elif isinstance(p, Char):
                raise RuntimeError(
                    "Internal mathtext error: Char node found in Vlist")
        self.width = w
        # Cap the final depth at *l*; any excess is folded into the height.
        if d > l:
            x += d - l
            self.depth = l
        else:
            self.depth = d
        if m == 'additional':
            h += x
        self.height = h
        # Remaining slack: positive -> stretch glue, negative -> shrink glue.
        x = h - x
        if x == 0:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overful")
        else:
            self._set_glue(x, -1, total_shrink, "Underful")
class Rule(Box):
    """
    A solid black rectangle.

    It has *width*, *depth*, and *height* fields just as in an `Hlist`.
    However, if any of these dimensions is inf, the actual value will be
    determined by running the rule up to the boundary of the innermost
    enclosing box.  This is called a "running dimension".  The width is never
    running in an `Hlist`; the height and depth are never running in a
    `Vlist`.
    """

    def __init__(self, width: float, height: float, depth: float, state: ParserState):
        super().__init__(width, height, depth)
        self.fontset = state.fontset

    def render(self, output: Output,  # type: ignore[override]
               x: float, y: float, w: float, h: float) -> None:
        # Delegate to the fontset so each backend draws the filled rect.
        self.fontset.render_rect_filled(output, x, y, x + w, y + h)
class Hrule(Rule):
    """Convenience class to create a horizontal rule."""

    def __init__(self, state: ParserState, thickness: float | None = None):
        if thickness is None:
            thickness = state.get_current_underline_thickness()
        # Split the thickness evenly above and below the baseline; the
        # width is running (infinite) and set by the enclosing box.
        half = thickness * 0.5
        super().__init__(np.inf, half, half, state)
class Vrule(Rule):
    """Convenience class to create a vertical rule."""

    def __init__(self, state: ParserState):
        # Height and depth are running (infinite); width is the line weight.
        weight = state.get_current_underline_thickness()
        super().__init__(weight, np.inf, np.inf, state)
class _GlueSpec(NamedTuple):
    """Fixed width plus stretch/shrink amounts and their orders of infinity
    (0 = finite, 1-3 = fil/fill/filll)."""
    width: float
    stretch: float
    stretch_order: int
    shrink: float
    shrink_order: int
# Named glue specs mirroring TeX's predefined glues; 'ss' is the
# stretch-and-shrink glue used for centering.
_GlueSpec._named = {  # type: ignore[attr-defined]
    'fil': _GlueSpec(0., 1., 1, 0., 0),
    'fill': _GlueSpec(0., 1., 2, 0., 0),
    'filll': _GlueSpec(0., 1., 3, 0., 0),
    'neg_fil': _GlueSpec(0., 0., 0, 1., 1),
    'neg_fill': _GlueSpec(0., 0., 0, 1., 2),
    'neg_filll': _GlueSpec(0., 0., 0, 1., 3),
    'empty': _GlueSpec(0., 0., 0, 0., 0),
    'ss': _GlueSpec(0., 1., 1, -1., 1),
}
class Glue(Node):
    """
    Most of the information in this object is stored in the underlying
    ``_GlueSpec`` class, which is shared between multiple glue objects.
    (This is a memory optimization which probably doesn't matter anymore, but
    it's easier to stick to what TeX does.)
    """

    def __init__(self,
                 glue_type: _GlueSpec | T.Literal["fil", "fill", "filll",
                                                  "neg_fil", "neg_fill",
                                                  "neg_filll", "empty", "ss"]):
        super().__init__()
        # Accept either a ready-made spec or the name of a predefined one.
        if isinstance(glue_type, _GlueSpec):
            glue_spec = glue_type
        elif isinstance(glue_type, str):
            glue_spec = _GlueSpec._named[glue_type]  # type: ignore[attr-defined]
        else:
            raise ValueError("glue_type must be a glue spec name or instance")
        self.glue_spec = glue_spec

    def shrink(self) -> None:
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            # Specs are shared; replace rather than mutate in place.
            spec = self.glue_spec
            self.glue_spec = spec._replace(width=spec.width * SHRINK_FACTOR)
class HCentered(Hlist):
    """
    A convenience class to create an `Hlist` whose contents are
    centered within its enclosing box.
    """

    def __init__(self, elements: list[Node]):
        # Flank the contents with stretch/shrink glue on both sides.
        super().__init__([Glue('ss'), *elements, Glue('ss')], do_kern=False)
class VCentered(Vlist):
    """
    A convenience class to create a `Vlist` whose contents are
    centered within its enclosing box.
    """

    def __init__(self, elements: list[Node]):
        # Flank the contents with stretch/shrink glue above and below.
        super().__init__([Glue('ss'), *elements, Glue('ss')])
class Kern(Node):
    """
    A `Kern` node has a width field to specify a (normally
    negative) amount of spacing. This spacing correction appears in
    horizontal lists between letters like A and V when the font
    designer said that it looks better to move them closer together or
    further apart. A kern node can also appear in a vertical list,
    when its *width* denotes additional spacing in the vertical
    direction.
    """

    # Kerns occupy no vertical space.
    height = 0
    depth = 0

    def __init__(self, width: float):
        super().__init__()
        self.width = width

    def __repr__(self) -> str:
        return "k%.02f" % self.width

    def shrink(self) -> None:
        super().shrink()
        if self.size < NUM_SIZE_LEVELS:
            self.width *= SHRINK_FACTOR
class AutoHeightChar(Hlist):
    """
    A character as close to the given height and depth as possible.

    When using a font with multiple height versions of some characters (such
    as the BaKoMa fonts), the correct glyph will be selected, otherwise this
    will always just return a scaled version of the glyph.
    """

    def __init__(self, c: str, height: float, depth: float, state: ParserState,
                 always: bool = False, factor: float | None = None):
        # All (font, glyph) alternatives available for this symbol.
        alternatives = state.fontset.get_sized_alternatives_for_symbol(
            state.font, c)
        xHeight = state.fontset.get_xheight(
            state.font, state.fontsize, state.dpi)
        # Work on a copy so font/fontsize tweaks don't leak to the caller.
        state = state.copy()
        target_total = height + depth
        for fontname, sym in alternatives:
            state.font = fontname
            char = Char(sym, state)
            # Ensure that size 0 is chosen when the text is regular sized but
            # with descender glyphs by subtracting 0.2 * xHeight
            if char.height + char.depth >= target_total - 0.2 * xHeight:
                break
        # NOTE: `char` and `sym` deliberately carry over from the loop above
        # (the first alternative big enough, or the largest available).
        shift = 0.0
        if state.font != 0 or len(alternatives) == 1:
            if factor is None:
                # Scale the chosen glyph to exactly the requested extent.
                factor = target_total / (char.height + char.depth)
            state.fontsize *= factor
            char = Char(sym, state)
            # Shift so the glyph's depth matches the requested depth.
            shift = (depth - char.depth)
        super().__init__([char])
        self.shift_amount = shift
class AutoWidthChar(Hlist):
    """
    A character as close to the given width as possible.

    When using a font with multiple width versions of some characters (such
    as the BaKoMa fonts), the correct glyph will be selected, otherwise this
    will always just return a scaled version of the glyph.
    """

    def __init__(self, c: str, width: float, state: ParserState, always: bool = False,
                 char_class: type[Char] = Char):
        # All (font, glyph) alternatives available for this symbol.
        alternatives = state.fontset.get_sized_alternatives_for_symbol(
            state.font, c)
        # Work on a copy so font/fontsize tweaks don't leak to the caller.
        state = state.copy()
        for fontname, sym in alternatives:
            state.font = fontname
            char = char_class(sym, state)
            if char.width >= width:
                break
        # NOTE: `char` and `sym` deliberately carry over from the loop above.
        # Scale the chosen glyph to exactly the requested width.
        factor = width / char.width
        state.fontsize *= factor
        char = char_class(sym, state)
        super().__init__([char])
        self.width = char.width
def ship(box: Box, xy: tuple[float, float] = (0, 0)) -> Output:
    """
    Ship out *box* at offset *xy*, converting it to an `Output`.

    Since boxes can be inside of boxes inside of boxes, the main work of
    `ship` is done by two mutually recursive routines, `hlist_out` and
    `vlist_out`, which traverse the `Hlist` nodes and `Vlist` nodes inside
    of horizontal and vertical boxes.  The global variables used in TeX to
    store state as it processes have become local variables here.
    """
    ox, oy = xy
    cur_v = 0.
    cur_h = 0.
    off_h = ox
    off_v = oy + box.height
    output = Output(box)

    def clamp(value: float) -> float:
        # Mirror TeX's guard against coordinate overflow.
        return -1e9 if value < -1e9 else +1e9 if value > +1e9 else value

    def hlist_out(box: Hlist) -> None:
        nonlocal cur_v, cur_h, off_h, off_v
        cur_g = 0      # rounded glue adjustment accumulated so far
        cur_glue = 0.  # raw glue accumulated at the active order
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        base_line = cur_v
        left_edge = cur_h  # kept for parity with TeX; not otherwise used
        for p in box.children:
            if isinstance(p, Char):
                p.render(output, cur_h + off_h, cur_v + off_v)
                cur_h += p.width
            elif isinstance(p, Kern):
                cur_h += p.width
            elif isinstance(p, List):
                # node623
                if len(p.children) == 0:
                    cur_h += p.width
                else:
                    edge = cur_h
                    cur_v = base_line + p.shift_amount
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    elif isinstance(p, Vlist):
                        # p.vpack(box.height + box.depth, 'exactly')
                        vlist_out(p)
                    else:
                        assert False, "unreachable code"
                    cur_h = edge + p.width
                    cur_v = base_line
            elif isinstance(p, Box):
                # node624
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                # Running dimensions take their value from the enclosing box.
                if np.isinf(rule_height):
                    rule_height = box.height
                if np.isinf(rule_depth):
                    rule_depth = box.depth
                if rule_height > 0 and rule_width > 0:
                    cur_v = base_line + rule_depth
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
                    cur_v = base_line
                cur_h += rule_width
            elif isinstance(p, Glue):
                # node625
                glue_spec = p.glue_spec
                rule_width = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_width += cur_g
                cur_h += rule_width

    def vlist_out(box: Vlist) -> None:
        nonlocal cur_v, cur_h, off_h, off_v
        cur_g = 0      # rounded glue adjustment accumulated so far
        cur_glue = 0.  # raw glue accumulated at the active order
        glue_order = box.glue_order
        glue_sign = box.glue_sign
        left_edge = cur_h
        cur_v -= box.height
        top_edge = cur_v  # kept for parity with TeX; not otherwise used
        for p in box.children:
            if isinstance(p, Kern):
                cur_v += p.width
            elif isinstance(p, List):
                if len(p.children) == 0:
                    cur_v += p.height + p.depth
                else:
                    cur_v += p.height
                    cur_h = left_edge + p.shift_amount
                    save_v = cur_v
                    p.width = box.width
                    if isinstance(p, Hlist):
                        hlist_out(p)
                    elif isinstance(p, Vlist):
                        vlist_out(p)
                    else:
                        assert False, "unreachable code"
                    cur_v = save_v + p.depth
                    cur_h = left_edge
            elif isinstance(p, Box):
                rule_height = p.height
                rule_depth = p.depth
                rule_width = p.width
                # A running width takes its value from the enclosing box.
                if np.isinf(rule_width):
                    rule_width = box.width
                rule_height += rule_depth
                if rule_height > 0 and rule_depth > 0:
                    cur_v += rule_height
                    p.render(output,
                             cur_h + off_h, cur_v + off_v,
                             rule_width, rule_height)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                rule_height = glue_spec.width - cur_g
                if glue_sign != 0:  # normal
                    if glue_sign == 1:  # stretching
                        if glue_spec.stretch_order == glue_order:
                            cur_glue += glue_spec.stretch
                            cur_g = round(clamp(box.glue_set * cur_glue))
                    elif glue_spec.shrink_order == glue_order:  # shrinking
                        cur_glue += glue_spec.shrink
                        cur_g = round(clamp(box.glue_set * cur_glue))
                rule_height += cur_g
                cur_v += rule_height
            elif isinstance(p, Char):
                raise RuntimeError(
                    "Internal mathtext error: Char node found in vlist")

    # The top-level box produced by the parser is always horizontal.
    assert isinstance(box, Hlist)
    hlist_out(box)
    return output
##############################################################################
# PARSER
def Error(msg: str) -> ParserElement:
    """Return a parser element that always fails fatally with *msg*."""
    def _fail(s: str, loc: int, toks: ParseResults) -> T.Any:
        raise ParseFatalException(s, loc, msg)
    return Empty().setParseAction(_fail)
class ParserState:
    """
    Parser state.

    States are pushed and popped from a stack as necessary, and the "current"
    state is always at the top of the stack.

    Upon entering and leaving a group { } or math/non-math, the stack is
    pushed and popped accordingly.
    """

    def __init__(self, fontset: Fonts, font: str, font_class: str,
                 fontsize: float, dpi: float):
        self.fontset = fontset
        self._font = font
        self.font_class = font_class
        self.fontsize = fontsize
        self.dpi = dpi

    def copy(self) -> ParserState:
        """Return a shallow copy of this state."""
        return copy.copy(self)

    @property
    def font(self) -> str:
        return self._font

    @font.setter
    def font(self, name: str) -> None:
        # Selecting one of the "class" fonts also records it as the current
        # font class, so later relative font switches resolve correctly.
        if name in ('rm', 'it', 'bf', 'bfit'):
            self.font_class = name
        self._font = name

    def get_current_underline_thickness(self) -> float:
        """Return the underline thickness for this state."""
        return self.fontset.get_underline_thickness(
            self.font, self.fontsize, self.dpi)
def cmd(expr: str, args: ParserElement) -> ParserElement:
    r"""
    Helper to define TeX commands.

    ``cmd("\cmd", args)`` is equivalent to
    ``"\cmd" - (args | Error("Expected \cmd{arg}{...}"))`` where the names in
    the error message are taken from element names in *args*.  If *expr*
    already includes arguments (e.g. "\cmd{arg}{...}"), then they are stripped
    when constructing the parse element, but kept (and *expr* is used as is)
    in the error message.
    """
    def names(elt: ParserElement) -> T.Generator[str, None, None]:
        # Recursively collect the result names of *elt*'s sub-expressions.
        if isinstance(elt, ParseExpression):
            for sub in elt.exprs:
                yield from names(sub)
        elif elt.resultsName:
            yield elt.resultsName

    csname = expr.split("{", 1)[0]
    err = expr if expr != csname else csname + "".join(
        "{%s}" % name for name in names(args))
    return csname - (args | Error(f"Expected {err}"))
class Parser:
"""
A pyparsing-based parser for strings containing math expressions.
Raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few corners.
"""
class _MathStyle(enum.Enum):
    # TeX math styles, ordered from largest (display) to smallest
    # (second-level script) rendering.
    DISPLAYSTYLE = 0
    TEXTSTYLE = 1
    SCRIPTSTYLE = 2
    SCRIPTSCRIPTSTYLE = 3
_binary_operators = set(
'+ * - \N{MINUS SIGN}'
r'''
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg
\dotplus \dotminus \Cap
\Cup \barwedge \boxdot
\boxminus \boxplus \boxtimes
\curlyvee \curlywedge \divideontimes
\doublebarwedge \leftthreetimes \rightthreetimes
\slash \veebar \barvee
\cupdot \intercal \amalg
\circledcirc \circleddash \circledast
\boxbar \obar \merge
\minuscolon \dotsminusdots
'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto \vdash
\dashv \dots \doteqdot \leqq
\geqq \lneqq \gneqq \lessgtr
\leqslant \geqslant \eqgtr \eqless
\eqslantless \eqslantgtr \lesseqgtr \backsim
\backsimeq \lesssim \gtrsim \precsim
\precnsim \gnsim \lnsim \succsim
\succnsim \nsim \lesseqqgtr \gtreqqless
\gtreqless \subseteqq \supseteqq \subsetneqq
\supsetneqq \lessapprox \approxeq \gtrapprox
\precapprox \succapprox \precnapprox \succnapprox
\npreccurlyeq \nsucccurlyeq \nsqsubseteq \nsqsupseteq
\sqsubsetneq \sqsupsetneq \nlesssim \ngtrsim
\nlessgtr \ngtrless \lnapprox \gnapprox
\napprox \approxeq \approxident \lll
\ggg \nparallel \Vdash \Vvdash
\nVdash \nvdash \vDash \nvDash
\nVDash \oequal \simneqq \triangle
\triangleq \triangleeq \triangleleft
\triangleright \ntriangleleft \ntriangleright
\trianglelefteq \ntrianglelefteq \trianglerighteq
\ntrianglerighteq \blacktriangleleft \blacktriangleright
\equalparallel \measuredrightangle \varlrtriangle
\Doteq \Bumpeq \Subset \Supset
\backepsilon \because \therefore \bot
\top \bumpeq \circeq \coloneq
\curlyeqprec \curlyeqsucc \eqcirc \eqcolon
\eqsim \fallingdotseq \gtrdot \gtrless
\ltimes \rtimes \lessdot \ne
\ncong \nequiv \ngeq \ngtr
\nleq \nless \nmid \notin
\nprec \nsubset \nsubseteq \nsucc
\nsupset \nsupseteq \pitchfork \preccurlyeq
\risingdotseq \subsetneq \succcurlyeq \supsetneq
\varpropto \vartriangleleft \scurel
\vartriangleright \rightangle \equal \backcong
\eqdef \wedgeq \questeq \between
\veeeq \disin \varisins \isins
\isindot \varisinobar \isinobar \isinvb
\isinE \nisd \varnis \nis
\varniobar \niobar \bagmember \ratio
\Equiv \stareq \measeq \arceq
\rightassert \rightModels \smallin \smallowns
\notsmallowns \nsimeq'''.split())
_arrow_symbols = set(r"""
\leftarrow \longleftarrow \uparrow \Leftarrow \Longleftarrow
\Uparrow \rightarrow \longrightarrow \downarrow \Rightarrow
\Longrightarrow \Downarrow \leftrightarrow \updownarrow
\longleftrightarrow \updownarrow \Leftrightarrow
\Longleftrightarrow \Updownarrow \mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow \leftharpoonup
\rightharpoonup \swarrow \leftharpoondown \rightharpoondown
\nwarrow \rightleftharpoons \leadsto \dashrightarrow
\dashleftarrow \leftleftarrows \leftrightarrows \Lleftarrow
\Rrightarrow \twoheadleftarrow \leftarrowtail \looparrowleft
\leftrightharpoons \curvearrowleft \circlearrowleft \Lsh
\upuparrows \upharpoonleft \downharpoonleft \multimap
\leftrightsquigarrow \rightrightarrows \rightleftarrows
\rightrightarrows \rightleftarrows \twoheadrightarrow
\rightarrowtail \looparrowright \rightleftharpoons
\curvearrowright \circlearrowright \Rsh \downdownarrows
\upharpoonright \downharpoonright \rightsquigarrow \nleftarrow
\nrightarrow \nLeftarrow \nRightarrow \nleftrightarrow
\nLeftrightarrow \to \Swarrow \Searrow \Nwarrow \Nearrow
\leftsquigarrow \overleftarrow \overleftrightarrow \cwopencirclearrow
\downzigzagarrow \cupleftarrow \rightzigzagarrow \twoheaddownarrow
\updownarrowbar \twoheaduparrow \rightarrowbar \updownarrows
\barleftarrow \mapsfrom \mapsdown \mapsup \Ldsh \Rdsh
""".split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set("lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'\int \oint \iint \oiint \iiint \oiiint \iiiint'.split())
_fontnames = set("rm cal it tt sf bf bfit "
"default bb frak scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambi_delims = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow . \vert \Vert""".split())
_left_delims = set(r"""
( [ \{ < \lfloor \langle \lceil \lbrace \leftbrace \lbrack \leftparen \lgroup
""".split())
_right_delims = set(r"""
) ] \} > \rfloor \rangle \rceil \rbrace \rightbrace \rbrack \rightparen \rgroup
""".split())
_delims = _left_delims | _right_delims | _ambi_delims
_small_greek = set([unicodedata.name(chr(i)).split()[-1].lower() for i in
range(ord('\N{GREEK SMALL LETTER ALPHA}'),
ord('\N{GREEK SMALL LETTER OMEGA}') + 1)])
_latin_alphabets = set(string.ascii_letters)
def __init__(self) -> None:
p = types.SimpleNamespace()
def set_names_and_parse_actions() -> None:
for key, val in vars(p).items():
if not key.startswith('_'):
# Set names on (almost) everything -- very useful for debugging
# token, placeable, and auto_delim are forward references which
# are left without names to ensure useful error messages
if key not in ("token", "placeable", "auto_delim"):
val.setName(key)
# Set actions
if hasattr(self, key):
val.setParseAction(getattr(self, key))
# Root definitions.
# In TeX parlance, a csname is a control sequence name (a "\foo").
def csnames(group: str, names: Iterable[str]) -> Regex:
ends_with_alpha = []
ends_with_nonalpha = []
for name in names:
if name[-1].isalpha():
ends_with_alpha.append(name)
else:
ends_with_nonalpha.append(name)
return Regex(
r"\\(?P<{group}>(?:{alpha})(?![A-Za-z]){additional}{nonalpha})".format(
group=group,
alpha="|".join(map(re.escape, ends_with_alpha)),
additional="|" if ends_with_nonalpha else "",
nonalpha="|".join(map(re.escape, ends_with_nonalpha)),
)
)
p.float_literal = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
p.space = oneOf(self._space_widths)("space")
p.style_literal = oneOf(
[str(e.value) for e in self._MathStyle])("style_literal")
p.symbol = Regex(
r"[a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|\U00000080-\U0001ffff]"
r"|\\[%${}\[\]_|]"
+ r"|\\(?:{})(?![A-Za-z])".format(
"|".join(map(re.escape, tex2uni)))
)("sym").leaveWhitespace()
p.unknown_symbol = Regex(r"\\[A-Za-z]+")("name")
p.font = csnames("font", self._fontnames)
p.start_group = Optional(r"\math" + oneOf(self._fontnames)("font")) + "{"
p.end_group = Literal("}")
p.delim = oneOf(self._delims)
# Mutually recursive definitions. (Minimizing the number of Forward
# elements is important for speed.)
p.auto_delim = Forward()
p.placeable = Forward()
p.named_placeable = Forward()
p.required_group = Forward()
p.optional_group = Forward()
p.token = Forward()
# Workaround for placable being part of a cycle of definitions
# calling `p.placeable("name")` results in a copy, so not guaranteed
# to get the definition added after it is used.
# ref https://github.com/matplotlib/matplotlib/issues/25204
# xref https://github.com/pyparsing/pyparsing/issues/95
p.named_placeable <<= p.placeable
set_names_and_parse_actions() # for mutually recursive definitions.
p.optional_group <<= "{" + ZeroOrMore(p.token)("group") + "}"
p.required_group <<= "{" + OneOrMore(p.token)("group") + "}"
p.customspace = cmd(r"\hspace", "{" + p.float_literal("space") + "}")
p.accent = (
csnames("accent", [*self._accent_map, *self._wide_accents])
- p.named_placeable("sym"))
p.function = csnames("name", self._function_names)
p.group = p.start_group + ZeroOrMore(p.token)("group") + p.end_group
p.unclosed_group = (p.start_group + ZeroOrMore(p.token)("group") + StringEnd())
p.frac = cmd(r"\frac", p.required_group("num") + p.required_group("den"))
p.dfrac = cmd(r"\dfrac", p.required_group("num") + p.required_group("den"))
p.binom = cmd(r"\binom", p.required_group("num") + p.required_group("den"))
p.genfrac = cmd(
r"\genfrac",
"{" + Optional(p.delim)("ldelim") + "}"
+ "{" + Optional(p.delim)("rdelim") + "}"
+ "{" + p.float_literal("rulesize") + "}"
+ "{" + Optional(p.style_literal)("style") + "}"
+ p.required_group("num")
+ p.required_group("den"))
p.sqrt = cmd(
r"\sqrt{value}",
Optional("[" + OneOrMore(NotAny("]") + p.token)("root") + "]")
+ p.required_group("value"))
p.overline = cmd(r"\overline", p.required_group("body"))
p.overset = cmd(
r"\overset",
p.optional_group("annotation") + p.optional_group("body"))
p.underset = cmd(
r"\underset",
p.optional_group("annotation") + p.optional_group("body"))
p.text = cmd(r"\text", QuotedString('{', '\\', endQuoteChar="}"))
p.substack = cmd(r"\substack",
nested_expr(opener="{", closer="}",
content=Group(OneOrMore(p.token)) +
ZeroOrMore(Literal("\\\\").suppress()))("parts"))
p.subsuper = (
(Optional(p.placeable)("nucleus")
+ OneOrMore(oneOf(["_", "^"]) - p.placeable)("subsuper")
+ Regex("'*")("apostrophes"))
| Regex("'+")("apostrophes")
| (p.named_placeable("nucleus") + Regex("'*")("apostrophes"))
)
p.simple = p.space | p.customspace | p.font | p.subsuper
p.token <<= (
p.simple
| p.auto_delim
| p.unclosed_group
| p.unknown_symbol # Must be last
)
p.operatorname = cmd(r"\operatorname", "{" + ZeroOrMore(p.simple)("name") + "}")
p.boldsymbol = cmd(
r"\boldsymbol", "{" + ZeroOrMore(p.simple)("value") + "}")
p.placeable <<= (
p.accent # Must be before symbol as all accents are symbols
| p.symbol # Must be second to catch all named symbols and single
# chars not in a group
| p.function
| p.operatorname
| p.group
| p.frac
| p.dfrac
| p.binom
| p.genfrac
| p.overset
| p.underset
| p.sqrt
| p.overline
| p.text
| p.boldsymbol
| p.substack
)
mdelim = r"\middle" - (p.delim("mdelim") | Error("Expected a delimiter"))
p.auto_delim <<= (
r"\left" - (p.delim("left") | Error("Expected a delimiter"))
+ ZeroOrMore(p.simple | p.auto_delim | mdelim)("mid")
+ r"\right" - (p.delim("right") | Error("Expected a delimiter"))
)
# Leaf definitions.
p.math = OneOrMore(p.token)
p.math_string = QuotedString('$', '\\', unquoteResults=False)
p.non_math = Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
p.main = (
p.non_math + ZeroOrMore(p.math_string + p.non_math) + StringEnd()
)
set_names_and_parse_actions() # for leaf definitions.
self._expression = p.main
self._math_expression = p.math
# To add space to nucleus operators after sub/superscripts
self._in_subscript_or_superscript = False
def parse(self, s: str, fonts_object: Fonts, fontsize: float, dpi: float) -> Hlist:
    """
    Parse expression *s* using the given *fonts_object* for
    output, at the given *fontsize* and *dpi*.
    Returns the parse tree of `Node` instances.
    """
    # Fresh state stack and em-width cache for every parse; state is
    # pushed/popped by the grammar's parse actions as groups open/close.
    self._state_stack = [
        ParserState(fonts_object, 'default', 'rm', fontsize, dpi)]
    self._em_width_cache: dict[tuple[str, float, float], float] = {}
    try:
        result = self._expression.parseString(s)
    except ParseBaseException as err:
        # explain becomes a plain method on pyparsing 3 (err.explain(0)).
        raise ValueError("\n" + ParseException.explain(err, 0)) from None
    # Reset all per-parse state so nothing leaks into the next call.
    self._state_stack = []
    self._in_subscript_or_superscript = False
    # prevent operator spacing from leaking into a new expression
    self._em_width_cache = {}
    # Clear pyparsing's packrat cache, which holds references to tokens.
    ParserElement.resetCache()
    return T.cast(Hlist, result[0])  # Known return type from main.
def get_state(self) -> ParserState:
    """Return the `ParserState` currently on top of the stack."""
    return self._state_stack[-1]
def pop_state(self) -> None:
    """Discard the innermost `ParserState` from the stack."""
    del self._state_stack[-1]
def push_state(self) -> None:
    """Duplicate the current state and push the copy onto the stack."""
    duplicate = self.get_state().copy()
    self._state_stack.append(duplicate)
def main(self, toks: ParseResults) -> list[Hlist]:
    """Wrap the fully parsed token stream into a single horizontal list."""
    tokens = toks.asList()
    return [Hlist(tokens)]
def math_string(self, toks: ParseResults) -> ParseResults:
    """Re-parse the contents of a ``$...$`` group as a math expression."""
    inner = toks[0][1:-1]  # strip the surrounding dollar signs
    return self._math_expression.parseString(inner, parseAll=True)
def math(self, toks: ParseResults) -> T.Any:
    """Box up a math region and pop the state pushed on entering math mode."""
    boxed = Hlist(toks.asList())
    self.pop_state()
    return [boxed]
def non_math(self, toks: ParseResults) -> T.Any:
    """Typeset a run of ordinary text outside ``$...$``, then enter math mode."""
    raw = toks[0].replace(r'\$', '$')  # unescape literal dollar signs
    glyphs = [Char(ch, self.get_state()) for ch in raw]
    hlist = Hlist(glyphs)
    # We're going into math now, so set font to 'it'
    self.push_state()
    self.get_state().font = mpl.rcParams['mathtext.default']
    return [hlist]
# Parse action converting a numeric literal token to a Python float.
float_literal = staticmethod(pyparsing_common.convertToFloat)
def text(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\text{...}``: upright roman text inside math."""
    self.push_state()
    state = self.get_state()
    state.font = 'rm'
    chars = [Char(ch, state) for ch in toks[1]]
    hlist = Hlist(chars)
    self.pop_state()
    return [hlist]
def _make_space(self, percentage: float) -> Kern:
    """Return a `Kern` whose width is *percentage* of one em."""
    # In TeX, an em (the unit usually used to measure horizontal lengths)
    # is not the width of the character 'm'; it is the same in different
    # font styles (e.g. roman or italic). Mathtext, however, uses 'm' in
    # the italic style so that horizontal spaces don't depend on the
    # current font style.
    state = self.get_state()
    key = (state.font, state.fontsize, state.dpi)
    width = self._em_width_cache.get(key)
    if width is None:
        # Measure the advance of an italic 'm' once per (font, size, dpi).
        metrics = state.fontset.get_metrics(
            'it', mpl.rcParams['mathtext.default'], 'm',
            state.fontsize, state.dpi)
        width = metrics.advance
        self._em_width_cache[key] = width
    return Kern(width * percentage)
# Widths of the fixed spacing commands, as fractions of an em
# (1 em = 18 mu in TeX's math units).
_space_widths = {
    r'\,': 0.16667,  # 3/18 em = 3 mu
    r'\thinspace': 0.16667,  # 3/18 em = 3 mu
    r'\/': 0.16667,  # 3/18 em = 3 mu
    r'\>': 0.22222,  # 4/18 em = 4 mu
    r'\:': 0.22222,  # 4/18 em = 4 mu
    r'\;': 0.27778,  # 5/18 em = 5 mu
    r'\ ': 0.33333,  # 6/18 em = 6 mu
    r'~': 0.33333,  # 6/18 em = 6 mu, nonbreakable
    r'\enspace': 0.5,  # 9/18 em = 9 mu
    r'\quad': 1,  # 1 em = 18 mu
    r'\qquad': 2,  # 2 em = 36 mu
    r'\!': -0.16667,  # -3/18 em = -3 mu
}
def space(self, toks: ParseResults) -> T.Any:
    """Convert a named spacing command into a fixed-width kern."""
    fraction_of_em = self._space_widths[toks["space"]]
    return [self._make_space(fraction_of_em)]
def customspace(self, toks: ParseResults) -> T.Any:
    """``\\hspace{n}``: emit a kern *n* ems wide."""
    width = toks["space"]
    return [self._make_space(width)]
def symbol(self, s: str, loc: int,
           toks: ParseResults | dict[str, str]) -> T.Any:
    """
    Typeset a single symbol, adding thin spaces around binary/relation
    operators and after punctuation according to TeX-like rules.
    """
    c = toks["sym"]
    if c == "-":
        # "U+2212 minus sign is the preferred representation of the unary
        # and binary minus sign rather than the ASCII-derived U+002D
        # hyphen-minus, because minus sign is unambiguous and because it
        # is rendered with a more desirable length, usually longer than a
        # hyphen." (https://www.unicode.org/reports/tr25/)
        c = "\N{MINUS SIGN}"
    try:
        char = Char(c, self.get_state())
    except ValueError as err:
        raise ParseFatalException(s, loc,
                                  "Unknown symbol: %s" % c) from err
    if c in self._spaced_symbols:
        # iterate until we find previous character, needed for cases
        # such as $=-2$, ${ -2}$, $ -2$, or $ -2$.
        prev_char = next((c for c in s[:loc][::-1] if c != ' '), '')
        # Binary operators at start of string should not be spaced
        # Also, operators in sub- or superscripts should not be spaced
        if (self._in_subscript_or_superscript or (
                c in self._binary_operators and (
                len(s[:loc].split()) == 0 or prev_char in {
                    '{', *self._left_delims, *self._relation_symbols}))):
            return [char]
        else:
            # Surround the operator with 0.2 em of space on each side.
            return [Hlist([self._make_space(0.2),
                           char,
                           self._make_space(0.2)],
                          do_kern=True)]
    elif c in self._punctuation_symbols:
        prev_char = next((c for c in s[:loc][::-1] if c != ' '), '')
        next_char = next((c for c in s[loc + 1:] if c != ' '), '')
        # Do not space commas between brackets
        if c == ',':
            if prev_char == '{' and next_char == '}':
                return [char]
        # Do not space dots as decimal separators
        if c == '.' and prev_char.isdigit() and next_char.isdigit():
            return [char]
        else:
            # Punctuation gets a trailing 0.2 em space only.
            return [Hlist([char, self._make_space(0.2)], do_kern=True)]
    return [char]
def unknown_symbol(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """Raise a fatal parse error for an unrecognized command or symbol."""
    raise ParseFatalException(s, loc, f"Unknown symbol: {toks['name']}")
# Map accent command names to the glyph drawn above the accented symbol.
_accent_map = {
    r'hat': r'\circumflexaccent',
    r'breve': r'\combiningbreve',
    r'bar': r'\combiningoverline',
    r'grave': r'\combininggraveaccent',
    r'acute': r'\combiningacuteaccent',
    r'tilde': r'\combiningtilde',
    r'dot': r'\combiningdotabove',
    r'ddot': r'\combiningdiaeresis',
    r'dddot': r'\combiningthreedotsabove',
    r'ddddot': r'\combiningfourdotsabove',
    r'vec': r'\combiningrightarrowabove',
    r'"': r'\combiningdiaeresis',
    r"`": r'\combininggraveaccent',
    r"'": r'\combiningacuteaccent',
    r'~': r'\combiningtilde',
    r'.': r'\combiningdotabove',
    r'^': r'\circumflexaccent',
    r'overrightarrow': r'\rightarrow',
    r'overleftarrow': r'\leftarrow',
    r'mathring': r'\circ',
}

# Accents that stretch horizontally to the width of the accented symbol.
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, toks: ParseResults) -> T.Any:
    """Stack an accent glyph above its symbol, separated by a small gap."""
    state = self.get_state()
    thickness = state.get_current_underline_thickness()
    accent = toks["accent"]
    sym = toks["sym"]
    accent_box: Node
    if accent in self._wide_accents:
        # Wide accents are auto-sized to the symbol's width.
        accent_box = AutoWidthChar(
            '\\' + accent, sym.width, state, char_class=Accent)
    else:
        accent_box = Accent(self._accent_map[accent], state)
    if accent == 'mathring':
        # Shrink the ring twice so it reads as an accent, not a circle.
        accent_box.shrink()
        accent_box.shrink()
    # Nudge the accent right by a quarter of the symbol width, then center.
    centered = HCentered([Hbox(sym.width / 4.0), accent_box])
    centered.hpack(sym.width, 'exactly')
    return Vlist([
        centered,
        Vbox(0., thickness * 2.0),
        Hlist([sym])
    ])
def function(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """Typeset a known function name (``\\sin``, ``\\log``, ...) like
    ``\\operatorname``, tagging the result with the function's name."""
    boxed = self.operatorname(s, loc, toks)
    boxed.function_name = toks["name"]
    return boxed
def operatorname(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """
    Typeset ``\\operatorname{...}``: render the name in upright roman and
    add a thin trailing space unless a delimiter or script follows.
    """
    self.push_state()
    state = self.get_state()
    state.font = 'rm'
    hlist_list: list[Node] = []
    # Change the font of Chars, but leave Kerns alone
    name = toks["name"]
    for c in name:
        if isinstance(c, Char):
            c.font = 'rm'
            c._update_metrics()
            hlist_list.append(c)
        elif isinstance(c, str):
            hlist_list.append(Char(c, state))
        else:
            hlist_list.append(c)
    next_char_loc = loc + len(name) + 1
    if isinstance(name, ParseResults):
        # The explicit \operatorname{...} form: skip past the command text.
        next_char_loc += len('operatorname{}')
    next_char = next((c for c in s[next_char_loc:] if c != ' '), '')
    delimiters = self._delims | {'^', '_'}
    if (next_char not in delimiters and
            name not in self._overunder_functions):
        # Add thin space except when followed by parenthesis, bracket, etc.
        hlist_list += [self._make_space(self._space_widths[r'\,'])]
    self.pop_state()
    # if followed by a super/subscript, set flag to true
    # This flag tells subsuper to add space after this operator
    if next_char in {'^', '_'}:
        self._in_subscript_or_superscript = True
    else:
        self._in_subscript_or_superscript = False
    return Hlist(hlist_list)
def start_group(self, toks: ParseResults) -> T.Any:
    """Open a ``{...}`` group: push a state copy and honor a leading
    LaTeX-style font command, if any."""
    self.push_state()
    font = toks.get("font")
    if font:
        self.get_state().font = font
    return []
def group(self, toks: ParseResults) -> T.Any:
    """Box the contents of a completed ``{...}`` group."""
    contents = toks.get("group", [])
    return [Hlist(contents)]
def required_group(self, toks: ParseResults) -> T.Any:
    """Box a mandatory ``{...}`` argument (also reused for optional ones)."""
    contents = toks.get("group", [])
    return Hlist(contents)

optional_group = required_group
def end_group(self) -> T.Any:
    """Close a ``{...}`` group by restoring the previous state."""
    self.pop_state()
    return []
def unclosed_group(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """Raise a fatal parse error for a ``{`` that is never closed."""
    # Report at the end of the string, where the '}' is missing.
    raise ParseFatalException(s, len(s), "Expected '}'")
def font(self, toks: ParseResults) -> T.Any:
    """Handle a font-changing command by mutating the current state."""
    self.get_state().font = toks["font"]
    return []
def is_overunder(self, nucleus: Node) -> bool:
    """Return True if scripts on *nucleus* go above/below it (sum, prod, ...)."""
    if isinstance(nucleus, Char):
        return nucleus.c in self._overunder_symbols
    if isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
        return nucleus.function_name in self._overunder_functions
    return False
def is_dropsub(self, nucleus: Node) -> bool:
    """Return True if subscripts of *nucleus* are dropped (integral-like)."""
    return isinstance(nucleus, Char) and nucleus.c in self._dropsub_symbols
def is_slanted(self, nucleus: Node) -> bool:
    """Return True if *nucleus* is a slanted (italic) glyph."""
    return isinstance(nucleus, Char) and nucleus.is_slanted()
def subsuper(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """
    Attach sub- and superscripts (and prime apostrophes) to a nucleus,
    following TeX's placement rules (TeXbook appendix G style).
    """
    nucleus = toks.get("nucleus", Hbox(0))
    subsuper = toks.get("subsuper", [])
    napostrophes = len(toks.get("apostrophes", []))
    if not subsuper and not napostrophes:
        return nucleus
    sub = super = None
    # Consume (operator, argument) pairs; at most one '_' and one '^'.
    while subsuper:
        op, arg, *subsuper = subsuper
        if op == '_':
            if sub is not None:
                raise ParseFatalException("Double subscript")
            sub = arg
        else:
            if super is not None:
                raise ParseFatalException("Double superscript")
            super = arg
    state = self.get_state()
    rule_thickness = state.fontset.get_underline_thickness(
        state.font, state.fontsize, state.dpi)
    xHeight = state.fontset.get_xheight(
        state.font, state.fontsize, state.dpi)
    if napostrophes:
        # Primes are appended to the superscript as \prime symbols.
        if super is None:
            super = Hlist([])
        for i in range(napostrophes):
            super.children.extend(self.symbol(s, loc, {"sym": "\\prime"}))
        # kern() and hpack() needed to get the metrics right after
        # extending
        super.kern()
        super.hpack()
    # Handle over/under symbols, such as sum or prod
    if self.is_overunder(nucleus):
        vlist = []
        shift = 0.
        width = nucleus.width
        if super is not None:
            super.shrink()
            width = max(width, super.width)
        if sub is not None:
            sub.shrink()
            width = max(width, sub.width)
        vgap = rule_thickness * 3.0
        if super is not None:
            hlist = HCentered([super])
            hlist.hpack(width, 'exactly')
            vlist.extend([hlist, Vbox(0, vgap)])
        hlist = HCentered([nucleus])
        hlist.hpack(width, 'exactly')
        vlist.append(hlist)
        if sub is not None:
            hlist = HCentered([sub])
            hlist.hpack(width, 'exactly')
            vlist.extend([Vbox(0, vgap), hlist])
            # Shift the stack so the nucleus keeps its baseline.
            shift = hlist.height + vgap + nucleus.depth
        vlt = Vlist(vlist)
        vlt.shift_amount = shift
        result = Hlist([vlt])
        return [result]
    # We remove kerning on the last character for consistency (otherwise
    # it will compute kerning based on non-shrunk characters and may put
    # them too close together when superscripted)
    # We change the width of the last character to match the advance to
    # consider some fonts with weird metrics: e.g. stix's f has a width of
    # 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put
    # the superscript at the advance
    last_char = nucleus
    if isinstance(nucleus, Hlist):
        new_children = nucleus.children
        if len(new_children):
            # remove last kern
            if (isinstance(new_children[-1], Kern) and
                    isinstance(new_children[-2], Char)):
                new_children = new_children[:-1]
            last_char = new_children[-1]
            if isinstance(last_char, Char):
                last_char.width = last_char._metrics.advance
        # create new Hlist without kerning
        nucleus = Hlist(new_children, do_kern=False)
    else:
        if isinstance(nucleus, Char):
            last_char.width = last_char._metrics.advance
        nucleus = Hlist([nucleus])
    # Handle regular sub/superscripts
    constants = _get_font_constant_set(state)
    lc_height = last_char.height
    lc_baseline = 0
    if self.is_dropsub(last_char):
        lc_baseline = last_char.depth
    # Compute kerning for sub and super
    superkern = constants.delta * xHeight
    subkern = constants.delta * xHeight
    if self.is_slanted(last_char):
        superkern += constants.delta * xHeight
        superkern += (constants.delta_slanted *
                      (lc_height - xHeight * 2. / 3.))
        if self.is_dropsub(last_char):
            # Integral-like nucleus: scripts straddle the slanted glyph.
            subkern = (3 * constants.delta -
                       constants.delta_integral) * lc_height
            superkern = (3 * constants.delta +
                         constants.delta_integral) * lc_height
        else:
            subkern = 0
    x: List
    if super is None:
        # node757
        # Note: One of super or sub must be a Node if we're in this function, but
        # mypy can't know this, since it can't interpret pyparsing expressions,
        # hence the cast.
        x = Hlist([Kern(subkern), T.cast(Node, sub)])
        x.shrink()
        if self.is_dropsub(last_char):
            shift_down = lc_baseline + constants.subdrop * xHeight
        else:
            shift_down = constants.sub1 * xHeight
        x.shift_amount = shift_down
    else:
        x = Hlist([Kern(superkern), super])
        x.shrink()
        if self.is_dropsub(last_char):
            shift_up = lc_height - constants.subdrop * xHeight
        else:
            shift_up = constants.sup1 * xHeight
        if sub is None:
            x.shift_amount = -shift_up
        else:  # Both sub and superscript
            y = Hlist([Kern(subkern), sub])
            y.shrink()
            if self.is_dropsub(last_char):
                shift_down = lc_baseline + constants.subdrop * xHeight
            else:
                shift_down = constants.sub2 * xHeight
            # If sub and superscript collide, move super up
            clr = (2.0 * rule_thickness -
                   ((shift_up - x.depth) - (y.height - shift_down)))
            if clr > 0.:
                shift_up += clr
            x = Vlist([
                x,
                Kern((shift_up - x.depth) - (y.height - shift_down)),
                y])
            x.shift_amount = shift_down
    if not self.is_dropsub(last_char):
        x.width += constants.script_space * xHeight
    # Do we need to add a space after the nucleus?
    # To find out, check the flag set by operatorname
    spaced_nucleus: list[Node] = [nucleus, x]
    if self._in_subscript_or_superscript:
        spaced_nucleus += [self._make_space(self._space_widths[r'\,'])]
        self._in_subscript_or_superscript = False
    result = Hlist(spaced_nucleus)
    return [result]
def _genfrac(self, ldelim: str, rdelim: str, rule: float | None, style: _MathStyle,
             num: Hlist, den: Hlist) -> T.Any:
    """
    Build a generalized fraction: *num* over *den* separated by a rule of
    thickness *rule*, shrunk *style* times, optionally wrapped in
    auto-sized delimiters *ldelim*/*rdelim*.
    """
    state = self.get_state()
    thickness = state.get_current_underline_thickness()
    # Each style level shrinks the numerator/denominator one step.
    for _ in range(style.value):
        num.shrink()
        den.shrink()
    cnum = HCentered([num])
    cden = HCentered([den])
    width = max(num.width, den.width)
    cnum.hpack(width, 'exactly')
    cden.hpack(width, 'exactly')
    vlist = Vlist([cnum,                      # numerator
                   Vbox(0, thickness * 2.0),  # space
                   Hrule(state, rule),        # rule
                   Vbox(0, thickness * 2.0),  # space
                   cden                       # denominator
                   ])
    # Shift so the fraction line sits in the middle of the
    # equals sign
    metrics = state.fontset.get_metrics(
        state.font, mpl.rcParams['mathtext.default'],
        '=', state.fontsize, state.dpi)
    shift = (cden.height -
             ((metrics.ymax + metrics.ymin) / 2 -
              thickness * 3.0))
    vlist.shift_amount = shift
    result = [Hlist([vlist, Hbox(thickness * 2.)])]
    if ldelim or rdelim:
        # '.' is the "empty delimiter" marker for _auto_sized_delimiter.
        if ldelim == '':
            ldelim = '.'
        if rdelim == '':
            rdelim = '.'
        return self._auto_sized_delimiter(ldelim,
                                          T.cast(list[Box | Char | str],
                                                 result),
                                          rdelim)
    return result
def style_literal(self, toks: ParseResults) -> T.Any:
    """Convert a ``\\genfrac`` style digit into a `_MathStyle` member."""
    level = int(toks["style_literal"])
    return self._MathStyle(level)
def genfrac(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\genfrac`` from its parsed arguments."""
    left = toks.get("ldelim", "")
    right = toks.get("rdelim", "")
    style = toks.get("style", self._MathStyle.TEXTSTYLE)
    return self._genfrac(left, right, toks["rulesize"], style,
                         toks["num"], toks["den"])
def frac(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\frac{num}{den}`` as a text-style fraction."""
    rule = self.get_state().get_current_underline_thickness()
    return self._genfrac("", "", rule, self._MathStyle.TEXTSTYLE,
                         toks["num"], toks["den"])
def dfrac(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\dfrac{num}{den}`` as a display-style fraction."""
    rule = self.get_state().get_current_underline_thickness()
    return self._genfrac("", "", rule, self._MathStyle.DISPLAYSTYLE,
                         toks["num"], toks["den"])
def binom(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\binom{num}{den}``: a rule-less fraction in parentheses."""
    return self._genfrac("(", ")", 0, self._MathStyle.TEXTSTYLE,
                         toks["num"], toks["den"])
def _genset(self, s: str, loc: int, toks: ParseResults) -> T.Any:
    """
    Shared implementation of ``\\overset`` and ``\\underset``: stack
    *annotation* above (or below) *body*, centered to a common width.
    """
    annotation = toks["annotation"]
    body = toks["body"]
    thickness = self.get_state().get_current_underline_thickness()
    annotation.shrink()
    centered_annotation = HCentered([annotation])
    centered_body = HCentered([body])
    width = max(centered_annotation.width, centered_body.width)
    centered_annotation.hpack(width, 'exactly')
    centered_body.hpack(width, 'exactly')
    vgap = thickness * 3
    # Dispatch on the command text: "\u..." means \underset.
    if s[loc + 1] == "u":  # \underset
        vlist = Vlist([
            centered_body,        # body
            Vbox(0, vgap),        # space
            centered_annotation   # annotation
        ])
        # Shift so the body sits in the same vertical position
        vlist.shift_amount = centered_body.depth + centered_annotation.height + vgap
    else:  # \overset
        vlist = Vlist([
            centered_annotation,  # annotation
            Vbox(0, vgap),        # space
            centered_body         # body
        ])
    # To add horizontal gap between symbols: wrap the Vlist into
    # an Hlist and extend it with an Hbox(0, horizontal_gap)
    return vlist

overset = underset = _genset
def sqrt(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\sqrt[root]{value}``: radical sign, rule, and body."""
    root = toks.get("root")
    body = toks["value"]
    state = self.get_state()
    thickness = state.get_current_underline_thickness()
    # Determine the height of the body, and add a little extra to
    # the height so it doesn't seem cramped
    height = body.height - body.shift_amount + thickness * 5.0
    depth = body.depth + body.shift_amount
    check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
    height = check.height - check.shift_amount
    depth = check.depth + check.shift_amount
    # Put a little extra space to the left and right of the body
    padded_body = Hlist([Hbox(2 * thickness), body, Hbox(2 * thickness)])
    rightside = Vlist([Hrule(state), Glue('fill'), padded_body])
    # Stretch the glue between the hrule and the body
    rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                    'exactly', depth)
    # Add the root and shift it upward so it is above the tick.
    # The value of 0.6 is a hard-coded hack ;)
    if not root:
        # No explicit root index: reserve half the radical's width.
        root = Box(check.width * 0.5, 0., 0.)
    else:
        root = Hlist(root)
        root.shrink()
        root.shrink()
    root_vlist = Vlist([Hlist([root])])
    root_vlist.shift_amount = -height * 0.6
    hlist = Hlist([root_vlist,  # Root
                   # Negative kerning to put root over tick
                   Kern(-check.width * 0.5),
                   check,       # Check
                   rightside])  # Body
    return [hlist]
def overline(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\overline{body}``: draw a rule above *body*."""
    body = toks["body"]
    state = self.get_state()
    thickness = state.get_current_underline_thickness()
    # Extra headroom so the rule does not crowd the body.
    height = body.height - body.shift_amount + thickness * 3.0
    depth = body.depth + body.shift_amount
    # Place overline above body
    rightside = Vlist([Hrule(state), Glue('fill'), Hlist([body])])
    # Stretch the glue between the hrule and the body
    rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                    'exactly', depth)
    hlist = Hlist([rightside])
    return [hlist]
def _auto_sized_delimiter(self, front: str,
                          middle: list[Box | Char | str],
                          back: str) -> T.Any:
    """
    Wrap *middle* in delimiters *front*/*back* sized to its extent;
    ``\\middle`` markers inside are replaced by sized delimiters too.
    """
    state = self.get_state()
    if len(middle):
        height = max([x.height for x in middle if not isinstance(x, str)])
        depth = max([x.depth for x in middle if not isinstance(x, str)])
        factor = None
        for idx, el in enumerate(middle):
            if el == r'\middle':
                c = T.cast(str, middle[idx + 1])  # Should be one of p.delims.
                if c != '.':
                    middle[idx + 1] = AutoHeightChar(
                        c, height, depth, state, factor=factor)
                else:
                    # '.' is the empty delimiter: drop it entirely.
                    middle.remove(c)
                del middle[idx]
        # There should only be \middle and its delimiter as str, which have
        # just been removed.
        middle_part = T.cast(list[Box | Char], middle)
    else:
        height = 0
        depth = 0
        factor = 1.0
        middle_part = []
    parts: list[Node] = []
    # \left. and \right. aren't supposed to produce any symbols
    if front != '.':
        parts.append(
            AutoHeightChar(front, height, depth, state, factor=factor))
    parts.extend(middle_part)
    if back != '.':
        parts.append(
            AutoHeightChar(back, height, depth, state, factor=factor))
    hlist = Hlist(parts)
    return hlist
def auto_delim(self, toks: ParseResults) -> T.Any:
    """Build an auto-sized ``\\left...\\right`` group."""
    # if "mid" in toks ... can be removed when requiring pyparsing 3.
    middle = toks["mid"].asList() if "mid" in toks else []
    return self._auto_sized_delimiter(toks["left"], middle, toks["right"])
def boldsymbol(self, toks: ParseResults) -> T.Any:
    """
    Typeset ``\\boldsymbol{...}``: render each character bold, using
    bold-italic for latin letters and lowercase greek.
    """
    self.push_state()
    state = self.get_state()
    hlist: list[Node] = []
    name = toks["value"]
    for c in name:
        if isinstance(c, Hlist):
            # Accented characters arrive wrapped in an Hlist; embolden the
            # base character (child 1) in place.
            k = c.children[1]
            if isinstance(k, Char):
                k.font = "bf"
                k._update_metrics()
            hlist.append(c)
        elif isinstance(c, Char):
            c.font = "bf"
            if (c.c in self._latin_alphabets or
                    c.c[1:] in self._small_greek):
                c.font = "bfit"
            # Recompute metrics once, after the final font choice.  (The
            # previous version called _update_metrics() twice in a row,
            # which was redundant.)
            c._update_metrics()
            hlist.append(c)
        else:
            # Kerns and other nodes pass through unchanged.
            hlist.append(c)
    self.pop_state()
    return Hlist(hlist)
def substack(self, toks: ParseResults) -> T.Any:
    """Typeset ``\\substack{...}``: stack its rows centered to equal width."""
    parts = toks["parts"]
    state = self.get_state()
    thickness = state.get_current_underline_thickness()
    # One Hlist per row of the stack.
    hlist = [Hlist(k) for k in parts[0]]
    max_width = max(map(lambda c: c.width, hlist))
    vlist = []
    for sub in hlist:
        cp = HCentered([sub])
        cp.hpack(max_width, 'exactly')
        vlist.append(cp)
    # Interleave rows with vertical gaps, then drop the trailing gap.
    stack = [val
             for pair in zip(vlist, [Vbox(0, thickness * 2)] * len(vlist))
             for val in pair]
    del stack[-1]
    vlt = Vlist(stack)
    result = [Hlist([vlt])]
    return result
"""
Manage figures for the pyplot interface.
"""
import atexit
from collections import OrderedDict
class Gcf:
    """
    Singleton to maintain the relation between figures and their managers, and
    keep track of an "active" figure and manager.

    The canvas of a figure created through pyplot is associated with a figure
    manager, which handles the interaction between the figure and the backend.
    pyplot keeps track of figure managers using an identifier, the "figure
    number" or "manager number" (which can actually be any hashable value);
    this number is available as the :attr:`number` attribute of the manager.

    This class is never instantiated; it consists of an `OrderedDict` mapping
    figure/manager numbers to managers, and a set of class methods that
    manipulate this `OrderedDict`.

    Attributes
    ----------
    figs : OrderedDict
        `OrderedDict` mapping numbers to managers; the active manager is at the
        end.
    """
    figs = OrderedDict()

    @classmethod
    def get_fig_manager(cls, num):
        """
        If manager number *num* exists, make it the active one and return it;
        otherwise return *None*.
        """
        manager = cls.figs.get(num, None)
        if manager is not None:
            cls.set_active(manager)
        return manager

    @classmethod
    def destroy(cls, num):
        """
        Destroy manager *num* -- either a manager instance or a manager number.

        In the interactive backends, this is bound to the window "destroy" and
        "delete" events.

        It is recommended to pass a manager instance, to avoid confusion when
        two managers share the same number.
        """
        # Duck-type "is this already a manager?" by its attributes.
        if all(hasattr(num, attr) for attr in ["num", "destroy"]):
            manager = num
            if cls.figs.get(manager.num) is manager:
                cls.figs.pop(manager.num)
        else:
            try:
                manager = cls.figs.pop(num)
            except KeyError:
                return
        # Disconnect the button-press callback before destroying so no
        # event can re-activate a dead manager.
        if hasattr(manager, "_cidgcf"):
            manager.canvas.mpl_disconnect(manager._cidgcf)
        manager.destroy()

    @classmethod
    def destroy_fig(cls, fig):
        """Destroy figure *fig*."""
        num = next((manager.num for manager in cls.figs.values()
                    if manager.canvas.figure == fig), None)
        if num is not None:
            cls.destroy(num)

    @classmethod
    def destroy_all(cls):
        """Destroy all figures."""
        # Iterate over a copy: manager.destroy() may mutate cls.figs.
        for manager in list(cls.figs.values()):
            manager.canvas.mpl_disconnect(manager._cidgcf)
            manager.destroy()
        cls.figs.clear()

    @classmethod
    def has_fignum(cls, num):
        """Return whether figure number *num* exists."""
        return num in cls.figs

    @classmethod
    def get_all_fig_managers(cls):
        """Return a list of figure managers."""
        return list(cls.figs.values())

    @classmethod
    def get_num_fig_managers(cls):
        """Return the number of figures being managed."""
        return len(cls.figs)

    @classmethod
    def get_active(cls):
        """Return the active manager, or *None* if there is no manager."""
        # The active manager is kept at the end of the OrderedDict.
        return next(reversed(cls.figs.values())) if cls.figs else None

    @classmethod
    def _set_new_active_manager(cls, manager):
        """Adopt *manager* into pyplot and make it the active manager."""
        if not hasattr(manager, "_cidgcf"):
            # Clicking a figure's window makes it the active manager.
            manager._cidgcf = manager.canvas.mpl_connect(
                "button_press_event", lambda event: cls.set_active(manager))
        fig = manager.canvas.figure
        fig._number = manager.num
        label = fig.get_label()
        if label:
            manager.set_window_title(label)
        cls.set_active(manager)

    @classmethod
    def set_active(cls, manager):
        """Make *manager* the active manager."""
        cls.figs[manager.num] = manager
        cls.figs.move_to_end(manager.num)

    @classmethod
    def draw_all(cls, force=False):
        """
        Redraw all stale managed figures, or, if *force* is True, all managed
        figures.
        """
        for manager in cls.get_all_fig_managers():
            if force or manager.canvas.figure.stale:
                manager.canvas.draw_idle()
# Ensure all figure managers are cleanly destroyed at interpreter exit.
atexit.register(Gcf.destroy_all)
"""
Low-level text helper utilities.
"""
from __future__ import annotations
import dataclasses
from . import _api
from .ft2font import FT2Font, Kerning, LoadFlags
@dataclasses.dataclass(frozen=True)
class LayoutItem:
    """One positioned glyph, as yielded by `layout`."""
    ft_object: FT2Font  # font actually used for this char (after fallback)
    char: str  # the source character
    glyph_idx: int  # index of the glyph for *char* in *ft_object*
    x: float  # horizontal position of the glyph (cumulative advance + kerning)
    prev_kern: float  # kerning applied between the previous glyph and this one
def warn_on_missing_glyph(codepoint, fontnames):
    """
    Emit an external warning that *codepoint* is missing from *fontnames*,
    plus a hint when the codepoint belongs to an unsupported script block.
    """
    _api.warn_external(
        f"Glyph {codepoint} "
        f"({chr(codepoint).encode('ascii', 'namereplace').decode('ascii')}) "
        f"missing from font(s) {fontnames}.")
    # Identify the Unicode block, if it is one of the complex scripts that
    # Matplotlib does not shape natively.
    block = ("Hebrew" if 0x0590 <= codepoint <= 0x05ff else
             "Arabic" if 0x0600 <= codepoint <= 0x06ff else
             "Devanagari" if 0x0900 <= codepoint <= 0x097f else
             "Bengali" if 0x0980 <= codepoint <= 0x09ff else
             "Gurmukhi" if 0x0a00 <= codepoint <= 0x0a7f else
             "Gujarati" if 0x0a80 <= codepoint <= 0x0aff else
             "Oriya" if 0x0b00 <= codepoint <= 0x0b7f else
             "Tamil" if 0x0b80 <= codepoint <= 0x0bff else
             "Telugu" if 0x0c00 <= codepoint <= 0x0c7f else
             "Kannada" if 0x0c80 <= codepoint <= 0x0cff else
             "Malayalam" if 0x0d00 <= codepoint <= 0x0d7f else
             "Sinhala" if 0x0d80 <= codepoint <= 0x0dff else
             None)
    if block:
        _api.warn_external(
            f"Matplotlib currently does not support {block} natively.")
def layout(string, font, *, kern_mode=Kerning.DEFAULT):
    """
    Render *string* with *font*.

    For each character in *string*, yield a LayoutItem instance. When such an instance
    is yielded, the font's glyph is set to the corresponding character.

    Parameters
    ----------
    string : str
        The string to be rendered.
    font : FT2Font
        The font.
    kern_mode : Kerning
        A FreeType kerning mode.

    Yields
    ------
    LayoutItem
    """
    x = 0
    prev_glyph_idx = None
    # Map each character to the font that actually provides it (fallback).
    char_to_font = font._get_fontmap(string)
    base_font = font
    for char in string:
        # This has done the fallback logic
        font = char_to_font.get(char, base_font)
        glyph_idx = font.get_char_index(ord(char))
        # Kerning against the previous glyph; /64 converts from FreeType's
        # 26.6 fixed-point units — presumably to points; TODO confirm unit.
        kern = (
            base_font.get_kerning(prev_glyph_idx, glyph_idx, kern_mode) / 64
            if prev_glyph_idx is not None else 0.
        )
        x += kern
        glyph = font.load_glyph(glyph_idx, flags=LoadFlags.NO_HINTING)
        yield LayoutItem(font, char, glyph_idx, x, kern)
        # linearHoriAdvance is 16.16 fixed point, hence the 65536 divisor.
        x += glyph.linearHoriAdvance / 65536
        prev_glyph_idx = glyph_idx
# venv\Lib\site-packages\matplotlib\_tight_bbox.py
"""
Helper module for the *bbox_inches* parameter in `.Figure.savefig`.
"""
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch. While the figure size
    changes, the scale of the original figure is conserved. A
    function which restores the original values are returned.
    """
    # Capture everything that will be mutated, so restore_bbox can undo it.
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    _boxout = fig.transFigure._boxout
    old_aspect = []
    locator_list = []
    # Marks Axes that had no instance-level apply_aspect override.
    sentinel = object()
    for ax in fig.axes:
        locator = ax.get_axes_locator()
        if locator is not None:
            ax.apply_aspect(locator(ax, None))
        locator_list.append(locator)
        current_pos = ax.get_position(original=False).frozen()
        # Freeze each Axes at its current position (default bound via _pos).
        ax.set_axes_locator(lambda a, r, _pos=current_pos: _pos)
        # override the method that enforces the aspect ratio on the Axes
        if 'apply_aspect' in ax.__dict__:
            old_aspect.append(ax.apply_aspect)
        else:
            old_aspect.append(sentinel)
        ax.apply_aspect = lambda pos=None: None

    def restore_bbox():
        # Undo every mutation made above, in reverse.
        for ax, loc, aspect in zip(fig.axes, locator_list, old_aspect):
            ax.set_axes_locator(loc)
            if aspect is sentinel:
                # delete our no-op function which un-hides the original method
                del ax.apply_aspect
            else:
                ax.apply_aspect = aspect
        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    if fixed_dpi is None:
        fixed_dpi = fig.dpi
    tr = Affine2D().scale(fixed_dpi)
    dpi_scale = fixed_dpi / fig.dpi
    fig.bbox_inches = Bbox.from_bounds(0, 0, *bbox_inches.size)
    x0, y0 = tr.transform(bbox_inches.p0)
    w1, h1 = fig.bbox.size * dpi_scale
    # Shift the figure so the requested area starts at the output origin.
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)
    return restore_bbox
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
    """
    Re-adjust the figure bbox after a dpi change during drawing (e.g.
    rasterizing): restore the original layout, then re-apply *bbox_inches*
    at the new dpi.
    """
    bbox_inches, restore = bbox_inches_restore
    restore()
    new_restore = adjust_bbox(fig, bbox_inches, fixed_dpi)
    return bbox_inches, new_restore
"""
Routines to adjust subplot params so that subplots are
nicely fit in the figure. In doing so, only axis labels, tick labels, Axes
titles and offsetboxes that are anchored to Axes are currently considered.
Internally, this module assumes that the margins (left margin, etc.) which are
differences between ``Axes.get_tightbbox`` and ``Axes.bbox`` are independent of
Axes position. This may fail if ``Axes.adjustable`` is ``datalim`` as well as
such cases as when left or right margin are affected by xlabel.
"""
import numpy as np
import matplotlib as mpl
from matplotlib import _api, artist as martist
from matplotlib.font_manager import FontProperties
from matplotlib.transforms import Bbox
def _auto_adjust_subplotpars(
        fig, renderer, shape, span_pairs, subplot_list,
        ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return a dict of subplot parameters to adjust spacing between subplots
    or ``None`` if resulting Axes would have zero height or width.

    Note that this function ignores geometry information of subplot itself, but
    uses what is given by the *shape* and *subplot_list* parameters. Also, the
    results could be incorrect if some subplots have ``adjustable=datalim``.

    Parameters
    ----------
    shape : tuple[int, int]
        Number of rows and columns of the grid.
    span_pairs : list[tuple[slice, slice]]
        List of rowspans and colspans occupied by each subplot.
    subplot_list : list of subplots
        List of subplots that will be used to calculate optimal subplot_params.
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font size.
    h_pad, w_pad : float
        Padding (height/width) between edges of adjacent subplots, as a
        fraction of the font size. Defaults to *pad*.
    rect : tuple
        (left, bottom, right, top), default: None.
    """
    rows, cols = shape
    # All pads are specified in fractions of the font size; convert to inches.
    font_size_inch = (FontProperties(
        size=mpl.rcParams["font.size"]).get_size_in_points() / 72)
    pad_inch = pad * font_size_inch
    vpad_inch = h_pad * font_size_inch if h_pad is not None else pad_inch
    hpad_inch = w_pad * font_size_inch if w_pad is not None else pad_inch
    if len(span_pairs) != len(subplot_list) or len(subplot_list) == 0:
        raise ValueError
    if rect is None:
        margin_left = margin_bottom = margin_right = margin_top = None
    else:
        margin_left, margin_bottom, _right, _top = rect
        margin_right = 1 - _right if _right else None
        margin_top = 1 - _top if _top else None
    # Accumulated decoration overhang at each grid boundary.
    vspaces = np.zeros((rows + 1, cols))
    hspaces = np.zeros((rows, cols + 1))
    if ax_bbox_list is None:
        ax_bbox_list = [
            Bbox.union([ax.get_position(original=True) for ax in subplots])
            for subplots in subplot_list]
    for subplots, ax_bbox, (rowspan, colspan) in zip(
            subplot_list, ax_bbox_list, span_pairs):
        if all(not ax.get_visible() for ax in subplots):
            continue
        bb = []
        for ax in subplots:
            if ax.get_visible():
                bb += [martist._get_tightbbox_for_layout_only(ax, renderer)]
        tight_bbox_raw = Bbox.union(bb)
        tight_bbox = fig.transFigure.inverted().transform_bbox(tight_bbox_raw)
        # Overhang of decorations past the Axes box, per side.
        hspaces[rowspan, colspan.start] += ax_bbox.xmin - tight_bbox.xmin  # l
        hspaces[rowspan, colspan.stop] += tight_bbox.xmax - ax_bbox.xmax  # r
        vspaces[rowspan.start, colspan] += tight_bbox.ymax - ax_bbox.ymax  # t
        vspaces[rowspan.stop, colspan] += ax_bbox.ymin - tight_bbox.ymin  # b
    fig_width_inch, fig_height_inch = fig.get_size_inches()
    # margins can be negative for Axes with aspect applied, so use max(, 0) to
    # make them nonnegative.
    if not margin_left:
        margin_left = max(hspaces[:, 0].max(), 0) + pad_inch/fig_width_inch
        suplabel = fig._supylabel
        if suplabel and suplabel.get_in_layout():
            rel_width = fig.transFigure.inverted().transform_bbox(
                suplabel.get_window_extent(renderer)).width
            margin_left += rel_width + pad_inch/fig_width_inch
    if not margin_right:
        margin_right = max(hspaces[:, -1].max(), 0) + pad_inch/fig_width_inch
    if not margin_top:
        margin_top = max(vspaces[0, :].max(), 0) + pad_inch/fig_height_inch
        if fig._suptitle and fig._suptitle.get_in_layout():
            rel_height = fig.transFigure.inverted().transform_bbox(
                fig._suptitle.get_window_extent(renderer)).height
            margin_top += rel_height + pad_inch/fig_height_inch
    if not margin_bottom:
        margin_bottom = max(vspaces[-1, :].max(), 0) + pad_inch/fig_height_inch
        suplabel = fig._supxlabel
        if suplabel and suplabel.get_in_layout():
            rel_height = fig.transFigure.inverted().transform_bbox(
                suplabel.get_window_extent(renderer)).height
            margin_bottom += rel_height + pad_inch/fig_height_inch
    if margin_left + margin_right >= 1:
        _api.warn_external('Tight layout not applied. The left and right '
                           'margins cannot be made large enough to '
                           'accommodate all Axes decorations.')
        return None
    if margin_bottom + margin_top >= 1:
        _api.warn_external('Tight layout not applied. The bottom and top '
                           'margins cannot be made large enough to '
                           'accommodate all Axes decorations.')
        return None
    kwargs = dict(left=margin_left,
                  right=1 - margin_right,
                  bottom=margin_bottom,
                  top=1 - margin_top)
    if cols > 1:
        hspace = hspaces[:, 1:-1].max() + hpad_inch / fig_width_inch
        # axes widths:
        h_axes = (1 - margin_right - margin_left - hspace * (cols - 1)) / cols
        if h_axes < 0:
            _api.warn_external('Tight layout not applied. tight_layout '
                               'cannot make Axes width small enough to '
                               'accommodate all Axes decorations')
            return None
        else:
            # wspace/hspace are expressed relative to the Axes size.
            kwargs["wspace"] = hspace / h_axes
    if rows > 1:
        vspace = vspaces[1:-1, :].max() + vpad_inch / fig_height_inch
        v_axes = (1 - margin_top - margin_bottom - vspace * (rows - 1)) / rows
        if v_axes < 0:
            _api.warn_external('Tight layout not applied. tight_layout '
                               'cannot make Axes height small enough to '
                               'accommodate all Axes decorations.')
            return None
        else:
            kwargs["hspace"] = vspace / v_axes
    return kwargs
def get_subplotspec_list(axes_list, grid_spec=None):
    """
    Return a list of subplotspecs, one per Axes in *axes_list*.

    For an Axes that does not support subplotspec, None is inserted in the
    list.  If *grid_spec* is given, None is also inserted for Axes whose
    topmost subplotspec does not come from that gridspec; otherwise None is
    inserted for Axes whose gridspec has locally modified subplot parameters.
    """
    specs = []
    for ax in axes_list:
        # Query the locator if there is one, otherwise the Axes itself.
        candidate = ax.get_axes_locator()
        if candidate is None:
            candidate = ax
        getter = getattr(candidate, "get_subplotspec", None)
        spec = getter() if getter is not None else None
        if spec is not None:
            spec = spec.get_topmost_subplotspec()
            gs = spec.get_gridspec()
            if grid_spec is not None:
                if gs != grid_spec:
                    spec = None
            elif gs.locally_modified_subplot_params():
                spec = None
        specs.append(spec)
    return specs
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
                            pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return subplot parameters for tight-layouted-figure with specified padding.

    Parameters
    ----------
    fig : Figure
    axes_list : list of Axes
    subplotspec_list : list of `.SubplotSpec`
        The subplotspecs of each Axes.
    renderer : renderer
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font size.
    h_pad, w_pad : float
        Padding (height/width) between edges of adjacent subplots.  Defaults
        to *pad*.
    rect : tuple (left, bottom, right, top), default: None.
        rectangle in normalized figure coordinates
        that the whole subplots area (including labels) will fit into.
        Defaults to using the entire figure.

    Returns
    -------
    subplotspec or None
        subplotspec kwargs to be passed to `.Figure.subplots_adjust` or
        None if tight_layout could not be accomplished.
    """
    # Multiple Axes can share same subplotspec (e.g., if using axes_grid1);
    # we need to group them together.
    ss_to_subplots = {ss: [] for ss in subplotspec_list}
    for ax, ss in zip(axes_list, subplotspec_list):
        ss_to_subplots[ss].append(ax)
    # None entries mark Axes without a usable subplotspec (see
    # get_subplotspec_list); drop them with a warning.
    if ss_to_subplots.pop(None, None):
        _api.warn_external(
            "This figure includes Axes that are not compatible with "
            "tight_layout, so results might be incorrect.")
    if not ss_to_subplots:
        return {}
    subplot_list = list(ss_to_subplots.values())
    ax_bbox_list = [ss.get_position(fig) for ss in ss_to_subplots]
    max_nrows = max(ss.get_gridspec().nrows for ss in ss_to_subplots)
    max_ncols = max(ss.get_gridspec().ncols for ss in ss_to_subplots)
    span_pairs = []
    for ss in ss_to_subplots:
        # The intent here is to support Axes from different gridspecs where
        # one's nrows (or ncols) is a multiple of the other (e.g. 2 and 4),
        # but this doesn't actually work because the computed wspace, in
        # relative-axes-height, corresponds to different physical spacings for
        # the 2-row grid and the 4-row grid.  Still, this code is left, mostly
        # for backcompat.
        rows, cols = ss.get_gridspec().get_geometry()
        div_row, mod_row = divmod(max_nrows, rows)
        div_col, mod_col = divmod(max_ncols, cols)
        if mod_row != 0:
            _api.warn_external('tight_layout not applied: number of rows '
                               'in subplot specifications must be '
                               'multiples of one another.')
            return {}
        if mod_col != 0:
            _api.warn_external('tight_layout not applied: number of '
                               'columns in subplot specifications must be '
                               'multiples of one another.')
            return {}
        # Rescale this gridspec's row/col spans into the common
        # max_nrows x max_ncols grid.
        span_pairs.append((
            slice(ss.rowspan.start * div_row, ss.rowspan.stop * div_row),
            slice(ss.colspan.start * div_col, ss.colspan.stop * div_col)))
    kwargs = _auto_adjust_subplotpars(fig, renderer,
                                      shape=(max_nrows, max_ncols),
                                      span_pairs=span_pairs,
                                      subplot_list=subplot_list,
                                      ax_bbox_list=ax_bbox_list,
                                      pad=pad, h_pad=h_pad, w_pad=w_pad)
    # kwargs can be none if tight_layout fails...
    if rect is not None and kwargs is not None:
        # if rect is given, the whole subplots area (including
        # labels) will fit into the rect instead of the
        # figure. Note that the rect argument of
        # *auto_adjust_subplotpars* specify the area that will be
        # covered by the total area of axes.bbox. Thus we call
        # auto_adjust_subplotpars twice, where the second run
        # with adjusted rect parameters.
        left, bottom, right, top = rect
        if left is not None:
            left += kwargs["left"]
        if bottom is not None:
            bottom += kwargs["bottom"]
        if right is not None:
            right -= (1 - kwargs["right"])
        if top is not None:
            top -= (1 - kwargs["top"])
        kwargs = _auto_adjust_subplotpars(fig, renderer,
                                          shape=(max_nrows, max_ncols),
                                          span_pairs=span_pairs,
                                          subplot_list=subplot_list,
                                          ax_bbox_list=ax_bbox_list,
                                          pad=pad, h_pad=h_pad, w_pad=w_pad,
                                          rect=(left, bottom, right, top))
    return kwargs
venv\Lib\site-packages\matplotlib\_type1font.py
"""
A class representing a Type 1 font.
This version reads pfa and pfb files and splits them for embedding in
pdf files. It also supports SlantFont and ExtendFont transformations,
similarly to pdfTeX and friends. There is no support yet for subsetting.
Usage::
font = Type1Font(filename)
clear_part, encrypted_part, finale = font.parts
slanted_font = font.transform({'slant': 0.167})
extended_font = font.transform({'extend': 1.2})
Sources:
* Adobe Technical Note #5040, Supporting Downloadable PostScript
Language Fonts.
* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
v1.1, 1993. ISBN 0-201-57044-0.
"""
from __future__ import annotations
import binascii
import functools
import logging
import re
import string
import struct
import typing as T
import numpy as np
from matplotlib.cbook import _format_approx
from . import _api
_log = logging.getLogger(__name__)
class _Token:
    """
    A token in a PostScript stream.

    Attributes
    ----------
    pos : int
        Position, i.e. offset from the beginning of the data.
    raw : str
        Raw text of the token.
    kind : str
        Description of the token (for debugging or testing).
    """
    __slots__ = ('pos', 'raw')
    # Overridden by each concrete subclass; '?' marks the generic base.
    kind = '?'

    def __init__(self, pos, raw):
        _log.debug('type1font._Token %s at %d: %r', self.kind, pos, raw)
        self.pos = pos
        self.raw = raw

    def __str__(self):
        return f"<{self.kind} {self.raw} @{self.pos}>"

    def endpos(self):
        """Position one past the end of the token"""
        return self.pos + len(self.raw)

    # The is_* predicates default to False here and are overridden by the
    # relevant subclasses.
    def is_keyword(self, *names):
        """Is this a name token with one of the names?"""
        return False

    def is_slash_name(self):
        """Is this a name token that starts with a slash?"""
        return False

    def is_delim(self):
        """Is this a delimiter token?"""
        return False

    def is_number(self):
        """Is this a number token?"""
        return False

    def value(self):
        # Default: a token's value is simply its raw text.
        return self.raw
class _NameToken(_Token):
    """A token for a PostScript name, possibly slash-prefixed."""
    kind = 'name'

    def is_slash_name(self):
        # A literal name carries a leading slash; an executable one does not.
        return self.raw[:1] == '/'

    def value(self):
        # The name without its leading slash.
        return self.raw[1:]
class _BooleanToken(_Token):
    """A token for the literals ``true`` and ``false``."""
    kind = 'boolean'

    def value(self):
        # Decode to a Python bool.
        return self.raw == 'true'
class _KeywordToken(_Token):
    """A token for an executable name (operator or other keyword)."""
    kind = 'keyword'

    def is_keyword(self, *names):
        # True iff this keyword is one of *names*.
        return self.raw in names
class _DelimiterToken(_Token):
    """A token for a bracketing delimiter: ``[ ] { } << >>``."""
    kind = 'delimiter'

    def is_delim(self):
        return True

    def opposite(self):
        """Return the matching partner of this delimiter."""
        for opener, closer in (('[', ']'), ('{', '}'), ('<<', '>>')):
            if self.raw == opener:
                return closer
            if self.raw == closer:
                return opener
        # Same exception type and payload as a failed dict lookup.
        raise KeyError(self.raw)
class _WhitespaceToken(_Token):
    """A token for a run of whitespace or a %-comment (see _tokenize)."""
    kind = 'whitespace'
class _StringToken(_Token):
    """A token for a PostScript string, literal ``(...)`` or hex ``<...>``."""
    kind = 'string'
    # One backslash escape: a named escape character or 1-3 octal digits.
    _escapes_re = re.compile(r'\\([\\()nrtbf]|[0-7]{1,3})')
    _replacements = {'\\': '\\', '(': '(', ')': ')', 'n': '\n',
                     'r': '\r', 't': '\t', 'b': '\b', 'f': '\f'}
    _ws_re = re.compile('[\0\t\r\f\n ]')

    @classmethod
    def _escape(cls, match):
        # Decode one escape: look up the named form, else treat as octal.
        group = match.group(1)
        try:
            return cls._replacements[group]
        except KeyError:
            return chr(int(group, 8))

    # NOTE(review): lru_cache on an instance method keeps every token alive
    # for the cache's lifetime (ruff B019); presumably acceptable because
    # tokens are short-lived here -- confirm before changing.
    @functools.lru_cache
    def value(self):
        if self.raw[0] == '(':
            # Literal string: strip parens, decode backslash escapes.
            return self._escapes_re.sub(self._escape, self.raw[1:-1])
        else:
            # Hex string: drop whitespace, pad odd length, then unhexlify.
            data = self._ws_re.sub('', self.raw[1:-1])
            if len(data) % 2 == 1:
                data += '0'
            return binascii.unhexlify(data)
class _BinaryToken(_Token):
    """A token for raw binary data, produced on the consumer's request."""
    kind = 'binary'

    def value(self):
        # Drop the first byte.  NOTE(review): presumably the separator byte
        # after the RD-type operator, given the callers' send(1 + nbytes)
        # pattern -- confirm.
        return self.raw[1:]
class _NumberToken(_Token):
    """A token for an integer or real number."""
    kind = 'number'

    def is_number(self):
        return True

    def value(self):
        # A decimal point marks a real; everything else is parsed as int.
        return float(self.raw) if '.' in self.raw else int(self.raw)
def _tokenize(data: bytes, skip_ws: bool) -> T.Generator[_Token, int, None]:
    """
    A generator that produces _Token instances from Type-1 font code.

    The consumer of the generator may send an integer to the tokenizer to
    indicate that the next token should be _BinaryToken of the given length.

    Parameters
    ----------
    data : bytes
        The data of the font to tokenize.
    skip_ws : bool
        If true, the generator will drop any _WhitespaceTokens from the output.
    """
    text = data.decode('ascii', 'replace')
    whitespace_or_comment_re = re.compile(r'[\0\t\r\f\n ]+|%[^\r\n]*')
    token_re = re.compile(r'/{0,2}[^]\0\t\r\f\n ()<>{}/%[]+')
    instring_re = re.compile(r'[()\\]')
    hex_re = re.compile(r'^<[0-9a-fA-F\0\t\r\f\n ]*>$')
    oct_re = re.compile(r'[0-7]{1,3}')
    pos = 0
    # When not None, the consumer asked for that many raw bytes next.
    next_binary: int | None = None
    while pos < len(text):
        if next_binary is not None:
            n = next_binary
            next_binary = (yield _BinaryToken(pos, data[pos:pos+n]))
            pos += n
            continue
        match = whitespace_or_comment_re.match(text, pos)
        if match:
            if not skip_ws:
                next_binary = (yield _WhitespaceToken(pos, match.group()))
            pos = match.end()
        elif text[pos] == '(':
            # PostScript string rules:
            # - parentheses must be balanced
            # - backslashes escape backslashes and parens
            # - also codes \n\r\t\b\f and octal escapes are recognized
            # - other backslashes do not escape anything
            start = pos
            pos += 1
            depth = 1
            while depth:
                match = instring_re.search(text, pos)
                if match is None:
                    raise ValueError(
                        f'Unterminated string starting at {start}')
                pos = match.end()
                if match.group() == '(':
                    depth += 1
                elif match.group() == ')':
                    depth -= 1
                else:  # a backslash
                    char = text[pos]
                    if char in r'\()nrtbf':
                        pos += 1
                    else:
                        octal = oct_re.match(text, pos)
                        if octal:
                            pos = octal.end()
                        else:
                            pass  # non-escaping backslash
            next_binary = (yield _StringToken(start, text[start:pos]))
        elif text[pos:pos + 2] in ('<<', '>>'):
            next_binary = (yield _DelimiterToken(pos, text[pos:pos + 2]))
            pos += 2
        elif text[pos] == '<':
            start = pos
            try:
                pos = text.index('>', pos) + 1
            except ValueError as e:
                raise ValueError(f'Unterminated hex string starting at {start}'
                                 ) from e
            if not hex_re.match(text[start:pos]):
                raise ValueError(f'Malformed hex string starting at {start}')
            # Bug fix: yield the token at its *start* offset, consistent with
            # every other branch and with the documented meaning of _Token.pos
            # ("offset from the beginning of the data").  Previously the end
            # offset was passed here, making pos/endpos() wrong for hex
            # strings.
            next_binary = (yield _StringToken(start, text[start:pos]))
        else:
            match = token_re.match(text, pos)
            if match:
                raw = match.group()
                if raw.startswith('/'):
                    next_binary = (yield _NameToken(pos, raw))
                elif match.group() in ('true', 'false'):
                    next_binary = (yield _BooleanToken(pos, raw))
                else:
                    # A token that parses as a float is a number; anything
                    # else is an executable keyword.
                    try:
                        float(raw)
                        next_binary = (yield _NumberToken(pos, raw))
                    except ValueError:
                        next_binary = (yield _KeywordToken(pos, raw))
                pos = match.end()
            else:
                next_binary = (yield _DelimiterToken(pos, text[pos]))
                pos += 1
class _BalancedExpression(_Token):
    """A pseudo-token for the raw text of one balanced expression."""
    pass
def _expression(initial, tokens, data):
    """
    Consume some number of tokens and return a balanced PostScript expression.

    Parameters
    ----------
    initial : _Token
        The token that triggered parsing a balanced expression.
    tokens : iterator of _Token
        Following tokens.
    data : bytes
        Underlying data that the token positions point to.

    Returns
    -------
    _BalancedExpression
    """
    delim_stack = []
    token = initial
    while True:
        if token.is_delim():
            if token.raw in ('[', '{'):
                delim_stack.append(token)
            elif token.raw in (']', '}'):
                if not delim_stack:
                    raise RuntimeError(f"unmatched closing token {token}")
                match = delim_stack.pop()
                if match.raw != token.opposite():
                    raise RuntimeError(
                        f"opening token {match} closed by {token}"
                    )
                if not delim_stack:
                    # Outermost bracket closed: the expression is complete.
                    break
            else:
                raise RuntimeError(f'unknown delimiter {token}')
        elif not delim_stack:
            # A bare non-delimiter token is a complete expression by itself.
            break
        token = next(tokens)
    # Return the raw text spanning from the initial token through the last
    # one consumed.
    return _BalancedExpression(
        initial.pos,
        data[initial.pos:token.endpos()].decode('ascii', 'replace')
    )
class Type1Font:
    """
    A class representing a Type-1 font, for use by backends.

    Attributes
    ----------
    parts : tuple
        A 3-tuple of the cleartext part, the encrypted part, and the finale of
        zeros.
    decrypted : bytes
        The decrypted form of ``parts[1]``.
    prop : dict[str, Any]
        A dictionary of font properties. Noteworthy keys include:

        - FontName: PostScript name of the font
        - Encoding: dict from numeric codes to glyph names
        - FontMatrix: bytes object encoding a matrix
        - UniqueID: optional font identifier, dropped when modifying the font
        - CharStrings: dict from glyph names to byte code
        - Subrs: array of byte code subroutines
        - OtherSubrs: bytes object encoding some PostScript code
    """
    __slots__ = ('parts', 'decrypted', 'prop', '_pos', '_abbr')
    # the _pos dict contains (begin, end) indices to parts[0] + decrypted
    # so that they can be replaced when transforming the font;
    # but since sometimes a definition appears in both parts[0] and decrypted,
    # _pos[name] is an array of such pairs
    #
    # _abbr maps three standard abbreviations to their particular names in
    # this font (e.g. 'RD' is named '-|' in some fonts)

    def __init__(self, input):
        """
        Initialize a Type-1 font.

        Parameters
        ----------
        input : str or 3-tuple
            Either a pfb file name, or a 3-tuple of already-decoded Type-1
            font `~.Type1Font.parts`.
        """
        if isinstance(input, tuple) and len(input) == 3:
            self.parts = input
        else:
            with open(input, 'rb') as file:
                data = self._read(file)
            self.parts = self._split(data)
        self.decrypted = self._decrypt(self.parts[1], 'eexec')
        self._abbr = {'RD': 'RD', 'ND': 'ND', 'NP': 'NP'}
        self._parse()

    def _read(self, file):
        """Read the font from a file, decoding into usable parts."""
        rawdata = file.read()
        if not rawdata.startswith(b'\x80'):
            return rawdata
        data = b''
        while rawdata:
            if not rawdata.startswith(b'\x80'):
                raise RuntimeError('Broken pfb file (expected byte 128, '
                                   'got %d)' % rawdata[0])
            type = rawdata[1]
            if type in (1, 2):
                # NOTE(review): the source text from the struct format string
                # below up to the '> 8))' fragment is garbled -- everything
                # between a '<' and the next '>' appears to have been
                # stripped (HTML-tag-like stripping).  The missing span
                # contained the remainder of _read, the whole _split helper
                # (still called from __init__ above), and the head of the
                # _decrypt staticmethod, whose tail survives below.  Restore
                # these from the upstream matplotlib _type1font.py before
                # using this file; the lines are preserved here verbatim.
                length, = struct.unpack('> 8))
                key = ((key+byte) * 52845 + 22719) & 0xffff
        return bytes(plaintext[ndiscard:])

    @staticmethod
    def _encrypt(plaintext, key, ndiscard=4):
        """
        Encrypt plaintext using the Type-1 font algorithm.

        The algorithm is described in Adobe's "Adobe Type 1 Font Format".
        The key argument can be an integer, or one of the strings
        'eexec' and 'charstring', which map to the key specified for the
        corresponding part of Type-1 fonts.

        The ndiscard argument should be an integer, usually 4. That
        number of bytes is prepended to the plaintext before encryption.
        This function prepends NUL bytes for reproducibility, even though
        the original algorithm uses random bytes, presumably to avoid
        cryptanalysis.
        """
        key = _api.check_getitem({'eexec': 55665, 'charstring': 4330}, key=key)
        ciphertext = []
        for byte in b'\0' * ndiscard + plaintext:
            # Rolling-key XOR cipher per the Type-1 spec.
            c = byte ^ (key >> 8)
            ciphertext.append(c)
            key = ((key + c) * 52845 + 22719) & 0xffff
        return bytes(ciphertext)

    def _parse(self):
        """
        Find the values of various font properties. This limited kind
        of parsing is described in Chapter 10 "Adobe Type Manager
        Compatibility" of the Type-1 spec.
        """
        # Start with reasonable defaults
        prop = {'Weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
                'UnderlinePosition': -100, 'UnderlineThickness': 50}
        pos = {}
        data = self.parts[0] + self.decrypted
        source = _tokenize(data, True)
        while True:
            # See if there is a key to be assigned a value
            # e.g. /FontName in /FontName /Helvetica def
            try:
                token = next(source)
            except StopIteration:
                break
            if token.is_delim():
                # skip over this - we want top-level keys only
                _expression(token, source, data)
            if token.is_slash_name():
                key = token.value()
                keypos = token.pos
            else:
                continue
            # Some values need special parsing
            if key in ('Subrs', 'CharStrings', 'Encoding', 'OtherSubrs'):
                prop[key], endpos = {
                    'Subrs': self._parse_subrs,
                    'CharStrings': self._parse_charstrings,
                    'Encoding': self._parse_encoding,
                    'OtherSubrs': self._parse_othersubrs
                }[key](source, data)
                pos.setdefault(key, []).append((keypos, endpos))
                continue
            try:
                token = next(source)
            except StopIteration:
                break
            if isinstance(token, _KeywordToken):
                # constructs like
                # FontDirectory /Helvetica known {...} {...} ifelse
                # mean the key was not really a key
                continue
            if token.is_delim():
                value = _expression(token, source, data).raw
            else:
                value = token.value()
            # look for a 'def' possibly preceded by access modifiers
            try:
                kw = next(
                    kw for kw in source
                    if not kw.is_keyword('readonly', 'noaccess', 'executeonly')
                )
            except StopIteration:
                break
            # sometimes noaccess def and readonly def are abbreviated
            if kw.is_keyword('def', self._abbr['ND'], self._abbr['NP']):
                prop[key] = value
                pos.setdefault(key, []).append((keypos, kw.endpos()))
            # detect the standard abbreviations
            if value == '{noaccess def}':
                self._abbr['ND'] = key
            elif value == '{noaccess put}':
                self._abbr['NP'] = key
            elif value == '{string currentfile exch readstring pop}':
                self._abbr['RD'] = key
        # Fill in the various *Name properties
        if 'FontName' not in prop:
            prop['FontName'] = (prop.get('FullName') or
                                prop.get('FamilyName') or
                                'Unknown')
        if 'FullName' not in prop:
            prop['FullName'] = prop['FontName']
        if 'FamilyName' not in prop:
            # Strip style suffixes (e.g. "-Bold Italic") off the full name.
            extras = ('(?i)([ -](regular|plain|italic|oblique|(semi)?bold|'
                      '(ultra)?light|extra|condensed))+$')
            prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
        # Decrypt the encrypted parts
        ndiscard = prop.get('lenIV', 4)
        cs = prop['CharStrings']
        for key, value in cs.items():
            cs[key] = self._decrypt(value, 'charstring', ndiscard)
        if 'Subrs' in prop:
            prop['Subrs'] = [
                self._decrypt(value, 'charstring', ndiscard)
                for value in prop['Subrs']
            ]
        self.prop = prop
        self._pos = pos

    def _parse_subrs(self, tokens, _data):
        # Parses "/Subrs <count> array" followed by count entries of the
        # shape "dup <index> <nbytes> RD <binary>".
        count_token = next(tokens)
        if not count_token.is_number():
            raise RuntimeError(
                f"Token following /Subrs must be a number, was {count_token}"
            )
        count = count_token.value()
        array = [None] * count
        next(t for t in tokens if t.is_keyword('array'))
        for _ in range(count):
            next(t for t in tokens if t.is_keyword('dup'))
            index_token = next(tokens)
            if not index_token.is_number():
                raise RuntimeError(
                    "Token following dup in Subrs definition must be a "
                    f"number, was {index_token}"
                )
            nbytes_token = next(tokens)
            if not nbytes_token.is_number():
                raise RuntimeError(
                    "Second token following dup in Subrs definition must "
                    f"be a number, was {nbytes_token}"
                )
            token = next(tokens)
            if not token.is_keyword(self._abbr['RD']):
                raise RuntimeError(
                    f"Token preceding subr must be {self._abbr['RD']}, "
                    f"was {token}"
                )
            # Ask the tokenizer for the raw binary run (plus separator byte).
            binary_token = tokens.send(1+nbytes_token.value())
            array[index_token.value()] = binary_token.value()
        return array, next(tokens).endpos()

    @staticmethod
    def _parse_charstrings(tokens, _data):
        count_token = next(tokens)
        if not count_token.is_number():
            raise RuntimeError(
                "Token following /CharStrings must be a number, "
                f"was {count_token}"
            )
        count = count_token.value()
        charstrings = {}
        next(t for t in tokens if t.is_keyword('begin'))
        while True:
            # Entries look like "/glyphname <nbytes> RD <binary>"; 'end'
            # terminates the dict.
            token = next(t for t in tokens
                         if t.is_keyword('end') or t.is_slash_name())
            if token.raw == 'end':
                return charstrings, token.endpos()
            glyphname = token.value()
            nbytes_token = next(tokens)
            if not nbytes_token.is_number():
                raise RuntimeError(
                    f"Token following /{glyphname} in CharStrings definition "
                    f"must be a number, was {nbytes_token}"
                )
            next(tokens)  # usually RD or |-
            binary_token = tokens.send(1+nbytes_token.value())
            charstrings[glyphname] = binary_token.value()

    @staticmethod
    def _parse_encoding(tokens, _data):
        # this only works for encodings that follow the Adobe manual
        # but some old fonts include non-compliant data - we log a warning
        # and return a possibly incomplete encoding
        encoding = {}
        while True:
            token = next(t for t in tokens
                         if t.is_keyword('StandardEncoding', 'dup', 'def'))
            if token.is_keyword('StandardEncoding'):
                return _StandardEncoding, token.endpos()
            if token.is_keyword('def'):
                return encoding, token.endpos()
            index_token = next(tokens)
            if not index_token.is_number():
                _log.warning(
                    f"Parsing encoding: expected number, got {index_token}"
                )
                continue
            name_token = next(tokens)
            if not name_token.is_slash_name():
                _log.warning(
                    f"Parsing encoding: expected slash-name, got {name_token}"
                )
                continue
            encoding[index_token.value()] = name_token.value()

    @staticmethod
    def _parse_othersubrs(tokens, data):
        # Keep the OtherSubrs code verbatim, from the first token through the
        # terminating def/ND/|-.
        init_pos = None
        while True:
            token = next(tokens)
            if init_pos is None:
                init_pos = token.pos
            if token.is_delim():
                _expression(token, tokens, data)
            elif token.is_keyword('def', 'ND', '|-'):
                return data[init_pos:token.endpos()], token.endpos()

    def transform(self, effects):
        """
        Return a new font that is slanted and/or extended.

        Parameters
        ----------
        effects : dict
            A dict with optional entries:

            - 'slant' : float, default: 0
                Tangent of the angle that the font is to be slanted to the
                right. Negative values slant to the left.
            - 'extend' : float, default: 1
                Scaling factor for the font width. Values less than 1 condense
                the glyphs.

        Returns
        -------
        `Type1Font`
        """
        fontname = self.prop['FontName']
        italicangle = self.prop['ItalicAngle']
        # FontMatrix is stored in its textual form '[a b c d e f]'; parse the
        # six numbers into the columns of a 3x3 affine matrix.
        array = [
            float(x) for x in (self.prop['FontMatrix']
                               .lstrip('[').rstrip(']').split())
        ]
        oldmatrix = np.eye(3, 3)
        oldmatrix[0:3, 0] = array[::2]
        oldmatrix[0:3, 1] = array[1::2]
        modifier = np.eye(3, 3)
        if 'slant' in effects:
            slant = effects['slant']
            fontname += f'_Slant_{int(1000 * slant)}'
            italicangle = round(
                float(italicangle) - np.arctan(slant) / np.pi * 180,
                5
            )
            modifier[1, 0] = slant
        if 'extend' in effects:
            extend = effects['extend']
            fontname += f'_Extend_{int(1000 * extend)}'
            modifier[0, 0] = extend
        newmatrix = np.dot(modifier, oldmatrix)
        array[::2] = newmatrix[0:3, 0]
        array[1::2] = newmatrix[0:3, 1]
        fontmatrix = (
            f"[{' '.join(_format_approx(x, 6) for x in array)}]"
        )
        # Patch the textual definitions in place; UniqueID entries are
        # replaced by the empty string (dropped, per the class docstring).
        replacements = (
            [(x, f'/FontName/{fontname} def')
             for x in self._pos['FontName']]
            + [(x, f'/ItalicAngle {italicangle} def')
               for x in self._pos['ItalicAngle']]
            + [(x, f'/FontMatrix {fontmatrix} readonly def')
               for x in self._pos['FontMatrix']]
            + [(x, '') for x in self._pos.get('UniqueID', [])]
        )
        data = bytearray(self.parts[0])
        data.extend(self.decrypted)
        len0 = len(self.parts[0])
        # Apply replacements back-to-front so earlier offsets stay valid.
        for (pos0, pos1), value in sorted(replacements, reverse=True):
            data[pos0:pos1] = value.encode('ascii', 'replace')
            if pos0 < len(self.parts[0]):
                if pos1 >= len(self.parts[0]):
                    raise RuntimeError(
                        f"text to be replaced with {value} spans "
                        "the eexec boundary"
                    )
                # Track how the cleartext part's length changed.
                len0 += len(value) - pos1 + pos0
        data = bytes(data)
        return Type1Font((
            data[:len0],
            self._encrypt(data[len0:], 'eexec'),
            self.parts[2]
        ))
# The Adobe StandardEncoding vector as a dict from character code to glyph
# name; the ASCII-letter codes are generated from string.ascii_letters.
# Returned by Type1Font._parse_encoding for fonts that declare
# "StandardEncoding".
_StandardEncoding = {
    **{ord(letter): letter for letter in string.ascii_letters},
    0: '.notdef',
    32: 'space',
    33: 'exclam',
    34: 'quotedbl',
    35: 'numbersign',
    36: 'dollar',
    37: 'percent',
    38: 'ampersand',
    39: 'quoteright',
    40: 'parenleft',
    41: 'parenright',
    42: 'asterisk',
    43: 'plus',
    44: 'comma',
    45: 'hyphen',
    46: 'period',
    47: 'slash',
    48: 'zero',
    49: 'one',
    50: 'two',
    51: 'three',
    52: 'four',
    53: 'five',
    54: 'six',
    55: 'seven',
    56: 'eight',
    57: 'nine',
    58: 'colon',
    59: 'semicolon',
    60: 'less',
    61: 'equal',
    62: 'greater',
    63: 'question',
    64: 'at',
    91: 'bracketleft',
    92: 'backslash',
    93: 'bracketright',
    94: 'asciicircum',
    95: 'underscore',
    96: 'quoteleft',
    123: 'braceleft',
    124: 'bar',
    125: 'braceright',
    126: 'asciitilde',
    161: 'exclamdown',
    162: 'cent',
    163: 'sterling',
    164: 'fraction',
    165: 'yen',
    166: 'florin',
    167: 'section',
    168: 'currency',
    169: 'quotesingle',
    170: 'quotedblleft',
    171: 'guillemotleft',
    172: 'guilsinglleft',
    173: 'guilsinglright',
    174: 'fi',
    175: 'fl',
    177: 'endash',
    178: 'dagger',
    179: 'daggerdbl',
    180: 'periodcentered',
    182: 'paragraph',
    183: 'bullet',
    184: 'quotesinglbase',
    185: 'quotedblbase',
    186: 'quotedblright',
    187: 'guillemotright',
    188: 'ellipsis',
    189: 'perthousand',
    191: 'questiondown',
    193: 'grave',
    194: 'acute',
    195: 'circumflex',
    196: 'tilde',
    197: 'macron',
    198: 'breve',
    199: 'dotaccent',
    200: 'dieresis',
    202: 'ring',
    203: 'cedilla',
    205: 'hungarumlaut',
    206: 'ogonek',
    207: 'caron',
    208: 'emdash',
    225: 'AE',
    227: 'ordfeminine',
    232: 'Lslash',
    233: 'Oslash',
    234: 'OE',
    235: 'ordmasculine',
    241: 'ae',
    245: 'dotlessi',
    248: 'lslash',
    249: 'oslash',
    250: 'oe',
    251: 'germandbls',
}
venv\Lib\site-packages\matplotlib\_version.py
# Version string of this matplotlib distribution; __init__._get_version uses
# it as the fallback when setuptools_scm cannot be consulted.
version = "3.10.5"
venv\Lib\site-packages\matplotlib\__init__.py
"""
An object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
import matplotlib.pyplot as plt
or using ipython::
ipython
at your terminal, followed by::
In [1]: %matplotlib
In [2]: import matplotlib.pyplot as plt
at the ipython shell prompt.
For the most part, direct use of the explicit object-oriented library is
encouraged when programming; the implicit pyplot interface is primarily for
working interactively. The exceptions to this suggestion are the pyplot
functions `.pyplot.figure`, `.pyplot.subplot`, `.pyplot.subplots`, and
`.pyplot.savefig`, which can greatly simplify scripting. See
:ref:`api_interfaces` for an explanation of the tradeoffs between the implicit
and explicit interfaces.
Modules include:
:mod:`matplotlib.axes`
The `~.axes.Axes` class. Most pyplot functions are wrappers for
`~.axes.Axes` methods. The axes module is the highest level of OO
access to the library.
:mod:`matplotlib.figure`
The `.Figure` class.
:mod:`matplotlib.artist`
The `.Artist` base class for all classes that draw things.
:mod:`matplotlib.lines`
The `.Line2D` class for drawing lines and markers.
:mod:`matplotlib.patches`
Classes for drawing polygons.
:mod:`matplotlib.text`
The `.Text` and `.Annotation` classes.
:mod:`matplotlib.image`
The `.AxesImage` and `.FigureImage` classes.
:mod:`matplotlib.collections`
Classes for efficient drawing of groups of lines or polygons.
:mod:`matplotlib.colors`
Color specifications and making colormaps.
:mod:`matplotlib.cm`
Colormaps, and the `.ScalarMappable` mixin class for providing color
mapping functionality to other classes.
:mod:`matplotlib.ticker`
Calculation of tick mark locations and formatting of tick labels.
:mod:`matplotlib.backends`
A subpackage with modules for various GUI libraries and output formats.
The base matplotlib namespace includes:
`~matplotlib.rcParams`
Default configuration settings; their defaults may be overridden using
a :file:`matplotlibrc` file.
`~matplotlib.use`
Setting the Matplotlib backend. This should be called before any
figure is created, because it is not possible to switch between
different GUI backends after that.
The following environment variables can be used to customize the behavior:
:envvar:`MPLBACKEND`
This optional variable can be set to choose the Matplotlib backend. See
:ref:`what-is-a-backend`.
:envvar:`MPLCONFIGDIR`
This is the directory used to store user customizations to
Matplotlib, as well as some caches to improve performance. If
:envvar:`MPLCONFIGDIR` is not defined, :file:`{HOME}/.config/matplotlib`
and :file:`{HOME}/.cache/matplotlib` are used on Linux, and
:file:`{HOME}/.matplotlib` on other platforms, if they are
writable. Otherwise, the Python standard library's `tempfile.gettempdir`
is used to find a base directory in which the :file:`matplotlib`
subdirectory is created.
Matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
# Names re-exported as the public API of the top-level matplotlib namespace.
__all__ = [
    "__bibtex__",
    "__version__",
    "__version_info__",
    "set_loglevel",
    "ExecutableNotFoundError",
    "get_configdir",
    "get_cachedir",
    "get_data_path",
    "matplotlib_fname",
    "MatplotlibDeprecationWarning",
    "RcParams",
    "rc_params",
    "rc_params_from_file",
    "rcParamsDefault",
    "rcParams",
    "rcParamsOrig",
    "defaultParams",
    "rc",
    "rcdefaults",
    "rc_file_defaults",
    "rc_file",
    "rc_context",
    "use",
    "get_backend",
    "interactive",
    "is_interactive",
    "colormaps",
    "multivar_colormaps",
    "bivar_colormaps",
    "color_sequences",
]
import atexit
from collections import namedtuple
from collections.abc import MutableMapping
import contextlib
import functools
import importlib
import inspect
from inspect import Parameter
import locale
import logging
import os
from pathlib import Path
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
from packaging.version import parse as parse_version
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from . import _api, _version, cbook, _docstring, rcsetup
from matplotlib._api import MatplotlibDeprecationWarning
from matplotlib.rcsetup import cycler # noqa: F401
# Root logger for the whole matplotlib package.
_log = logging.getLogger(__name__)

# BibTeX entry exposed as matplotlib.__bibtex__ for citing the library.
# NOTE(review): the field alignment inside this literal may have been lost in
# this dump; the lines are preserved verbatim.
__bibtex__ = r"""@Article{Hunter:2007,
Author = {Hunter, J. D.},
Title = {Matplotlib: A 2D graphics environment},
Journal = {Computing in Science \& Engineering},
Volume = {9},
Number = {3},
Pages = {90--95},
abstract = {Matplotlib is a 2D graphics package used for Python
for application development, interactive scripting, and
publication-quality image generation across user
interfaces and operating systems.},
publisher = {IEEE COMPUTER SOC},
year = 2007
}"""

# modelled after sys.version_info
_VersionInfo = namedtuple('_VersionInfo',
                          'major, minor, micro, releaselevel, serial')
def _parse_to_version_info(version_str):
    """
    Parse a version string to a namedtuple analogous to sys.version_info.

    See:
    https://packaging.pypa.io/en/latest/version.html#packaging.version.parse
    https://docs.python.org/3/library/sys.html#sys.version_info
    """
    v = parse_version(version_str)
    # Dev releases take precedence over any pre/post component.
    if v.dev is not None:
        return _VersionInfo(v.major, v.minor, v.micro, 'alpha', v.dev)
    if v.pre is not None:
        level = {'a': 'alpha', 'b': 'beta', 'rc': 'candidate'}.get(
            v.pre[0], 'alpha')
        return _VersionInfo(v.major, v.minor, v.micro, level, v.pre[1])
    if v.post is not None:
        # fallback for v.post: guess-next-dev scheme from setuptools_scm
        return _VersionInfo(v.major, v.minor, v.micro + 1, 'alpha', v.post)
    # No pre/post/dev component: a final release.
    return _VersionInfo(v.major, v.minor, v.micro, 'final', 0)
def _get_version():
    """Return the version string used for __version__."""
    # Only shell out to a git subprocess if really needed, i.e. when we are in
    # a matplotlib git repo but not in a shallow clone, such as those used by
    # CI, as the latter would trigger a warning from setuptools_scm.
    # parents[2]: lib/matplotlib/__init__.py -> repo root in a source
    # checkout (presumably; the .matplotlib-repo marker below confirms it).
    root = Path(__file__).resolve().parents[2]
    if ((root / ".matplotlib-repo").exists()
            and (root / ".git").exists()
            and not (root / ".git/shallow").exists()):
        try:
            import setuptools_scm
        except ImportError:
            pass
        else:
            return setuptools_scm.get_version(
                root=root,
                dist_name="matplotlib",
                version_scheme="release-branch-semver",
                local_scheme="node-and-date",
                fallback_version=_version.version,
            )
    # Get the version from the _version.py file if not in repo or
    # setuptools_scm is unavailable.
    return _version.version
@_api.caching_module_getattr
class __getattr__:
    # Module-level __version__ / __version_info__ are computed lazily on
    # first attribute access (and, per the decorator's name, cached), so that
    # importing matplotlib does not run git/setuptools_scm.
    __version__ = property(lambda self: _get_version())
    __version_info__ = property(
        lambda self: _parse_to_version_info(self.__version__))
def _check_versions():
    """Raise ImportError if a hard runtime dependency is missing or too old."""
    # Quickfix to ensure Microsoft Visual C++ redistributable
    # DLLs are loaded before importing kiwisolver
    from . import ft2font  # noqa: F401
    for modname, minver in [
            ("cycler", "0.10"),
            ("dateutil", "2.7"),
            ("kiwisolver", "1.3.1"),
            ("numpy", "1.23"),
            ("pyparsing", "2.3.1"),
    ]:
        module = importlib.import_module(modname)
        if parse_version(module.__version__) < parse_version(minver):
            raise ImportError(f"Matplotlib requires {modname}>={minver}; "
                              f"you have {module.__version__}")


# Fail fast at import time.
_check_versions()
# The decorator ensures this always returns the same handler (and it is only
# attached once).
@functools.cache
def _ensure_handler():
    """
    Attach a `StreamHandler` to the Matplotlib root logger on the first call,
    formatted like `logging.basicConfig`, and return that same handler on
    every subsequent call (guaranteed by the cache decorator).
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    _log.addHandler(stream_handler)
    return stream_handler
def set_loglevel(level):
    """
    Configure Matplotlib's logging levels.

    Matplotlib uses the standard library `logging` framework under the root
    logger 'matplotlib'.  This helper sets both that logger's level and the
    level of the handler attached by `_ensure_handler` (creating the handler
    if it does not exist yet).

    Typically, one should call ``set_loglevel("info")`` or
    ``set_loglevel("debug")`` to get additional debugging information.

    Users or applications that are installing their own logging handlers
    may want to directly manipulate ``logging.getLogger('matplotlib')`` rather
    than use this function.

    Parameters
    ----------
    level : {"notset", "debug", "info", "warning", "error", "critical"}
        The log level of the handler.

    Notes
    -----
    The first time this function is called, an additional handler is attached
    to Matplotlib's root handler; this handler is reused every time and this
    function simply manipulates the logger and handler's level.
    """
    level_name = level.upper()
    _log.setLevel(level_name)
    _ensure_handler().setLevel(level_name)
def _logged_cached(fmt, func=None):
    """
    Decorator that logs a function's return value, and memoizes that value.

    After ::

        @_logged_cached(fmt)
        def func(): ...

    the first call to *func* will log its return value at the DEBUG level
    using %-format string *fmt*, and memoize it; later calls to *func* will
    directly return that value (logging it again each time).
    """
    if func is None:
        # Called as ``_logged_cached(fmt)``: return the actual decorator.
        return functools.partial(_logged_cached, fmt)

    memo = []  # Empty until first call; then holds the single cached value.

    @functools.wraps(func)
    def wrapper(**kwargs):
        if not memo:
            memo.append(func(**kwargs))
        _log.debug(fmt, memo[0])
        return memo[0]

    return wrapper
# Record describing an optional external executable: its name/path, the raw
# version string extracted from its output, and the parsed version object.
_ExecInfo = namedtuple("_ExecInfo", "executable raw_version version")
class ExecutableNotFoundError(FileNotFoundError):
    """
    Raised when an executable that Matplotlib optionally depends on cannot
    be found (or is otherwise unusable).
    """
@functools.cache
def _get_executable_info(name):
    """
    Get the version of some executable that Matplotlib optionally depends on.

    .. warning::
        The list of executables that this function supports is set according to
        Matplotlib's internal needs, and may change without notice.

    Parameters
    ----------
    name : str
        The executable to query. The following values are currently supported:
        "dvipng", "gs", "inkscape", "magick", "pdftocairo", "pdftops". This
        list is subject to change without notice.

    Returns
    -------
    tuple
        A namedtuple with fields ``executable`` (`str`) and ``version``
        (`packaging.Version`, or ``None`` if the version cannot be determined).

    Raises
    ------
    ExecutableNotFoundError
        If the executable is not found or older than the oldest version
        supported by Matplotlib. For debugging purposes, it is also
        possible to "hide" an executable from Matplotlib by adding it to the
        :envvar:`_MPLHIDEEXECUTABLES` environment variable (a comma-separated
        list), which must be set prior to any calls to this function.
    ValueError
        If the executable is not one that we know how to query.
    """
    def impl(args, regex, min_ver=None, ignore_exit_code=False):
        # Execute the subprocess specified by args; capture stdout and stderr.
        # Search for a regex match in the output; if the match succeeds, the
        # first group of the match is the version.
        # Return an _ExecInfo if the executable exists, and has a version of
        # at least min_ver (if set); else, raise ExecutableNotFoundError.
        try:
            output = subprocess.check_output(
                args, stderr=subprocess.STDOUT,
                text=True, errors="replace", timeout=30)
        except subprocess.CalledProcessError as _cpe:
            if ignore_exit_code:
                # Some tools exit non-zero when asked for their version
                # (e.g. pdftops); keep whatever they printed.
                output = _cpe.output
            else:
                raise ExecutableNotFoundError(str(_cpe)) from _cpe
        except subprocess.TimeoutExpired as _te:
            msg = f"Timed out running {cbook._pformat_subprocess(args)}"
            raise ExecutableNotFoundError(msg) from _te
        except OSError as _ose:
            # Executable not on PATH, not executable, etc.
            raise ExecutableNotFoundError(str(_ose)) from _ose
        match = re.search(regex, output)
        if match:
            raw_version = match.group(1)
            version = parse_version(raw_version)
            if min_ver is not None and version < parse_version(min_ver):
                raise ExecutableNotFoundError(
                    f"You have {args[0]} version {version} but the minimum "
                    f"version supported by Matplotlib is {min_ver}")
            return _ExecInfo(args[0], raw_version, version)
        else:
            raise ExecutableNotFoundError(
                f"Failed to determine the version of {args[0]} from "
                f"{' '.join(args)}, which output {output}")

    # Debugging aid: executables listed in _MPLHIDEEXECUTABLES are treated as
    # absent.
    if name in os.environ.get("_MPLHIDEEXECUTABLES", "").split(","):
        raise ExecutableNotFoundError(f"{name} was hidden")

    if name == "dvipng":
        return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6")
    elif name == "gs":
        execs = (["gswin32c", "gswin64c", "mgs", "gs"]  # "mgs" for miktex.
                 if sys.platform == "win32" else
                 ["gs"])
        # Try each candidate executable name in order; first success wins.
        for e in execs:
            try:
                return impl([e, "--version"], "(.*)", "9")
            except ExecutableNotFoundError:
                pass
        message = "Failed to find a Ghostscript installation"
        raise ExecutableNotFoundError(message)
    elif name == "inkscape":
        try:
            # Try headless option first (needed for Inkscape version < 1.0):
            return impl(["inkscape", "--without-gui", "-V"],
                        "Inkscape ([^ ]*)")
        except ExecutableNotFoundError:
            pass  # Suppress exception chaining.
        # If --without-gui is not accepted, we may be using Inkscape >= 1.0 so
        # try without it:
        return impl(["inkscape", "-V"], "Inkscape ([^ ]*)")
    elif name == "magick":
        if sys.platform == "win32":
            # Check the registry to avoid confusing ImageMagick's convert with
            # Windows's builtin convert.exe.
            import winreg
            binpath = ""
            # Query both the 32- and 64-bit registry views (and the default).
            for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]:
                try:
                    with winreg.OpenKeyEx(
                            winreg.HKEY_LOCAL_MACHINE,
                            r"Software\Imagemagick\Current",
                            0, winreg.KEY_QUERY_VALUE | flag) as hkey:
                        binpath = winreg.QueryValueEx(hkey, "BinPath")[0]
                except OSError:
                    pass
            path = None
            if binpath:
                # NOTE(review): this loop shadows the *name* parameter; it is
                # not read again in this branch, so behavior is unaffected.
                for name in ["convert.exe", "magick.exe"]:
                    candidate = Path(binpath, name)
                    if candidate.exists():
                        path = str(candidate)
                        break
            if path is None:
                raise ExecutableNotFoundError(
                    "Failed to find an ImageMagick installation")
        else:
            path = "convert"
        info = impl([path, "--version"], r"^Version: ImageMagick (\S*)")
        if info.raw_version == "7.0.10-34":
            # https://github.com/ImageMagick/ImageMagick/issues/2720
            raise ExecutableNotFoundError(
                f"You have ImageMagick {info.version}, which is unsupported")
        return info
    elif name == "pdftocairo":
        return impl(["pdftocairo", "-v"], "pdftocairo version (.*)")
    elif name == "pdftops":
        info = impl(["pdftops", "-v"], "^pdftops version (.*)",
                    ignore_exit_code=True)
        if info and not (
                3 <= info.version.major or
                # poppler version numbers.
                parse_version("0.9") <= info.version < parse_version("1.0")):
            raise ExecutableNotFoundError(
                f"You have pdftops version {info.version} but the minimum "
                f"version supported by Matplotlib is 3.0")
        return info
    else:
        raise ValueError(f"Unknown executable: {name!r}")
def _get_xdg_config_dir():
"""
Return the XDG configuration directory, according to the XDG base
directory spec:
https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config")
def _get_xdg_cache_dir():
"""
Return the XDG cache directory, according to the XDG base directory spec:
https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache")
def _get_config_or_cache_dir(xdg_base_getter):
    """
    Return a writable config/cache directory as a string.

    Candidates, in order: ``$MPLCONFIGDIR``; on Linux/FreeBSD, the XDG
    location returned by *xdg_base_getter* plus ``matplotlib``; otherwise
    ``~/.matplotlib``.  If the chosen directory cannot be created or is not
    writable, fall back to a temporary directory that is removed at
    interpreter exit.
    """
    configdir = os.environ.get('MPLCONFIGDIR')
    if configdir:
        configdir = Path(configdir)
    elif sys.platform.startswith(('linux', 'freebsd')):
        # Only call _xdg_base_getter here so that MPLCONFIGDIR is tried first,
        # as _xdg_base_getter can throw.
        configdir = Path(xdg_base_getter(), "matplotlib")
    else:
        configdir = Path.home() / ".matplotlib"
    # Resolve the path to handle potential issues with inaccessible symlinks.
    configdir = configdir.resolve()
    try:
        configdir.mkdir(parents=True, exist_ok=True)
    except OSError as exc:
        _log.warning("mkdir -p failed for path %s: %s", configdir, exc)
    else:
        # Creation succeeded; verify it is actually a writable directory.
        if os.access(str(configdir), os.W_OK) and configdir.is_dir():
            return str(configdir)
        _log.warning("%s is not a writable directory", configdir)
    # If the config or cache directory cannot be created or is not a writable
    # directory, create a temporary one.
    try:
        tmpdir = tempfile.mkdtemp(prefix="matplotlib-")
    except OSError as exc:
        raise OSError(
            f"Matplotlib requires access to a writable cache directory, but there "
            f"was an issue with the default path ({configdir}), and a temporary "
            f"directory could not be created; set the MPLCONFIGDIR environment "
            f"variable to a writable directory") from exc
    # Export the fallback so that child processes inherit it, and clean it up
    # on interpreter exit.
    os.environ["MPLCONFIGDIR"] = tmpdir
    atexit.register(shutil.rmtree, tmpdir)
    _log.warning(
        "Matplotlib created a temporary cache directory at %s because there was "
        "an issue with the default path (%s); it is highly recommended to set the "
        "MPLCONFIGDIR environment variable to a writable directory, in particular to "
        "speed up the import of Matplotlib and to better support multiprocessing.",
        tmpdir, configdir)
    return tmpdir
@_logged_cached('CONFIGDIR=%s')
def get_configdir():
    """
    Return the string path of the configuration directory.

    The directory is chosen as follows:

    1. If the MPLCONFIGDIR environment variable is supplied, choose that.
    2. On Linux, follow the XDG specification and look first in
       ``$XDG_CONFIG_HOME``, if defined, or ``$HOME/.config``.  On other
       platforms, choose ``$HOME/.matplotlib``.
    3. If the chosen directory exists and is writable, use that as the
       configuration directory.
    4. Else, create a temporary directory, and use it as the configuration
       directory.
    """
    # Delegate to the shared config/cache resolver, using the XDG config base.
    return _get_config_or_cache_dir(_get_xdg_config_dir)
@_logged_cached('CACHEDIR=%s')
def get_cachedir():
    """
    Return the string path of the cache directory.

    The procedure used to find the directory is the same as for
    `get_configdir`, except using ``$XDG_CACHE_HOME``/``$HOME/.cache``
    instead.
    """
    # Same resolver as get_configdir, but rooted at the XDG cache base.
    return _get_config_or_cache_dir(_get_xdg_cache_dir)
@_logged_cached('matplotlib data path: %s')
def get_data_path():
    """Return the path to Matplotlib data (the ``mpl-data`` directory)."""
    # mpl-data ships next to this module, inside the installed package.
    return str(Path(__file__).parent / "mpl-data")
def matplotlib_fname():
    """
    Get the location of the config file.

    The file location is determined in the following order

    - ``$PWD/matplotlibrc``
    - ``$MATPLOTLIBRC`` if it is not a directory
    - ``$MATPLOTLIBRC/matplotlibrc``
    - ``$MPLCONFIGDIR/matplotlibrc``
    - On Linux,
        - ``$XDG_CONFIG_HOME/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
          is defined)
        - or ``$HOME/.config/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME``
          is not defined)
    - On other platforms,
      - ``$HOME/.matplotlib/matplotlibrc`` if ``$HOME`` is defined
    - Lastly, it looks in ``$MATPLOTLIBDATA/matplotlibrc``, which should always
      exist.
    """
    def _candidates():
        # Yield relative names and rely on downstream code to make them
        # absolute; this avoids querying the cwd directly, which can fail if
        # the user's cwd no longer exists.
        yield 'matplotlibrc'
        env_rc = os.environ.get('MATPLOTLIBRC')
        if env_rc is not None:
            yield env_rc
            yield os.path.join(env_rc, 'matplotlibrc')
        yield os.path.join(get_configdir(), 'matplotlibrc')
        yield os.path.join(get_data_path(), 'matplotlibrc')

    for candidate in _candidates():
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate
    raise RuntimeError("Could not find matplotlibrc file; your Matplotlib "
                       "install is broken")
# The three tables below drive rcParam deprecation handling in RcParams
# __setitem__/__getitem__ and in _rc_params_in_file.

# rcParams deprecated and automatically mapped to another key.
# Values are tuples of (version, new_name, f_old2new, f_new2old).
_deprecated_map = {}

# rcParams deprecated; some can manually be mapped to another key.
# Values are tuples of (version, new_name_or_None).
_deprecated_ignore_map = {}

# rcParams deprecated; can use None to suppress warnings; remain actually
# listed in the rcParams.
# Values are tuples of (version,)
_deprecated_remain_as_none = {}
@_docstring.Substitution(
    "\n".join(map("- {}".format, sorted(rcsetup._validators, key=str.lower)))
)
class RcParams(MutableMapping, dict):
    """
    A dict-like key-value store for config parameters, including validation.

    Validating functions are defined and associated with rc parameters in
    :mod:`matplotlib.rcsetup`.

    The list of rcParams is:

    %s

    See Also
    --------
    :ref:`customizing-with-matplotlibrc-files`
    """

    # Mapping of rcParam name -> validator callable, shared by all instances.
    validate = rcsetup._validators

    # validate values on the way in
    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)

    def _set(self, key, val):
        """
        Directly write data bypassing deprecation and validation logic.

        Notes
        -----
        As end user or downstream library you almost always should use
        ``rcParams[key] = val`` and not ``_set()``.

        There are only very few special cases that need direct data access.
        These cases previously used ``dict.__setitem__(rcParams, key, val)``,
        which is now deprecated and replaced by ``rcParams._set(key, val)``.

        Even though private, we guarantee API stability for ``rcParams._set``,
        i.e. it is subject to Matplotlib's API and deprecation policy.

        :meta public:
        """
        dict.__setitem__(self, key, val)

    def _get(self, key):
        """
        Directly read data bypassing deprecation, backend and validation
        logic.

        Notes
        -----
        As end user or downstream library you almost always should use
        ``val = rcParams[key]`` and not ``_get()``.

        There are only very few special cases that need direct data access.
        These cases previously used ``dict.__getitem__(rcParams, key, val)``,
        which is now deprecated and replaced by ``rcParams._get(key)``.

        Even though private, we guarantee API stability for ``rcParams._get``,
        i.e. it is subject to Matplotlib's API and deprecation policy.

        :meta public:
        """
        return dict.__getitem__(self, key)

    def _update_raw(self, other_params):
        """
        Directly update the data from *other_params*, bypassing deprecation,
        backend and validation logic on both sides.

        This ``rcParams._update_raw(params)`` replaces the previous pattern
        ``dict.update(rcParams, params)``.

        Parameters
        ----------
        other_params : dict or `.RcParams`
            The input mapping from which to update.
        """
        if isinstance(other_params, RcParams):
            # Use the raw dict items to skip the deprecation logic that the
            # RcParams mapping interface would otherwise apply.
            other_params = dict.items(other_params)
        dict.update(self, other_params)

    def _ensure_has_backend(self):
        """
        Ensure that a "backend" entry exists.

        Normally, the default matplotlibrc file contains *no* entry for "backend" (the
        corresponding line starts with ##, not #; we fill in _auto_backend_sentinel
        in that case. However, packagers can set a different default backend
        (resulting in a normal `#backend: foo` line) in which case we should *not*
        fill in _auto_backend_sentinel.
        """
        dict.setdefault(self, "backend", rcsetup._auto_backend_sentinel)

    def __setitem__(self, key, val):
        # The surrounding try maps a missing validator (KeyError from
        # ``self.validate[key]``) to a friendlier KeyError message.
        try:
            if key in _deprecated_map:
                version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
                _api.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                key = alt_key
                val = alt_val(val)
            elif key in _deprecated_remain_as_none and val is not None:
                version, = _deprecated_remain_as_none[key]
                _api.warn_deprecated(version, name=key, obj_type="rcparam")
            elif key in _deprecated_ignore_map:
                version, alt_key = _deprecated_ignore_map[key]
                _api.warn_deprecated(
                    version, name=key, obj_type="rcparam", alternative=alt_key)
                # Deprecated-and-ignored keys are dropped without storing.
                return
            elif key == 'backend':
                if val is rcsetup._auto_backend_sentinel:
                    if 'backend' in self:
                        # Don't overwrite an explicitly chosen backend with
                        # the auto-resolution sentinel.
                        return
            try:
                cval = self.validate[key](val)
            except ValueError as ve:
                raise ValueError(f"Key {key}: {ve}") from None
            self._set(key, cval)
        except KeyError as err:
            raise KeyError(
                f"{key} is not a valid rc parameter (see rcParams.keys() for "
                f"a list of valid parameters)") from err

    def __getitem__(self, key):
        if key in _deprecated_map:
            version, alt_key, alt_val, inverse_alt = _deprecated_map[key]
            _api.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            # Read the replacement key and convert back to the old form.
            return inverse_alt(self._get(alt_key))

        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            _api.warn_deprecated(
                version, name=key, obj_type="rcparam", alternative=alt_key)
            return self._get(alt_key) if alt_key else None

        # In theory, this should only ever be used after the global rcParams
        # has been set up, but better be safe e.g. in presence of breakpoints.
        elif key == "backend" and self is globals().get("rcParams"):
            val = self._get(key)
            if val is rcsetup._auto_backend_sentinel:
                # Trigger auto-resolution of the backend via pyplot.
                from matplotlib import pyplot as plt
                plt.switch_backend(rcsetup._auto_backend_sentinel)

        return self._get(key)

    def _get_backend_or_none(self):
        """Get the requested backend, if any, without triggering resolution."""
        backend = self._get("backend")
        return None if backend is rcsetup._auto_backend_sentinel else backend

    def __repr__(self):
        class_name = self.__class__.__name__
        indent = len(class_name) + 1
        with _api.suppress_matplotlib_deprecation_warning():
            repr_split = pprint.pformat(dict(self), indent=1,
                                        width=80 - indent).split('\n')
        repr_indented = ('\n' + ' ' * indent).join(repr_split)
        return f'{class_name}({repr_indented})'

    def __str__(self):
        return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items())))

    def __iter__(self):
        """Yield sorted list of keys."""
        with _api.suppress_matplotlib_deprecation_warning():
            yield from sorted(dict.__iter__(self))

    def __len__(self):
        return dict.__len__(self)

    def find_all(self, pattern):
        """
        Return the subset of this RcParams dictionary whose keys match,
        using :func:`re.search`, the given ``pattern``.

        .. note::

            Changes to the returned dictionary are *not* propagated to
            the parent RcParams dictionary.

        """
        pattern_re = re.compile(pattern)
        return RcParams((key, value)
                        for key, value in self.items()
                        if pattern_re.search(key))

    def copy(self):
        """Copy this RcParams instance."""
        rccopy = RcParams()
        for k in self:  # Skip deprecations and revalidation.
            rccopy._set(k, self._get(k))
        return rccopy
def rc_params(fail_on_error=False):
    """Construct a `RcParams` instance from the default Matplotlib rc file."""
    default_rc_file = matplotlib_fname()
    return rc_params_from_file(default_rc_file, fail_on_error)
@functools.cache
def _get_ssl_context():
    # Build (once, via the cache) a default SSL context backed by certifi's
    # CA bundle; return None if certifi is not installed, in which case https
    # downloads may not work.
    try:
        import certifi
    except ImportError:
        _log.debug("Could not import certifi.")
        return None
    import ssl
    return ssl.create_default_context(cafile=certifi.where())
@contextlib.contextmanager
def _open_file_or_url(fname):
if (isinstance(fname, str)
and fname.startswith(('http://', 'https://', 'ftp://', 'file:'))):
import urllib.request
ssl_ctx = _get_ssl_context()
if ssl_ctx is None:
_log.debug(
"Could not get certifi ssl context, https may not work."
)
with urllib.request.urlopen(fname, context=ssl_ctx) as f:
yield (line.decode('utf-8') for line in f)
else:
fname = os.path.expanduser(fname)
with open(fname, encoding='utf-8') as f:
yield f
def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False):
    """
    Construct a `RcParams` instance from file *fname*.

    Unlike `rc_params_from_file`, the configuration class only contains the
    parameters specified in the file (i.e. default values are not filled in).

    Parameters
    ----------
    fname : path-like
        The loaded file.
    transform : callable, default: the identity function
        A function called on each individual line of the file to transform it,
        before further parsing.
    fail_on_error : bool, default: False
        Whether invalid entries should result in an exception or a warning.
    """
    import matplotlib as mpl
    rc_temp = {}
    # First pass: read all "key: value" pairs, remembering the line text and
    # number for diagnostics; validation happens in the second pass below.
    with _open_file_or_url(fname) as fd:
        try:
            for line_no, line in enumerate(fd, 1):
                line = transform(line)
                strippedline = cbook._strip_comment(line)
                if not strippedline:
                    continue
                tup = strippedline.split(':', 1)
                if len(tup) != 2:
                    _log.warning('Missing colon in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                    continue
                key, val = tup
                key = key.strip()
                val = val.strip()
                if val.startswith('"') and val.endswith('"'):
                    val = val[1:-1]  # strip double quotes
                if key in rc_temp:
                    # Later entries win, but warn about the duplicate.
                    _log.warning('Duplicate key in file %r, line %d (%r)',
                                 fname, line_no, line.rstrip('\n'))
                rc_temp[key] = (val, line, line_no)
        except UnicodeDecodeError:
            _log.warning('Cannot decode configuration file %r as utf-8.',
                         fname)
            raise

    # Second pass: validate each value via RcParams.__setitem__, handling
    # deprecated and unknown keys.
    config = RcParams()

    for key, (val, line, line_no) in rc_temp.items():
        if key in rcsetup._validators:
            if fail_on_error:
                config[key] = val  # try to convert to proper type or raise
            else:
                try:
                    config[key] = val  # try to convert to proper type or skip
                except Exception as msg:
                    _log.warning('Bad value in file %r, line %d (%r): %s',
                                 fname, line_no, line.rstrip('\n'), msg)
        elif key in _deprecated_ignore_map:
            version, alt_key = _deprecated_ignore_map[key]
            _api.warn_deprecated(
                version, name=key, alternative=alt_key, obj_type='rcparam',
                addendum="Please update your matplotlibrc.")
        else:
            # __version__ must be looked up as an attribute to trigger the
            # module-level __getattr__.
            version = ('main' if '.post' in mpl.__version__
                       else f'v{mpl.__version__}')
            _log.warning("""
Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r)
You probably need to get an updated matplotlibrc file from
https://github.com/matplotlib/matplotlib/blob/%(version)s/lib/matplotlib/mpl-data/matplotlibrc
or from the matplotlib source distribution""",
                         dict(key=key, fname=fname, line_no=line_no,
                              line=line.rstrip('\n'), version=version))
    return config
def rc_params_from_file(fname, fail_on_error=False, use_default_template=True):
    """
    Construct a `RcParams` from file *fname*.

    Parameters
    ----------
    fname : str or path-like
        A file with Matplotlib rc settings.
    fail_on_error : bool
        If True, raise an error when the parser fails to convert a parameter.
    use_default_template : bool
        If True, initialize with default parameters before updating with those
        in the given file. If False, the configuration class only contains the
        parameters specified in the file. (Useful for updating dicts.)
    """
    config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error)

    if not use_default_template:
        return config_from_file

    # Overlay the file's settings on top of the built-in defaults.
    with _api.suppress_matplotlib_deprecation_warning():
        config = RcParams({**rcParamsDefault, **config_from_file})

    # A non-empty LaTeX preamble is unsupported; warn loudly but proceed.
    if "".join(config['text.latex.preamble']):
        _log.info("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
""", '\n'.join(config['text.latex.preamble']))

    _log.debug('loaded rc file %s', fname)

    return config
# Load the shipped matplotlibrc template as the set of defaults; entries are
# commented out with a single leading "#", which is stripped so they parse.
rcParamsDefault = _rc_params_in_file(
    cbook._get_data_path("matplotlibrc"),
    # Strip leading comment.
    transform=lambda line: line[1:] if line.startswith("#") else line,
    fail_on_error=True)
rcParamsDefault._update_raw(rcsetup._hardcoded_defaults)
rcParamsDefault._ensure_has_backend()

rcParams = RcParams()  # The global instance.
rcParams._update_raw(rcParamsDefault)
# Layer the user's matplotlibrc (if any) on top of the defaults.
rcParams._update_raw(_rc_params_in_file(matplotlib_fname()))
# Snapshot of the as-loaded state, used by rc_file_defaults().
rcParamsOrig = rcParams.copy()
with _api.suppress_matplotlib_deprecation_warning():
    # This also checks that all rcParams are indeed listed in the template.
    # Assigning to rcsetup.defaultParams is left only for backcompat.
    defaultParams = rcsetup.defaultParams = {
        # We want to resolve deprecated rcParams, but not backend...
        key: [(rcsetup._auto_backend_sentinel if key == "backend" else
               rcParamsDefault[key]),
              validator]
        for key, validator in rcsetup._validators.items()}
if rcParams['axes.formatter.use_locale']:
    # Honor the user's locale for number formatting (e.g. tick labels).
    locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
    """
    Set the current `.rcParams`.  *group* is the grouping for the rc, e.g.,
    for ``lines.linewidth`` the group is ``lines``, for
    ``axes.facecolor``, the group is ``axes``, and so on.  Group may
    also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
    *kwargs* is a dictionary attribute name/value pairs, e.g.,::

      rc('lines', linewidth=2, color='r')

    sets the current `.rcParams` and is equivalent to::

      rcParams['lines.linewidth'] = 2
      rcParams['lines.color'] = 'r'

    The following aliases are available to save typing for interactive users:

    =====   =================
    Alias   Property
    =====   =================
    'lw'    'linewidth'
    'ls'    'linestyle'
    'c'     'color'
    'fc'    'facecolor'
    'ec'    'edgecolor'
    'mew'   'markeredgewidth'
    'aa'    'antialiased'
    =====   =================

    Thus you could abbreviate the above call as::

          rc('lines', lw=2, c='r')

    Note you can use python's kwargs dictionary facility to store
    dictionaries of default parameters.  e.g., you can customize the
    font rc as follows::

      font = {'family' : 'monospace',
              'weight' : 'bold',
              'size'   : 'larger'}
      rc('font', **font)  # pass in the font dict as kwargs

    This enables you to easily switch between several configurations.  Use
    ``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to
    restore the default `.rcParams` after changes.

    Notes
    -----
    Similar functionality is available by using the normal dict interface, i.e.
    ``rcParams.update({"lines.linewidth": 2, ...})`` (but ``rcParams.update``
    does not support abbreviations or grouping).
    """
    aliases = {
        'lw': 'linewidth',
        'ls': 'linestyle',
        'c': 'color',
        'fc': 'facecolor',
        'ec': 'edgecolor',
        'mew': 'markeredgewidth',
        'aa': 'antialiased',
    }

    # Accept a single group name or an iterable of them.
    groups = (group,) if isinstance(group, str) else group
    for g in groups:
        for k, v in kwargs.items():
            # Expand interactive-user aliases before building the full key.
            name = aliases.get(k) or k
            key = f'{g}.{name}'
            try:
                rcParams[key] = v
            except KeyError as err:
                raise KeyError(('Unrecognized key "%s" for group "%s" and '
                                'name "%s"') % (key, g, name)) from err
def rcdefaults():
    """
    Restore the `.rcParams` from Matplotlib's internal default style.

    Style-blacklisted `.rcParams` (defined in
    ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.

    See Also
    --------
    matplotlib.rc_file_defaults
        Restore the `.rcParams` from the rc file originally loaded by
        Matplotlib.
    matplotlib.style.use
        Use a specific style file.  Call ``style.use('default')`` to restore
        the default style.
    """
    # Deprecation warnings were already handled when creating rcParamsDefault,
    # no need to reemit them here.
    with _api.suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        rcParams.clear()
        restored = {k: v for k, v in rcParamsDefault.items()
                    if k not in STYLE_BLACKLIST}
        rcParams.update(restored)
def rc_file_defaults():
    """
    Restore the `.rcParams` from the original rc file loaded by Matplotlib.

    Style-blacklisted `.rcParams` (defined in
    ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.
    """
    # Deprecation warnings were already handled when creating rcParamsOrig, no
    # need to reemit them here.
    with _api.suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        restored = {k: rcParamsOrig[k] for k in rcParamsOrig
                    if k not in STYLE_BLACKLIST}
        rcParams.update(restored)
def rc_file(fname, *, use_default_template=True):
    """
    Update `.rcParams` from file.

    Style-blacklisted `.rcParams` (defined in
    ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.

    Parameters
    ----------
    fname : str or path-like
        A file with Matplotlib rc settings.

    use_default_template : bool
        If True, initialize with default parameters before updating with those
        in the given file. If False, the current configuration persists
        and only the parameters specified in the file are updated.
    """
    # Deprecation warnings were already handled in rc_params_from_file, no need
    # to reemit them here.
    with _api.suppress_matplotlib_deprecation_warning():
        from .style.core import STYLE_BLACKLIST
        rc_from_file = rc_params_from_file(
            fname, use_default_template=use_default_template)
        filtered = {k: rc_from_file[k] for k in rc_from_file
                    if k not in STYLE_BLACKLIST}
        rcParams.update(filtered)
@contextlib.contextmanager
def rc_context(rc=None, fname=None):
    """
    Return a context manager for temporarily changing rcParams.

    The :rc:`backend` will not be reset by the context manager.

    rcParams changed both through the context manager invocation and
    in the body of the context will be reset on context exit.

    Parameters
    ----------
    rc : dict
        The rcParams to temporarily set.
    fname : str or path-like
        A file with Matplotlib rc settings. If both *fname* and *rc* are given,
        settings from *rc* take precedence.

    See Also
    --------
    :ref:`customizing-with-matplotlibrc-files`

    Examples
    --------
    Passing explicit values via a dict::

        with mpl.rc_context({'interactive': False}):
            fig, ax = plt.subplots()
            ax.plot(range(3), range(3))
            fig.savefig('example.png')
            plt.close(fig)

    Loading settings from a file::

         with mpl.rc_context(fname='print.rc'):
             plt.plot(x, y)  # uses 'print.rc'

    Setting in the context body::

        with mpl.rc_context():
            # will be reset
            mpl.rcParams['lines.linewidth'] = 5
            plt.plot(x, y)
    """
    saved = dict(rcParams.copy())
    # The backend is deliberately excluded from the snapshot so it is never
    # reset on exit.
    saved.pop('backend')
    try:
        if fname:
            rc_file(fname)
        if rc:
            rcParams.update(rc)
        yield
    finally:
        rcParams._update_raw(saved)  # Revert to the original rcs.
def use(backend, *, force=True):
    """
    Select the backend used for rendering and GUI integration.

    If pyplot is already imported, `~matplotlib.pyplot.switch_backend` is used
    and if the new backend is different than the current backend, all Figures
    will be closed.

    Parameters
    ----------
    backend : str
        The backend to switch to.  This can either be one of the standard
        backend names, which are case-insensitive:

        - interactive backends:
          GTK3Agg, GTK3Cairo, GTK4Agg, GTK4Cairo, MacOSX, nbAgg, notebook, QtAgg,
          QtCairo, TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo, Qt5Agg, Qt5Cairo

        - non-interactive backends:
          agg, cairo, pdf, pgf, ps, svg, template

        or a string of the form: ``module://my.module.name``.

        notebook is a synonym for nbAgg.

        Switching to an interactive backend is not possible if an unrelated
        event loop has already been started (e.g., switching to GTK3Agg if a
        TkAgg window has already been opened).  Switching to a
        non-interactive backend is always possible.

    force : bool, default: True
        If True (the default), raise an `ImportError` if the backend cannot be
        set up (either because it fails to import, or because an incompatible
        GUI interactive framework is already running); if False, silently
        ignore the failure.

    See Also
    --------
    :ref:`backends`
    matplotlib.get_backend
    matplotlib.pyplot.switch_backend

    """
    name = rcsetup.validate_backend(backend)
    # don't (prematurely) resolve the "auto" backend setting
    if rcParams._get_backend_or_none() == name:
        # Nothing to do if the requested backend is already set
        pass
    else:
        # if pyplot is not already imported, do not import it.  Doing
        # so may trigger a `plt.switch_backend` to the _default_ backend
        # before we get a chance to change to the one the user just requested
        plt = sys.modules.get('matplotlib.pyplot')
        # if pyplot is imported, then try to change backends
        if plt is not None:
            try:
                # we need this import check here to re-raise if the
                # user does not have the libraries to support their
                # chosen backend installed.
                plt.switch_backend(name)
            except ImportError:
                if force:
                    raise
        # if we have not imported pyplot, then we can set the rcParam
        # value which will be respected when the user finally imports
        # pyplot
        else:
            rcParams['backend'] = backend
    # if the user has asked for a given backend, do not helpfully
    # fallback
    rcParams['backend_fallback'] = False
# Respect the MPLBACKEND environment variable (if set and non-empty) as the
# initial backend selection at import time.
if os.environ.get('MPLBACKEND'):
    rcParams['backend'] = os.environ.get('MPLBACKEND')
def get_backend(*, auto_select=True):
    """
    Return the name of the current backend.

    Parameters
    ----------
    auto_select : bool, default: True
        Whether to trigger backend resolution if no backend has been
        selected so far. If True, this ensures that a valid backend
        is returned. If False, this returns None if no backend has been
        selected so far.

        .. versionadded:: 3.10

        .. admonition:: Provisional

           The *auto_select* flag is provisional. It may be changed or removed
           without prior warning.

    See Also
    --------
    matplotlib.use
    """
    if not auto_select:
        # Raw read: never trigger resolution of the auto-backend sentinel.
        backend = rcParams._get('backend')
        return None if backend is rcsetup._auto_backend_sentinel else backend
    # A plain item lookup resolves the backend via RcParams.__getitem__.
    return rcParams['backend']
def interactive(b):
    """
    Set whether to redraw after every plotting command (e.g. `.pyplot.xlabel`).

    Parameters
    ----------
    b : bool
        Whether interactive (auto-redraw) mode is enabled.
    """
    rcParams['interactive'] = b
def is_interactive():
    """
    Return whether to redraw after every plotting command.

    .. note::

        This function is only intended for use in backends. End users should
        use `.pyplot.isinteractive` instead.
    """
    return rcParams['interactive']
def _val_or_rc(val, rc_name):
    """
    If *val* is None, return ``mpl.rcParams[rc_name]``, otherwise return val.
    """
    if val is not None:
        return val
    return rcParams[rc_name]
def _init_tests():
    # Warn when the FreeType build differs from the one the test image
    # baselines were generated against; mismatches cause spurious image
    # comparison failures.

    # The version of FreeType to install locally for running the tests.  This must match
    # the value in `meson.build`.
    LOCAL_FREETYPE_VERSION = '2.6.1'

    from matplotlib import ft2font
    if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or
            ft2font.__freetype_build_type__ != 'local'):
        _log.warning(
            "Matplotlib is not built with the correct FreeType version to run tests.  "
            "Rebuild without setting system-freetype=true in Meson setup options.  "
            "Expect many image comparison failures below.  "
            "Expected freetype version %s.  "
            "Found freetype version %s.  "
            "Freetype build type is %slocal.",
            LOCAL_FREETYPE_VERSION,
            ft2font.__freetype_version__,
            "" if ft2font.__freetype_build_type__ == 'local' else "not ")
def _replacer(data, value):
    """
    Either returns ``data[value]`` or passes ``data`` back, converts either to
    a sequence.
    """
    # Only string values are treated as potential keys into *data*.
    if isinstance(value, str):
        try:
            value = data[value]
        except Exception:
            # Key not present (or *data* not indexable): keep the value as-is.
            pass
    return cbook.sanitize_sequence(value)
def _label_from_arg(y, default_name):
try:
return y.name
except AttributeError:
if isinstance(default_name, str):
return default_name
return None
def _add_data_doc(docstring, replace_names):
    """
    Add documentation for a *data* field to the given docstring.

    Parameters
    ----------
    docstring : str
        The input docstring.
    replace_names : list of str or None
        The list of parameter names which arguments should be replaced by
        ``data[name]`` (if ``data[name]`` does not throw an exception). If
        None, replacement is attempted for all arguments.

    Returns
    -------
    str
        The augmented docstring.
    """
    # No docstring, or an explicitly empty replace list: nothing to document.
    if (docstring is None
            or replace_names is not None and len(replace_names) == 0):
        return docstring
    docstring = inspect.cleandoc(docstring)
    data_doc = ("""\
If given, all parameters also accept a string ``s``, which is
interpreted as ``data[s]`` if ``s`` is a key in ``data``."""
                if replace_names is None else f"""\
If given, the following parameters also accept a string ``s``, which is
interpreted as ``data[s]`` if ``s`` is a key in ``data``:
{', '.join(map('*{}*'.format, replace_names))}""")
    # using string replacement instead of formatting has the advantages
    # 1) simpler indent handling
    # 2) prevent problems with formatting characters '{', '%' in the docstring
    if _log.level <= logging.DEBUG:
        # test_data_parameter_replacement() tests against these log messages
        # make sure to keep message and test in sync
        if "data : indexable object, optional" not in docstring:
            _log.debug("data parameter docstring error: no data parameter")
        if 'DATA_PARAMETER_PLACEHOLDER' not in docstring:
            _log.debug("data parameter docstring error: missing placeholder")
    return docstring.replace(' DATA_PARAMETER_PLACEHOLDER', data_doc)
def _preprocess_data(func=None, *, replace_names=None, label_namer=None):
    """
    A decorator to add a 'data' kwarg to a function.

    When applied::

        @_preprocess_data()
        def func(ax, *args, **kwargs): ...

    the signature is modified to ``decorated(ax, *args, data=None, **kwargs)``
    with the following behavior:

    - if called with ``data=None``, forward the other arguments to ``func``;
    - otherwise, *data* must be a mapping; for any argument passed in as a
      string ``name``, replace the argument by ``data[name]`` (if this does not
      throw an exception), then forward the arguments to ``func``.

    In either case, any argument that is a `MappingView` is also converted to a
    list.

    Parameters
    ----------
    replace_names : list of str or None, default: None
        The list of parameter names for which lookup into *data* should be
        attempted. If None, replacement is attempted for all arguments.
    label_namer : str, default: None
        If set e.g. to "namer" (which must be a kwarg in the function's
        signature -- not as ``**kwargs``), if the *namer* argument passed in is
        a (string) key of *data* and no *label* kwarg is passed, then use the
        (string) value of the *namer* as *label*. ::

            @_preprocess_data(label_namer="foo")
            def func(foo, label=None): ...

            func("key", data={"key": value})
            # is equivalent to
            func.__wrapped__(value, label="key")
    """
    if func is None:  # Return the actual decorator.
        return functools.partial(
            _preprocess_data,
            replace_names=replace_names, label_namer=label_namer)

    sig = inspect.signature(func)
    varargs_name = None
    varkwargs_name = None
    arg_names = []
    params = list(sig.parameters.values())
    # Classify the wrapped function's parameters so we know where the new
    # keyword-only "data" parameter can legally be inserted.
    for p in params:
        if p.kind is Parameter.VAR_POSITIONAL:
            varargs_name = p.name
        elif p.kind is Parameter.VAR_KEYWORD:
            varkwargs_name = p.name
        else:
            arg_names.append(p.name)
    data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None)
    if varkwargs_name:
        # "data" must precede **kwargs in the parameter list.
        params.insert(-1, data_param)
    else:
        params.append(data_param)
    new_sig = sig.replace(parameters=params)
    arg_names = arg_names[1:]  # remove the first "ax" / self arg
    assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, (
        "Matplotlib internal error: invalid replace_names "
        f"({replace_names!r}) for {func.__name__!r}")
    assert label_namer is None or label_namer in arg_names, (
        "Matplotlib internal error: invalid label_namer "
        f"({label_namer!r}) for {func.__name__!r}")

    @functools.wraps(func)
    def inner(ax, *args, data=None, **kwargs):
        if data is None:
            # No data mapping: only sanitize MappingView arguments.
            return func(
                ax,
                *map(cbook.sanitize_sequence, args),
                **{k: cbook.sanitize_sequence(v) for k, v in kwargs.items()})

        bound = new_sig.bind(ax, *args, **kwargs)
        auto_label = (bound.arguments.get(label_namer)
                      or bound.kwargs.get(label_namer))

        for k, v in bound.arguments.items():
            if k == varkwargs_name:
                # Replace string values inside the **kwargs dict in place.
                for k1, v1 in v.items():
                    if replace_names is None or k1 in replace_names:
                        v[k1] = _replacer(data, v1)
            elif k == varargs_name:
                # *args entries are only replaced when replacement is
                # unrestricted (positional names cannot be matched by name).
                if replace_names is None:
                    bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v)
            else:
                if replace_names is None or k in replace_names:
                    bound.arguments[k] = _replacer(data, v)

        new_args = bound.args
        new_kwargs = bound.kwargs

        args_and_kwargs = {**bound.arguments, **bound.kwargs}
        if label_namer and "label" not in args_and_kwargs:
            # Auto-label from the namer argument (pre-replacement string key).
            new_kwargs["label"] = _label_from_arg(
                args_and_kwargs.get(label_namer), auto_label)

        return func(*new_args, **new_kwargs)

    inner.__doc__ = _add_data_doc(inner.__doc__, replace_names)
    inner.__signature__ = new_sig
    return inner
# Record the effective startup state once at import time to aid debugging.
_log.debug('interactive is %s', is_interactive())
_log.debug('platform is %s', sys.platform)
@_api.deprecated("3.10", alternative="matplotlib.cbook.sanitize_sequence")
def sanitize_sequence(data):
    """Deprecated alias; use `matplotlib.cbook.sanitize_sequence` instead."""
    return cbook.sanitize_sequence(data)
@_api.deprecated("3.10", alternative="matplotlib.rcsetup.validate_backend")
def validate_backend(s):
    """Deprecated alias; use `matplotlib.rcsetup.validate_backend` instead."""
    return rcsetup.validate_backend(s)
# workaround: we must defer colormaps import to after loading rcParams, because
# colormap creation depends on rcParams
from matplotlib.cm import _colormaps as colormaps # noqa: E402
from matplotlib.cm import _multivar_colormaps as multivar_colormaps # noqa: E402
from matplotlib.cm import _bivar_colormaps as bivar_colormaps # noqa: E402
from matplotlib.colors import _color_sequences as color_sequences # noqa: E402
venv\Lib\site-packages\numpy\conftest.py
"""
Pytest configuration and fixtures for the Numpy test suite.
"""
import os
import string
import sys
import tempfile
import warnings
from contextlib import contextmanager
import hypothesis
import pytest
import numpy
import numpy as np
from numpy._core._multiarray_tests import get_fpu_mode
from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA
from numpy.testing._private.utils import NOGIL_BUILD
try:
from scipy_doctest.conftest import dt_config
HAVE_SCPDT = True
except ModuleNotFoundError:
HAVE_SCPDT = False
# FPU mode seen at the previous collection step (see pytest_itemcollected).
_old_fpu_mode = None
# Maps collected item -> (mode before, mode after) when a change was detected.
_collect_results = {}

# Use a known and persistent tmpdir for hypothesis' caches, which
# can be automatically cleared by the OS or user.
hypothesis.configuration.set_hypothesis_home_dir(
    os.path.join(tempfile.gettempdir(), ".hypothesis")
)

# We register two custom profiles for Numpy - for details see
# https://hypothesis.readthedocs.io/en/latest/settings.html
# The first is designed for our own CI runs; the latter also
# forces determinism and is designed for use via np.test()
hypothesis.settings.register_profile(
    name="numpy-profile", deadline=None, print_blob=True,
)
hypothesis.settings.register_profile(
    name="np.test() profile",
    deadline=None, print_blob=True, database=None, derandomize=True,
    suppress_health_check=list(hypothesis.HealthCheck),
)
# Note that the default profile is chosen based on the presence
# of pytest.ini, but can be overridden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
)

# The experimentalAPI is used in _umath_tests
os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
def pytest_configure(config):
    """Register numpy's custom pytest markers."""
    for marker in (
        "valgrind_error: Tests that are known to error under valgrind.",
        "leaks_references: Tests that are known to leak references.",
        "slow: Tests that are very slow.",
        "slow_pypy: Tests that are very slow on pypy.",
    ):
        config.addinivalue_line("markers", marker)
def pytest_addoption(parser):
    """Add the ``--available-memory`` command-line option.

    The value is exported as ``NPY_AVAILABLE_MEM`` in `pytest_sessionstart`,
    allowing memory-hungry tests to skip themselves when the budget is small.
    """
    # Fixed: the original adjacent string literals concatenated to
    # "determinedautomatically." (missing space), and "result to" was a
    # grammar error; the help text is otherwise unchanged.
    parser.addoption("--available-memory", action="store", default=None,
                     help=("Set amount of memory available for running the "
                           "test suite. This can result in tests requiring "
                           "especially large amounts of memory to be skipped. "
                           "Equivalent to setting environment variable "
                           "NPY_AVAILABLE_MEM. Default: determined "
                           "automatically."))
# GIL state when the session starts; pytest_terminal_summary compares it
# against the state at the end of the run on free-threaded builds.
gil_enabled_at_start = True
if NOGIL_BUILD:
    gil_enabled_at_start = sys._is_gil_enabled()
def pytest_sessionstart(session):
    """Propagate --available-memory into the NPY_AVAILABLE_MEM env var."""
    mem = session.config.getoption('available_memory')
    if mem is not None:
        os.environ['NPY_AVAILABLE_MEM'] = mem
def pytest_terminal_summary(terminalreporter, exitstatus, config):
    """Fail the run if the GIL was re-enabled while the tests executed."""
    # Only meaningful on free-threaded builds that started with the GIL off;
    # a C extension without the no-GIL declaration silently re-enables it.
    if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled():
        tr = terminalreporter
        tr.ensure_newline()
        tr.section("GIL re-enabled", sep="=", red=True, bold=True)
        tr.line("The GIL was re-enabled at runtime during the tests.")
        tr.line("This can happen with no test failures if the RuntimeWarning")
        tr.line("raised by Python when this happens is filtered by a test.")
        tr.line("")
        tr.line("Please ensure all new C modules declare support for running")
        tr.line("without the GIL. Any new tests that intentionally imports ")
        tr.line("code that re-enables the GIL should do so in a subprocess.")
        # Force a non-zero exit even when every test passed.
        pytest.exit("GIL re-enabled during tests", returncode=1)
# FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
    """
    Check FPU precision mode was not changed during test collection.

    The clumsy way we do it here is mainly necessary because numpy
    still uses yield tests, which can execute code at test collection
    time.
    """
    global _old_fpu_mode
    mode = get_fpu_mode()
    if _old_fpu_mode is None:
        # First collected item: record the baseline mode.
        _old_fpu_mode = mode
    elif mode != _old_fpu_mode:
        # Remember the offending item; check_fpu_mode reports it at run time.
        _collect_results[item] = (_old_fpu_mode, mode)
        _old_fpu_mode = mode
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU precision mode was not changed during the test.
    """
    old_mode = get_fpu_mode()
    yield
    new_mode = get_fpu_mode()
    if old_mode != new_mode:
        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
                             f"{new_mode:#x} during the test")

    # Also surface any mode change that pytest_itemcollected recorded for
    # this item during collection.
    collect_result = _collect_results.get(request.node)
    if collect_result is not None:
        old_mode, new_mode = collect_result
        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
                             f"{new_mode:#x} when collecting the test")
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Make `np` available in every doctest without an explicit import.
    doctest_namespace['np'] = numpy
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
    # Pin the hash seed so hash-order-dependent output is reproducible.
    monkeypatch.setenv('PYTHONHASHSEED', '0')
# scipy_doctest configuration: only active when the optional scipy_doctest
# package is importable (HAVE_SCPDT set in the import block above).
if HAVE_SCPDT:
    @contextmanager
    def warnings_errors_and_rng(test=None):
        """Filter out the wall of DeprecationWarnings.
        """
        # Message prefixes of DeprecationWarnings that doctests may trigger.
        msgs = ["The numpy.linalg.linalg",
                "The numpy.fft.helper",
                "dep_util",
                "pkg_resources",
                "numpy.core.umath",
                "msvccompiler",
                "Deprecated call",
                "numpy.core",
                "Importing from numpy.matlib",
                "This function is deprecated.",    # random_integers
                "Data type alias 'a'",  # numpy.rec.fromfile
                "Arrays of 2-dimensional vectors",  # matlib.cross
                "`in1d` is deprecated", ]
        msg = "|".join(msgs)

        # RuntimeWarnings from floating-point corner cases in examples.
        msgs_r = [
            "invalid value encountered",
            "divide by zero encountered"
        ]
        msg_r = "|".join(msgs_r)

        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', category=DeprecationWarning, message=msg
            )
            warnings.filterwarnings(
                'ignore', category=RuntimeWarning, message=msg_r
            )
            yield

    # find and check doctests under this context manager
    dt_config.user_context_mgr = warnings_errors_and_rng

    # numpy specific tweaks from refguide-check
    dt_config.rndm_markers.add('#uninitialized')
    dt_config.rndm_markers.add('# uninitialized')

    # make the checker pick on mismatched dtypes
    dt_config.strict_check = True

    import doctest
    dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    # recognize the StringDType repr
    dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType

    # temporary skips
    dt_config.skiplist = {
        'numpy.savez',  # unclosed file
        'numpy.matlib.savez',
        'numpy.__array_namespace_info__',
        'numpy.matlib.__array_namespace_info__',
    }

    # xfail problematic tutorials
    dt_config.pytest_extra_xfail = {
        'how-to-verify-bug.rst': '',
        'c-info.ufunc-tutorial.rst': '',
        'basics.interoperability.rst': 'needs pandas',
        'basics.dispatch.rst': 'errors out in /testing/overrides.py',
        'basics.subclassing.rst': '.. testcode:: admonitions not understood',
        'misc.rst': 'manipulates warnings',
    }

    # ignores are for things fail doctest collection (optionals etc)
    dt_config.pytest_extra_ignore = [
        'numpy/distutils',
        'numpy/_core/cversions.py',
        'numpy/_pyinstaller',
        'numpy/random/_examples',
        'numpy/f2py/_backends/_distutils.py',
    ]
@pytest.fixture
def random_string_list():
    # Build 1000 random alphanumeric characters and view them as an array
    # of 10 fixed-width 100-character strings.
    chars = list(string.ascii_letters + string.digits)
    chars = np.array(chars, dtype="U1")
    ret = np.random.choice(chars, size=100 * 10, replace=True)
    return ret.view("U100")
@pytest.fixture(params=[True, False])
def coerce(request):
    # Parametrized flag: whether StringDType coerces non-string values.
    return request.param
@pytest.fixture(
    params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
    ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
)
def na_object(request):
    # Candidate NA sentinels for StringDType; "unset" means no na_object given.
    return request.param
@pytest.fixture()
def dtype(na_object, coerce):
    # A StringDType instance for every na_object/coerce combination.
    return get_stringdtype_dtype(na_object, coerce)
venv\Lib\site-packages\numpy\dtypes.py
"""
This module is home to specific dtypes related functionality and their classes.
For more general information about dtypes, also see `numpy.dtype` and
:ref:`arrays.dtypes`.
Similar to the builtin ``types`` module, this submodule defines types (classes)
that are not widely used directly.
.. versionadded:: NumPy 1.25
The dtypes module is new in NumPy 1.25. Previously DType classes were
only accessible indirectly.
DType classes
-------------
The following are the classes of the corresponding NumPy dtype instances and
NumPy scalar types. The classes can be used in ``isinstance`` checks and can
also be instantiated or used directly. Direct use of these classes is not
typical, since their scalar counterparts (e.g. ``np.float64``) or strings
like ``"float64"`` can be used.
"""
# See doc/source/reference/routines.dtypes.rst for module-level docs
__all__ = []
def _add_dtype_helper(DType, alias):
    # Function to add DTypes a bit more conveniently without channeling them
    # through `numpy._core._multiarray_umath` namespace or similar.
    from numpy import dtypes

    # Register under the class name first, then under the short alias
    # (if one was given) with the "numpy.dtypes." prefix stripped.
    names = [DType.__name__]
    if alias:
        names.append(alias.removeprefix("numpy.dtypes."))
    for name in names:
        setattr(dtypes, name, DType)
        __all__.append(name)
venv\Lib\site-packages\numpy\exceptions.py
"""
Exceptions and Warnings
=======================
General exceptions used by NumPy. Note that some exceptions may be module
specific, such as linear algebra errors.
.. versionadded:: NumPy 1.25
The exceptions module is new in NumPy 1.25. Older exceptions remain
available through the main NumPy namespace for compatibility.
.. currentmodule:: numpy.exceptions
Warnings
--------
.. autosummary::
:toctree: generated/
ComplexWarning Given when converting complex to real.
VisibleDeprecationWarning Same as a DeprecationWarning, but more visible.
RankWarning Issued when the design matrix is rank deficient.
Exceptions
----------
.. autosummary::
:toctree: generated/
AxisError Given when an axis was invalid.
DTypePromotionError Given when no common dtype could be found.
TooHardError Error specific to `numpy.shares_memory`.
"""
__all__ = [
    "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning",
    "TooHardError", "AxisError", "DTypePromotionError"]


# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
# NOTE(review): the message names numpy._globals (where this guard pattern
# originated) rather than this module — presumably copy-paste; confirm
# against upstream before changing the runtime string.
if '_is_loaded' in globals():
    raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ComplexWarning(RuntimeWarning):
    """
    The warning raised when casting a complex dtype to a real dtype.

    As implemented, casting a complex number to a real discards its imaginary
    part, but this behavior may not be what the user actually wants.
    """
    pass
class ModuleDeprecationWarning(DeprecationWarning):
    """Module deprecation warning.

    .. warning::

        This warning should not be used, since nose testing is not relevant
        anymore.

    The nose tester turns ordinary Deprecation warnings into test failures.
    That makes it hard to deprecate whole modules, because they get
    imported by default. So this is a special Deprecation warning that the
    nose tester will let pass without making tests fail.
    """
    pass
# Subclasses UserWarning (not DeprecationWarning) so it is shown by default.
class VisibleDeprecationWarning(UserWarning):
    """Visible deprecation warning.

    By default, python will not show deprecation warnings, so this class
    can be used when a very visible warning is helpful, for example because
    the usage is most likely a user bug.
    """
    pass
class RankWarning(RuntimeWarning):
    """Matrix rank warning.

    Issued by polynomial functions when the design matrix is rank deficient.
    """
    pass
# Exception used in shares_memory()
class TooHardError(RuntimeError):
    """``max_work`` was exceeded.

    This is raised whenever the maximum number of candidate solutions
    to consider specified by the ``max_work`` parameter is exceeded.
    Assigning a finite number to ``max_work`` may have caused the operation
    to fail.
    """
    pass
class AxisError(ValueError, IndexError):
    """
    Axis supplied was invalid.

    Raised whenever an ``axis`` parameter exceeds the dimensionality of the
    array it is applied to.  Subclasses both :exc:`ValueError` and
    :exc:`IndexError` because older numpy versions raised a mixture of the
    two for this situation, and existing ``except`` clauses for either must
    keep catching it.

    Parameters
    ----------
    axis : int or str
        The out-of-bounds axis, or — when called with a single argument —
        a complete custom exception message.
    ndim : int, optional
        The number of array dimensions; supply it whenever *axis* is an
        actual axis rather than a message.
    msg_prefix : str, optional
        A prefix for the generated exception message.

    Attributes
    ----------
    axis : int or None
        The offending axis exactly as passed by the user (negative values
        are not normalized), or ``None`` when a custom message was supplied.
    ndim : int or None
        The number of array dimensions, or ``None`` for a custom message.

    Examples
    --------
    >>> import numpy as np
    >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error'))
    error: axis 2 is out of bounds for array of dimension 1
    >>> print(np.exceptions.AxisError('Custom error message'))
    Custom error message
    """
    # Keep instances lightweight; these three slots are the whole state.
    __slots__ = ("_msg", "axis", "ndim")

    def __init__(self, axis, ndim=None, msg_prefix=None):
        if ndim is None and msg_prefix is None:
            # Single-argument form: *axis* already holds the full message.
            self._msg, self.axis, self.ndim = axis, None, None
        else:
            self._msg, self.axis, self.ndim = msg_prefix, axis, ndim

    def __str__(self):
        if self.axis is None and self.ndim is None:
            # Custom-message form: return it verbatim.
            return self._msg
        body = (f"axis {self.axis} is out of bounds "
                f"for array of dimension {self.ndim}")
        return body if self._msg is None else f"{self._msg}: {body}"
class DTypePromotionError(TypeError):
    """Multiple DTypes could not be converted to a common one.

    This exception derives from ``TypeError`` and is raised whenever dtypes
    cannot be converted to a single common one. This can be because they
    are of a different category/class or incompatible instances of the same
    one (see Examples).

    Notes
    -----
    Many functions will use promotion to find the correct result and
    implementation. For these functions the error will typically be chained
    with a more specific error indicating that no implementation was found
    for the input dtypes.

    Typically promotion should be considered "invalid" between the dtypes of
    two arrays when `arr1 == arr2` can safely return all ``False`` because the
    dtypes are fundamentally different.

    Examples
    --------
    Datetimes and complex numbers are incompatible classes and cannot be
    promoted:

    >>> import numpy as np
    >>> np.result_type(np.dtype("M8[s]"), np.complex128)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
     ...
    DTypePromotionError: The DType could not
    be promoted by . This means that no common
    DType exists for the given inputs. For example they cannot be stored in a
    single array unless the dtype is `object`. The full list of DTypes is:
    (, )

    For example for structured dtypes, the structure can mismatch and the
    same ``DTypePromotionError`` is given when two structured dtypes with
    a mismatch in their number of fields is given:

    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
    >>> dtype2 = np.dtype([("field1", np.float64)])
    >>> np.promote_types(dtype1, dtype2)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
     ...
    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
    mismatch.
    """  # noqa: E501
    pass
venv\Lib\site-packages\numpy\matlib.py
import warnings

# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
              "The matrix subclass is not the recommended way to represent "
              "matrices or deal with linear algebra (see "
              "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
              "Please adjust your code to use regular ndarray. ",
              PendingDeprecationWarning, stacklevel=2)

import numpy as np
# Matlib.py contains all functions in the numpy namespace with a few
# replacements. See doc/source/reference/routines.matlib.rst for details.
# Need * as we're copying the numpy namespace.
from numpy import *  # noqa: F403
from numpy.matrixlib.defmatrix import asmatrix, matrix

__version__ = np.__version__

# matlib-specific names first, then everything re-exported from numpy.
__all__ = ['rand', 'randn', 'repmat']
__all__ += np.__all__
def empty(shape, dtype=None, order='C'):
    """
    Return a new matrix of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory layout.

    Returns
    -------
    out : matrix
        Uninitialized matrix; its contents are arbitrary until assigned,
        which makes this marginally faster than `matlib.zeros`/`matlib.ones`.

    See Also
    --------
    numpy.empty : Equivalent array function.
    matlib.zeros : Return a matrix of zeros.
    matlib.ones : Return a matrix of ones.
    """
    # Allocate directly as a matrix subclass without touching the contents.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    return out
def ones(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or length-1 shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type; defaults to ``np.float64``.
    order : {'C', 'F'}, optional
        C- or Fortran-contiguous storage, default 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones of the given shape, dtype, and order.

    See Also
    --------
    numpy.ones : Array of ones.
    matlib.zeros : Zero matrix.
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or length-1 shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type; defaults to float.
    order : {'C', 'F'}, optional
        C- or Fortran-contiguous storage, default 'C'.

    Returns
    -------
    out : matrix
        Zero matrix of the given shape, dtype, and order.

    See Also
    --------
    numpy.zeros : Equivalent array function.
    matlib.ones : Return a matrix of ones.
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """
    Return the square identity matrix of the given size.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output; defaults to ``float``.

    Returns
    -------
    out : matrix
        ``n`` x ``n`` matrix with ones on the main diagonal and zeros
        elsewhere.

    See Also
    --------
    numpy.identity : Equivalent array function.
    matlib.eye : More general matrix identity function.
    """
    # Uninitialized n x n matrix; filling .flat with the length-(n+1)
    # pattern [1, 0, ..., 0] tiles the ones exactly onto the diagonal.
    out = ndarray.__new__(matrix, (n, n), dtype, order='C')
    out.flat = array([1] + n * [0], dtype=dtype)
    return out
def eye(n, M=None, k=0, dtype=float, order='C'):
    """
    Return a matrix with ones on the k-th diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns; defaults to `n`.
    k : int, optional
        Diagonal index: 0 is the main diagonal, positive selects an upper
        diagonal, negative a lower one.
    dtype : dtype, optional
        Data-type of the returned matrix.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) storage.

    Returns
    -------
    I : matrix
        ``n`` x ``M`` matrix that is one on the `k`-th diagonal and zero
        everywhere else.

    See Also
    --------
    numpy.eye : Equivalent array function.
    identity : Square identity matrix.
    """
    # Delegate to the array version, then wrap as a matrix view.
    diagonal = np.eye(n, M=M, k=k, dtype=dtype, order=order)
    return asmatrix(diagonal)
def rand(*args):
    """
    Return a matrix of random values with the given shape, drawn from a
    uniform distribution over ``[0, 1)``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output. If the first argument is a tuple, it is taken
        as the complete shape and any further arguments are ignored;
        otherwise the integer arguments themselves form the shape.

    Returns
    -------
    out : matrix
        Matrix of random values with the requested shape.

    See Also
    --------
    randn, numpy.random.RandomState.rand
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """
    Return a random matrix with data from the "standard normal" distribution
    (Gaussian, mean 0, variance 1).

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output. If the first argument is a tuple, it is taken
        as the complete shape and any further arguments are ignored;
        otherwise the integer arguments themselves form the shape.

    Returns
    -------
    Z : matrix of floats
        Matrix of samples from the standard normal distribution.

    See Also
    --------
    rand, numpy.random.RandomState.randn

    Notes
    -----
    For samples with mean ``mu`` and standard deviation ``sigma``, use
    ``sigma * np.matlib.randn(...) + mu``.
    """
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """
    Repeat a 0-D to 2-D array or matrix in an m-by-n tiling.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        The number of times `a` is repeated along the first and second axes.

    Returns
    -------
    out : ndarray
        The tiled result, of the same array subclass as `a`.
    """
    a = asanyarray(a)
    # Treat 0-D input as 1x1 and 1-D input as a single row.
    if a.ndim == 0:
        base_rows, base_cols = 1, 1
    elif a.ndim == 1:
        base_rows, base_cols = 1, a.shape[0]
    else:
        base_rows, base_cols = a.shape
    out_rows = base_rows * m
    out_cols = base_cols * n
    # Repeat whole copies of `a` m times down, then each row n times across.
    tiled = (a.reshape(1, a.size)
              .repeat(m, 0)
              .reshape(out_rows, base_cols)
              .repeat(n, 0))
    return tiled.reshape(out_rows, out_cols)
venv\Lib\site-packages\numpy\version.py
"""
Module to expose more detailed version info for the installed `numpy`
"""
version = "2.3.2"
__version__ = version
full_version = version
git_revision = "bc5e4f811db9487a9ea1618ffb77a33b3919bb8e"
release = 'dev' not in version and '+' not in version
short_version = version.split("+")[0]
venv\Lib\site-packages\numpy\_array_api_info.py
"""
Array API Inspection namespace
This is the namespace for inspection functions as defined by the array API
standard. See
https://data-apis.org/array-api/latest/API_specification/inspection.html for
more details.
"""
from numpy._core import (
bool,
complex64,
complex128,
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
intp,
uint8,
uint16,
uint32,
uint64,
)
class __array_namespace_info__:
"""
Get the array API inspection namespace for NumPy.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for NumPy.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
__module__ = 'numpy'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for NumPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
NumPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new NumPy arrays.
For NumPy, this always returns ``'cpu'``.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : str
The default device used for new NumPy arrays.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_device()
'cpu'
"""
return "cpu"
def default_dtypes(self, *, device=None):
"""
The default data types used for new NumPy arrays.
For NumPy, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for. For NumPy, only
``'cpu'`` is allowed.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new NumPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by NumPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for. For NumPy, only ``'cpu'`` is
allowed.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
NumPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': numpy.int8,
'int16': numpy.int16,
'int32': numpy.int32,
'int64': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self):
"""
The devices supported by NumPy.
For NumPy, this always returns ``['cpu']``.
Returns
-------
devices : list of str
The devices supported by NumPy.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.devices()
['cpu']
"""
return ["cpu"]
venv\Lib\site-packages\numpy\_configtool.py
import argparse
import sys
from pathlib import Path
from .lib._utils_impl import get_include
from .version import __version__
def main() -> None:
    """Entry point for the ``numpy-config`` command-line tool.

    Supports ``--version`` (print and exit), ``--cflags`` (print the include
    flag for the NumPy headers), and ``--pkgconfigdir`` (print the directory
    holding ``numpy.pc``). With no arguments at all, prints the help text.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--version",
        action="version",
        version=__version__,
        help="Print the version and exit.",
    )
    arg_parser.add_argument(
        "--cflags",
        action="store_true",
        help="Compile flag needed when using the NumPy headers.",
    )
    arg_parser.add_argument(
        "--pkgconfigdir",
        action="store_true",
        help=("Print the pkgconfig directory in which `numpy.pc` is stored "
              "(useful for setting $PKG_CONFIG_PATH)."),
    )
    options = arg_parser.parse_args()

    # Invoked with no arguments: show usage instead of doing nothing.
    if not sys.argv[1:]:
        arg_parser.print_help()
    if options.cflags:
        print(f"-I{get_include()}")
    if options.pkgconfigdir:
        pkg_dir = Path(get_include()).joinpath('..', 'lib', 'pkgconfig')
        print(pkg_dir.resolve())
if __name__ == "__main__":
main()
venv\Lib\site-packages\numpy\_distributor_init.py
""" Distributor init file
Distributors: you can add custom code here to support particular distributions
of numpy.
For example, this is a good place to put any BLAS/LAPACK initialization code.
The numpy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""
# Optional distributor hook: a distribution may ship its own
# ``_distributor_init_local`` module (e.g. BLAS/LAPACK setup).  Upstream
# wheels do not include it, so a missing module is the normal case and the
# ImportError is deliberately swallowed.
try:
    from . import _distributor_init_local  # noqa: F401
except ImportError:
    pass
"""
Dict of expired attributes that are discontinued since 2.0 release.
Each item is associated with a migration note.
"""
# Maps each former ``np.<name>`` attribute removed in NumPy 2.0 to a
# human-readable migration hint.  ``numpy.__getattr__`` consults this table
# to raise an informative AttributeError when an expired name is accessed.
__expired_attributes__ = {
    "geterrobj": "Use the np.errstate context manager instead.",
    "seterrobj": "Use the np.errstate context manager instead.",
    "cast": "Use `np.asarray(arr, dtype=dtype)` instead.",
    "source": "Use `inspect.getsource` instead.",
    "lookfor": "Search NumPy's documentation directly.",
    "who": "Use an IDE variable explorer or `locals()` instead.",
    "fastCopyAndTranspose": "Use `arr.T.copy()` instead.",
    "set_numeric_ops":
        "For the general case, use `PyUFunc_ReplaceLoopBySignature`. "
        "For ndarray subclasses, define the ``__array_ufunc__`` method "
        "and override the relevant ufunc.",
    "NINF": "Use `-np.inf` instead.",
    "PINF": "Use `np.inf` instead.",
    "NZERO": "Use `-0.0` instead.",
    "PZERO": "Use `0.0` instead.",
    "add_newdoc":
        "It's still available as `np.lib.add_newdoc`.",
    "add_docstring":
        "It's still available as `np.lib.add_docstring`.",
    "add_newdoc_ufunc":
        "It's an internal function and doesn't have a replacement.",
    "safe_eval": "Use `ast.literal_eval` instead.",
    "float_": "Use `np.float64` instead.",
    "complex_": "Use `np.complex128` instead.",
    "longfloat": "Use `np.longdouble` instead.",
    "singlecomplex": "Use `np.complex64` instead.",
    "cfloat": "Use `np.complex128` instead.",
    "longcomplex": "Use `np.clongdouble` instead.",
    "clongfloat": "Use `np.clongdouble` instead.",
    "string_": "Use `np.bytes_` instead.",
    "unicode_": "Use `np.str_` instead.",
    "Inf": "Use `np.inf` instead.",
    "Infinity": "Use `np.inf` instead.",
    "NaN": "Use `np.nan` instead.",
    "infty": "Use `np.inf` instead.",
    "issctype": "Use `issubclass(rep, np.generic)` instead.",
    "maximum_sctype":
        "Use a specific dtype instead. You should avoid relying "
        "on any implicit mechanism and select the largest dtype of "
        "a kind explicitly in the code.",
    "obj2sctype": "Use `np.dtype(obj).type` instead.",
    "sctype2char": "Use `np.dtype(obj).char` instead.",
    "sctypes": "Access dtypes explicitly instead.",
    "issubsctype": "Use `np.issubdtype` instead.",
    "set_string_function":
        "Use `np.set_printoptions` instead with a formatter for "
        "custom printing of NumPy objects.",
    "asfarray": "Use `np.asarray` with a proper dtype instead.",
    "issubclass_": "Use `issubclass` builtin instead.",
    "tracemalloc_domain": "It's now available from `np.lib`.",
    "mat": "Use `np.asmatrix` instead.",
    "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.",
    "recfromtxt": "Use `np.genfromtxt` instead.",
    "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, "
                 "or use `typing.deprecated`.",
    "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` "
                          "directly, or use `typing.deprecated`.",
    "disp": "Use your own printing function instead.",
    "find_common_type":
        "Use `numpy.promote_types` or `numpy.result_type` instead. "
        "To achieve semantics for the `scalar_types` argument, use "
        "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.",
    "round_": "Use `np.round` instead.",
    # Empty hint: removed without a direct replacement.
    "get_array_wrap": "",
    "DataSource": "It's still available as `np.lib.npyio.DataSource`.",
    "nbytes": "Use `np.dtype().itemsize` instead.",
    "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`",
    "compare_chararrays":
        "It's still available as `np.char.compare_chararrays`.",
    "format_parser": "It's still available as `np.rec.format_parser`.",
    "alltrue": "Use `np.all` instead.",
    "sometrue": "Use `np.any` instead.",
}
venv\Lib\site-packages\numpy\_globals.py
"""
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
    def foo(arg=np._NoValue):
        if arg is np._NoValue:
            ...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
import enum
from ._utils import set_module as _set_module
__all__ = ['_NoValue', '_CopyMode']
# Disallow reloading this module so as to preserve the identities of the
# classes defined here.  On a reload the module's old globals are reused,
# so the flag set below is already present and we bail out.
if '_is_loaded' in globals():
    raise RuntimeError('Reloading numpy._globals is not allowed')
# Sentinel flag; its mere presence in globals() marks a completed first load.
_is_loaded = True
class _NoValueType:
    """Special keyword value.

    The instance of this class may be used as the default value assigned to a
    keyword if no other obvious default (e.g., `None`) is suitable,

    Common reasons for using this keyword are:

    - A new keyword is added to a function, and that function forwards its
      inputs to another function or method which can be defined outside of
      NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
      keyword was added that could only be forwarded if the user explicitly
      specified ``keepdims``; downstream array libraries may not have added
      the same keyword, so adding ``x.std(..., keepdims=keepdims)``
      unconditionally could have broken previously working code.
    - A keyword is being deprecated, and a deprecation warning must only be
      emitted when the keyword is used.

    """
    # Class-level slot for the singleton instance (name-mangled).
    __instance = None

    def __new__(cls):
        # ensure that only one instance exists
        if not cls.__instance:
            cls.__instance = super().__new__(cls)
        return cls.__instance

    def __repr__(self):
        # FIX: previously returned the empty string, which made the sentinel
        # invisible in reprs/tracebacks; restore the informative form.
        return "<no value>"


# The module-level singleton exposed as ``np._NoValue``.
_NoValue = _NoValueType()
@_set_module("numpy")
class _CopyMode(enum.Enum):
"""
An enumeration for the copy modes supported
by numpy.copy() and numpy.array(). The following three modes are supported,
- ALWAYS: This means that a deep copy of the input
array will always be taken.
- IF_NEEDED: This means that a deep copy of the input
array will be taken only if necessary.
- NEVER: This means that the deep copy will never be taken.
If a copy cannot be avoided then a `ValueError` will be
raised.
Note that the buffer-protocol could in theory do copies. NumPy currently
assumes an object exporting the buffer protocol will never do this.
"""
ALWAYS = True
NEVER = False
IF_NEEDED = 2
def __bool__(self):
# For backwards compatibility
if self == _CopyMode.ALWAYS:
return True
if self == _CopyMode.NEVER:
return False
raise ValueError(f"{self} is neither True nor False.")
venv\Lib\site-packages\numpy\_pytesttester.py
"""
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in development mode with
``spin``, through the standard ``spin test`` invocation or from an inplace
build with ``pytest numpy``.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import os
import sys
__all__ = ['PytestTester']
def _show_numpy_info():
import numpy as np
print(f"NumPy version {np.__version__}")
info = np.lib._utils_impl._opt_info()
print("NumPy CPU features: ", (info or 'nothing enabled'))
class PytestTester:
    """
    Pytest test runner.

    A test function is typically added to a package's __init__.py like so::

      from numpy._pytesttester import PytestTester
      test = PytestTester(__name__).test
      del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.

    """
    def __init__(self, module_name):
        # Keep the dotted module path both for pytest's --pyargs target and
        # so the instance reports itself as belonging to that module.
        self.module_name = module_name
        self.__module__ = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.

        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:

        >>> np.lib.test() #doctest: +SKIP

        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True

        """
        import warnings
        import pytest

        # The package must already be imported; pytest targets its directory.
        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        # setup the pytest arguments
        pytest_args = ["-l"]

        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]

        if sys.version_info < (3, 12):
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                # Filter out distutils cpu warnings (could be localized to
                # distutils tests). ASV has problems with top level import,
                # so fetch module for suppression here.
                from numpy.distutils import cpuinfo  # noqa: F401

        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
        ]

        # When testing matrices, ignore their PendingDeprecationWarnings
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
        ]

        if doctests:
            pytest_args += ["--doctest-modules"]

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", ...
            pytest_args += ["-" + "v" * (verbose - 1)]

        if coverage:
            pytest_args += ["--cov=" + module_path]

        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]
        elif label != "full":
            # Any other label is passed through as a pytest marker expression.
            pytest_args += ["-m", label]

        if durations >= 0:
            pytest_args += [f"--durations={durations}"]

        if tests is None:
            tests = [self.module_name]

        pytest_args += ["--pyargs"] + list(tests)

        # run tests.
        _show_numpy_info()

        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest may exit via SystemExit; its code carries the result.
            code = exc.code

        return code == 0
venv\Lib\site-packages\numpy\__config__.py
# This file is generated by numpy's build process
# It contains system_info results at the time of building this package.
from enum import Enum
from numpy._core._multiarray_umath import (
__cpu_features__,
__cpu_baseline__,
__cpu_dispatch__,
)
__all__ = ["show_config"]
# Marker consulted elsewhere in numpy to distinguish Meson builds from the
# legacy distutils builds.
_built_with_meson = True


class DisplayModes(Enum):
    """Valid values for the ``mode`` argument of :func:`show`."""
    stdout = "stdout"  # pretty-print the configuration to standard output
    dicts = "dicts"    # return the raw CONFIG dictionary instead
def _cleanup(d):
"""
Removes empty values in a `dict` recursively
This ensures we remove values that Meson could not provide to CONFIG
"""
if isinstance(d, dict):
return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)}
else:
return d
# Build-time configuration captured when this wheel was produced; `_cleanup`
# strips any entries Meson could not fill in.
CONFIG = _cleanup(
    {
        "Compilers": {
            "c": {
                "name": "msvc",
                "linker": r"link",
                "version": "19.44.35213",
                "commands": r"cl",
                "args": r"",
                "linker args": r"",
            },
            "cython": {
                "name": "cython",
                "linker": r"cython",
                "version": "3.1.2",
                "commands": r"cython",
                "args": r"",
                "linker args": r"",
            },
            "c++": {
                "name": "msvc",
                "linker": r"link",
                "version": "19.44.35213",
                "commands": r"cl",
                "args": r"",
                "linker args": r"",
            },
        },
        "Machine Information": {
            "host": {
                "cpu": "x86_64",
                "family": "x86_64",
                "endian": "little",
                "system": "windows",
            },
            "build": {
                "cpu": "x86_64",
                "family": "x86_64",
                "endian": "little",
                "system": "windows",
            },
            # Template-generated truthiness: "False" -> "" -> False
            # (only the literal "True" survives the replace and stays truthy).
            "cross-compiled": bool("False".lower().replace("false", "")),
        },
        "Build Dependencies": {
            "blas": {
                "name": "scipy-openblas",
                "found": bool("True".lower().replace("false", "")),
                "version": "0.3.30",
                "detection method": "pkgconfig",
                "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-sokdaj8v/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include",
                "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-sokdaj8v/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib",
                "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24",
                "pc file directory": r"D:/a/numpy/numpy/.openblas",
            },
            "lapack": {
                "name": "scipy-openblas",
                "found": bool("True".lower().replace("false", "")),
                "version": "0.3.30",
                "detection method": "pkgconfig",
                "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-sokdaj8v/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include",
                "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-sokdaj8v/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib",
                "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24",
                "pc file directory": r"D:/a/numpy/numpy/.openblas",
            },
        },
        "Python Information": {
            "path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-8hs1bp3b\Scripts\python.exe",
            "version": "3.12",
        },
        "SIMD Extensions": {
            "baseline": __cpu_baseline__,
            # Partition the dispatchable features by runtime availability.
            "found": [
                feature for feature in __cpu_dispatch__ if __cpu_features__[feature]
            ],
            "not found": [
                feature for feature in __cpu_dispatch__ if not __cpu_features__[feature]
            ],
        },
    }
)
def _check_pyyaml():
    """Import and return the optional ``yaml`` module.

    Raises ModuleNotFoundError when PyYAML is not installed; callers use
    that to fall back to JSON output.
    """
    import yaml
    return yaml
def show(mode=DisplayModes.stdout.value):
    """
    Show libraries and system information on which NumPy was built
    and is being used

    Parameters
    ----------
    mode : {`'stdout'`, `'dicts'`}, optional.
        Indicates how to display the config information.
        `'stdout'` prints to console, `'dicts'` returns a dictionary
        of the configuration.

    Returns
    -------
    out : {`dict`, `None`}
        If mode is `'dicts'`, a dict is returned, else None

    See Also
    --------
    get_include : Returns the directory containing NumPy C
                  header files.

    Notes
    -----
    1. The `'stdout'` mode will give more readable
       output if ``pyyaml`` is installed

    """
    if mode == DisplayModes.dicts.value:
        return CONFIG

    if mode == DisplayModes.stdout.value:
        try:  # Non-standard library, check import
            yaml = _check_pyyaml()
            print(yaml.dump(CONFIG))
        except ModuleNotFoundError:
            import json
            import warnings

            warnings.warn("Install `pyyaml` for better output", stacklevel=1)
            print(json.dumps(CONFIG, indent=2))
        return None

    raise AttributeError(
        f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
    )
def show_config(mode=DisplayModes.stdout.value):
    # Thin public alias of `show`; kept as its own function so its metadata
    # can be rewritten below.
    return show(mode)
# Mirror the wrapped function's docstring and advertise it as `numpy.show_config`.
show_config.__doc__ = show.__doc__
show_config.__module__ = "numpy"
venv\Lib\site-packages\numpy\__init__.py
"""
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <https://numpy.org>`_.
We recommend exploring the docstrings using
`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as ``np``::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
Available subpackages
---------------------
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
distutils
Enhancements to distutils with support for
Fortran compilers support and more (for Python <= 3.11)
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython and import `numpy` usually under the alias ``np``: `import
numpy as np`. Then, directly paste or use the ``%cpaste`` magic to paste
examples into the shell. To see which functions are available in `numpy`,
type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
# start delvewheel patch
def _delvewheel_patch_1_11_0():
    """Register the wheel's bundled DLL directory (``numpy.libs``) with the
    Windows loader, if that directory exists next to the package."""
    import os
    libs_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, 'numpy.libs'))
    if os.path.isdir(libs_dir):
        os.add_dll_directory(libs_dir)


_delvewheel_patch_1_11_0()
# Run once at import; remove the helper so it doesn't pollute the namespace.
del _delvewheel_patch_1_11_0
# end delvewheel patch
import os
import sys
import warnings
# If a version with git hash was stored, use that instead
from . import version
from ._expired_attrs_2_0 import __expired_attributes__
from ._globals import _CopyMode, _NoValue
from .version import __version__
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
    # numpy's own build machinery defines this global before importing;
    # in a normal interpreter the bare reference raises NameError.
    __NUMPY_SETUP__  # noqa: B018
except NameError:
    __NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
# Allow distributors to run custom init code before importing numpy._core
from . import _distributor_init
try:
from numpy.__config__ import show_config
except ImportError as e:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg) from e
from . import _core
from ._core import (
False_,
ScalarType,
True_,
abs,
absolute,
acos,
acosh,
add,
all,
allclose,
amax,
amin,
any,
arange,
arccos,
arccosh,
arcsin,
arcsinh,
arctan,
arctan2,
arctanh,
argmax,
argmin,
argpartition,
argsort,
argwhere,
around,
array,
array2string,
array_equal,
array_equiv,
array_repr,
array_str,
asanyarray,
asarray,
ascontiguousarray,
asfortranarray,
asin,
asinh,
astype,
atan,
atan2,
atanh,
atleast_1d,
atleast_2d,
atleast_3d,
base_repr,
binary_repr,
bitwise_and,
bitwise_count,
bitwise_invert,
bitwise_left_shift,
bitwise_not,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
block,
bool,
bool_,
broadcast,
busday_count,
busday_offset,
busdaycalendar,
byte,
bytes_,
can_cast,
cbrt,
cdouble,
ceil,
character,
choose,
clip,
clongdouble,
complex64,
complex128,
complexfloating,
compress,
concat,
concatenate,
conj,
conjugate,
convolve,
copysign,
copyto,
correlate,
cos,
cosh,
count_nonzero,
cross,
csingle,
cumprod,
cumsum,
cumulative_prod,
cumulative_sum,
datetime64,
datetime_as_string,
datetime_data,
deg2rad,
degrees,
diagonal,
divide,
divmod,
dot,
double,
dtype,
e,
einsum,
einsum_path,
empty,
empty_like,
equal,
errstate,
euler_gamma,
exp,
exp2,
expm1,
fabs,
finfo,
flatiter,
flatnonzero,
flexible,
float16,
float32,
float64,
float_power,
floating,
floor,
floor_divide,
fmax,
fmin,
fmod,
format_float_positional,
format_float_scientific,
frexp,
from_dlpack,
frombuffer,
fromfile,
fromfunction,
fromiter,
frompyfunc,
fromstring,
full,
full_like,
gcd,
generic,
geomspace,
get_printoptions,
getbufsize,
geterr,
geterrcall,
greater,
greater_equal,
half,
heaviside,
hstack,
hypot,
identity,
iinfo,
indices,
inexact,
inf,
inner,
int8,
int16,
int32,
int64,
int_,
intc,
integer,
intp,
invert,
is_busday,
isclose,
isdtype,
isfinite,
isfortran,
isinf,
isnan,
isnat,
isscalar,
issubdtype,
lcm,
ldexp,
left_shift,
less,
less_equal,
lexsort,
linspace,
little_endian,
log,
log1p,
log2,
log10,
logaddexp,
logaddexp2,
logical_and,
logical_not,
logical_or,
logical_xor,
logspace,
long,
longdouble,
longlong,
matmul,
matrix_transpose,
matvec,
max,
maximum,
may_share_memory,
mean,
memmap,
min,
min_scalar_type,
minimum,
mod,
modf,
moveaxis,
multiply,
nan,
ndarray,
ndim,
nditer,
negative,
nested_iters,
newaxis,
nextafter,
nonzero,
not_equal,
number,
object_,
ones,
ones_like,
outer,
partition,
permute_dims,
pi,
positive,
pow,
power,
printoptions,
prod,
promote_types,
ptp,
put,
putmask,
rad2deg,
radians,
ravel,
recarray,
reciprocal,
record,
remainder,
repeat,
require,
reshape,
resize,
result_type,
right_shift,
rint,
roll,
rollaxis,
round,
sctypeDict,
searchsorted,
set_printoptions,
setbufsize,
seterr,
seterrcall,
shape,
shares_memory,
short,
sign,
signbit,
signedinteger,
sin,
single,
sinh,
size,
sort,
spacing,
sqrt,
square,
squeeze,
stack,
std,
str_,
subtract,
sum,
swapaxes,
take,
tan,
tanh,
tensordot,
timedelta64,
trace,
transpose,
true_divide,
trunc,
typecodes,
ubyte,
ufunc,
uint,
uint8,
uint16,
uint32,
uint64,
uintc,
uintp,
ulong,
ulonglong,
unsignedinteger,
unstack,
ushort,
var,
vdot,
vecdot,
vecmat,
void,
vstack,
where,
zeros,
zeros_like,
)
# NOTE: It's still under discussion whether these aliases
# should be removed.
for ta in ["float96", "float128", "complex192", "complex256"]:
try:
globals()[ta] = getattr(_core, ta)
except AttributeError:
pass
del ta
from . import lib
from . import matrixlib as _mat
from .lib import scimath as emath
from .lib._arraypad_impl import pad
from .lib._arraysetops_impl import (
ediff1d,
in1d,
intersect1d,
isin,
setdiff1d,
setxor1d,
union1d,
unique,
unique_all,
unique_counts,
unique_inverse,
unique_values,
)
from .lib._function_base_impl import (
angle,
append,
asarray_chkfinite,
average,
bartlett,
bincount,
blackman,
copy,
corrcoef,
cov,
delete,
diff,
digitize,
extract,
flip,
gradient,
hamming,
hanning,
i0,
insert,
interp,
iterable,
kaiser,
median,
meshgrid,
percentile,
piecewise,
place,
quantile,
rot90,
select,
sinc,
sort_complex,
trapezoid,
trapz,
trim_zeros,
unwrap,
vectorize,
)
from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd
from .lib._index_tricks_impl import (
c_,
diag_indices,
diag_indices_from,
fill_diagonal,
index_exp,
ix_,
mgrid,
ndenumerate,
ndindex,
ogrid,
r_,
ravel_multi_index,
s_,
unravel_index,
)
from .lib._nanfunctions_impl import (
nanargmax,
nanargmin,
nancumprod,
nancumsum,
nanmax,
nanmean,
nanmedian,
nanmin,
nanpercentile,
nanprod,
nanquantile,
nanstd,
nansum,
nanvar,
)
from .lib._npyio_impl import (
fromregex,
genfromtxt,
load,
loadtxt,
packbits,
save,
savetxt,
savez,
savez_compressed,
unpackbits,
)
from .lib._polynomial_impl import (
poly,
poly1d,
polyadd,
polyder,
polydiv,
polyfit,
polyint,
polymul,
polysub,
polyval,
roots,
)
from .lib._shape_base_impl import (
apply_along_axis,
apply_over_axes,
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
row_stack,
split,
take_along_axis,
tile,
vsplit,
)
from .lib._stride_tricks_impl import (
broadcast_arrays,
broadcast_shapes,
broadcast_to,
)
from .lib._twodim_base_impl import (
diag,
diagflat,
eye,
fliplr,
flipud,
histogram2d,
mask_indices,
tri,
tril,
tril_indices,
tril_indices_from,
triu,
triu_indices,
triu_indices_from,
vander,
)
from .lib._type_check_impl import (
common_type,
imag,
iscomplex,
iscomplexobj,
isreal,
isrealobj,
mintypecode,
nan_to_num,
real,
real_if_close,
typename,
)
from .lib._ufunclike_impl import fix, isneginf, isposinf
from .lib._utils_impl import get_include, info, show_runtime
from .matrixlib import asmatrix, bmat, matrix
# public submodules are imported lazily, therefore are accessible from
# __getattr__. Note that `distutils` (deprecated) and `array_api`
# (experimental label) are not added here, because `from numpy import *`
# must not raise any warnings - that's too disruptive.
__numpy_submodules__ = {
"linalg", "fft", "dtypes", "random", "polynomial", "ma",
"exceptions", "lib", "ctypeslib", "testing", "typing",
"f2py", "test", "rec", "char", "core", "strings",
}
# We build warning messages for former attributes
_msg = (
"module 'numpy' has no attribute '{n}'.\n"
"`np.{n}` was a deprecated alias for the builtin `{n}`. "
"To avoid this error in existing code, use `{n}` by itself. "
"Doing this will not modify any behavior and is safe. {extended_msg}\n"
"The aliases was originally deprecated in NumPy 1.20; for more "
"details and guidance see the original release note at:\n"
" https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
_specific_msg = (
"If you specifically wanted the numpy scalar type, use `np.{}` here.")
_int_extended_msg = (
"When replacing `np.{}`, you may wish to use e.g. `np.int64` "
"or `np.int32` to specify the precision. If you wish to review "
"your current use, check the release note link for "
"additional information.")
_type_info = [
("object", ""), # The NumPy scalar only exists by name.
("float", _specific_msg.format("float64")),
("complex", _specific_msg.format("complex128")),
("str", _specific_msg.format("str_")),
("int", _int_extended_msg.format("int"))]
__former_attrs__ = {
n: _msg.format(n=n, extended_msg=extended_msg)
for n, extended_msg in _type_info
}
# Some of these could be defined right away, but most were aliases to
# the Python objects and only removed in NumPy 1.24. Defining them should
# probably wait for NumPy 1.26 or 2.0.
# When defined, these should possibly not be added to `__all__` to avoid
# import with `from numpy import *`.
__future_scalars__ = {"str", "bytes", "object"}
__array_api_version__ = "2024.12"
from ._array_api_info import __array_namespace_info__
# now that numpy core module is imported, can initialize limits
_core.getlimits._register_known_types()

# The public namespace is the union of the lazily-imported submodule names,
# everything re-exported from ``_core``/``matrixlib``, and the per-module
# ``__all__`` lists of each ``numpy.lib`` implementation module.
__all__ = list(
    __numpy_submodules__ |
    set(_core.__all__) |
    set(_mat.__all__) |
    set(lib._histograms_impl.__all__) |
    set(lib._nanfunctions_impl.__all__) |
    set(lib._function_base_impl.__all__) |
    set(lib._twodim_base_impl.__all__) |
    set(lib._shape_base_impl.__all__) |
    set(lib._type_check_impl.__all__) |
    set(lib._arraysetops_impl.__all__) |
    set(lib._ufunclike_impl.__all__) |
    set(lib._arraypad_impl.__all__) |
    set(lib._utils_impl.__all__) |
    set(lib._stride_tricks_impl.__all__) |
    set(lib._polynomial_impl.__all__) |
    set(lib._npyio_impl.__all__) |
    set(lib._index_tricks_impl.__all__) |
    {"emath", "show_config", "__version__", "__array_namespace_info__"}
)

# Filter out Cython harmless warnings
# These "size changed" warnings come from extension modules compiled against
# a different (but compatible) NumPy ABI and are pure noise for end users.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
def __getattr__(attr):
    """Module-level attribute hook (PEP 562).

    Lazily imports public submodules on first access, and raises
    informative errors (or warnings) for attributes that were removed,
    expired, or renamed in NumPy 2.0.
    """
    # Warn for expired attributes
    import warnings

    if attr == "linalg":
        import numpy.linalg as linalg
        return linalg
    elif attr == "fft":
        import numpy.fft as fft
        return fft
    elif attr == "dtypes":
        import numpy.dtypes as dtypes
        return dtypes
    elif attr == "random":
        import numpy.random as random
        return random
    elif attr == "polynomial":
        import numpy.polynomial as polynomial
        return polynomial
    elif attr == "ma":
        import numpy.ma as ma
        return ma
    elif attr == "ctypeslib":
        import numpy.ctypeslib as ctypeslib
        return ctypeslib
    elif attr == "exceptions":
        import numpy.exceptions as exceptions
        return exceptions
    elif attr == "testing":
        import numpy.testing as testing
        return testing
    elif attr == "matlib":
        import numpy.matlib as matlib
        return matlib
    elif attr == "f2py":
        import numpy.f2py as f2py
        return f2py
    elif attr == "typing":
        import numpy.typing as typing
        return typing
    elif attr == "rec":
        import numpy.rec as rec
        return rec
    elif attr == "char":
        import numpy.char as char
        return char
    elif attr == "array_api":
        # Removed in NumPy 2.0; the main namespace is array-API compatible.
        raise AttributeError("`numpy.array_api` is not available from "
                             "numpy 2.0 onwards", name=None)
    elif attr == "core":
        import numpy.core as core
        return core
    elif attr == "strings":
        import numpy.strings as strings
        return strings
    elif attr == "distutils":
        # Only importable when the build added it to __numpy_submodules__
        # (i.e. on Python < 3.12, where distutils still exists).
        if 'distutils' in __numpy_submodules__:
            import numpy.distutils as distutils
            return distutils
        else:
            raise AttributeError("`numpy.distutils` is not available from "
                                 "Python 3.12 onwards", name=None)

    if attr in __future_scalars__:
        # And future warnings for those that will change, but also give
        # the AttributeError
        warnings.warn(
            f"In the future `np.{attr}` will be defined as the "
            "corresponding NumPy scalar.", FutureWarning, stacklevel=2)

    if attr in __former_attrs__:
        # Removed builtin aliases (np.int, np.float, ...) raise with the
        # detailed migration message built above.
        raise AttributeError(__former_attrs__[attr], name=None)

    if attr in __expired_attributes__:
        raise AttributeError(
            f"`np.{attr}` was removed in the NumPy 2.0 release. "
            f"{__expired_attributes__[attr]}",
            name=None
        )

    if attr == "chararray":
        warnings.warn(
            "`np.chararray` is deprecated and will be removed from "
            "the main namespace in the future. Use an array with a string "
            "or bytes dtype instead.", DeprecationWarning, stacklevel=2)
        import numpy.char as char
        return char.chararray

    raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
def __dir__():
    """Return the attributes reported by ``dir(numpy)``.

    Includes the lazily-imported public submodules and hides internal or
    removed names that happen to live in the module globals.
    """
    hidden = {
        "matrixlib", "matlib", "tests", "conftest", "version",
        "distutils", "array_api",
    }
    return list((globals().keys() | __numpy_submodules__) - hidden)
# Pytest testing
# Expose ``np.test()`` for running NumPy's own test suite via pytest,
# then drop the class so only the bound tester remains public.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - float32(2.0)) < 1e-5:
raise AssertionError
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__)) from None
# Run the quick BLAS sanity check once at import time, then delete the
# helper so it does not linger in the public namespace.
_sanity_check()
del _sanity_check
def _mac_os_check():
"""
Quick Sanity check for Mac OS look for accelerate build bugs.
Testing numpy polyfit calls init_dgelsd(LAPACK)
"""
try:
c = array([3., 2., 1.])
x = linspace(0, 2, 5)
y = polyval(c, x)
_ = polyfit(x, y, 2, cov=True)
except ValueError:
pass
# On macOS, run the polyfit check at import time: a buggy Accelerate
# backend surfaces as a RankWarning from LAPACK.
if sys.platform == "darwin":
    from . import exceptions
    with warnings.catch_warnings(record=True) as w:
        _mac_os_check()
        # Throw runtime error, if the test failed
        # Check for warning and report the error_message
        if len(w) > 0:
            for _wn in w:
                if _wn.category is exceptions.RankWarning:
                    # Ignore other warnings, they may not be relevant (see gh-25433)
                    error_message = (
                        f"{_wn.category.__name__}: {_wn.message}"
                    )
                    msg = (
                        "Polyfit sanity test emitted a warning, most likely due "
                        "to using a buggy Accelerate backend."
                        "\nIf you compiled yourself, more information is available at:"  # noqa: E501
                        "\nhttps://numpy.org/devdocs/building/index.html"
                        "\nOtherwise report this to the vendor "
                        f"that provided NumPy.\n\n{error_message}\n")
                    raise RuntimeError(msg)
            del _wn
        del w
def hugepage_setup():
    """
    Decide whether madvise hugepage support should be enabled.

    We usually use madvise hugepages support, but on some old kernels it
    is slow and thus better avoided. Specifically kernel version 4.6
    had a bug fix which probably fixed this:
    https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff

    Returns an int: the value of NUMPY_MADVISE_HUGEPAGE when set, else
    1 (enabled) or 0 (disabled) based on platform and kernel version.
    """
    use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
    if use_hugepage is not None:
        # Explicit user override always wins.
        return int(use_hugepage)
    if sys.platform != "linux":
        # This is not Linux, so it should not matter, just enable anyway.
        return 1
    # If there is an issue with parsing the kernel version, disable
    # hugepages. LooseVersion would parse more robustly but is avoided to
    # keep import time down. See #16679 for related discussion.
    try:
        kernel_version = tuple(
            int(part) for part in os.uname().release.split(".")[:2]
        )
        return 0 if kernel_version < (4, 6) else 1
    except ValueError:
        return 0
# Note that this will currently only make a difference on Linux
_core.multiarray._set_madvise_hugepage(hugepage_setup())
del hugepage_setup

# Give a warning if NumPy is reloaded or imported on a sub-interpreter
# We do this from python, since the C-module may not be reloaded and
# it is tidier organized.
_core.multiarray._multiarray_umath._reload_guard()

# TODO: Remove the environment variable entirely now that it is "weak"
# The promotion-state switch was transitional for NumPy 2.0 and any
# non-default setting is now ignored; warn so users clean it up.
if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"):
    warnings.warn(
        "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 "
        "transition and is ignored after NumPy 2.2.",
        UserWarning, stacklevel=2)
# Tell PyInstaller where to find hook-numpy.py
def _pyinstaller_hooks_dir():
from pathlib import Path
return [str(Path(__file__).with_name("_pyinstaller").resolve())]
# Remove symbols imported for internal use so they do not show up as
# public attributes of the numpy namespace.
del os, sys, warnings
venv\Lib\site-packages\packaging\markers.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import operator
import os
import platform
import sys
from typing import AbstractSet, Any, Callable, Literal, TypedDict, Union, cast
from ._parser import MarkerAtom, MarkerList, Op, Value, Variable
from ._parser import parse_marker as _parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
__all__ = [
    "EvaluateContext",
    "InvalidMarker",
    "Marker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "default_environment",
]

# Signature of the comparison callables stored in ``_operators`` below.
Operator = Callable[[str, Union[str, AbstractSet[str]]], bool]
# The contexts a marker may be evaluated for; see ``Marker.evaluate``.
EvaluateContext = Literal["metadata", "lock_file", "requirement"]
# Marker names whose environment value may be a *set* of strings.
MARKERS_ALLOWING_SET = {"extras", "dependency_groups"}
class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """
class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """
class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """
class Environment(TypedDict):
    """The PEP 508 environment-marker variables, as a typed dictionary."""

    implementation_name: str
    """The implementation's identifier, e.g. ``'cpython'``."""

    implementation_version: str
    """
    The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or
    ``'7.3.13'`` for PyPy3.10 v7.3.13.
    """

    os_name: str
    """
    The value of :py:data:`os.name`. The name of the operating system dependent module
    imported, e.g. ``'posix'``.
    """

    platform_machine: str
    """
    Returns the machine type, e.g. ``'i386'``.

    An empty string if the value cannot be determined.
    """

    platform_release: str
    """
    The system's release, e.g. ``'2.2.0'`` or ``'NT'``.

    An empty string if the value cannot be determined.
    """

    platform_system: str
    """
    The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``.

    An empty string if the value cannot be determined.
    """

    platform_version: str
    """
    The system's release version, e.g. ``'#3 on degas'``.

    An empty string if the value cannot be determined.
    """

    python_full_version: str
    """
    The Python version as string ``'major.minor.patchlevel'``.

    Note that unlike the Python :py:data:`sys.version`, this value will always include
    the patchlevel (it defaults to 0).
    """

    platform_python_implementation: str
    """
    A string identifying the Python implementation, e.g. ``'CPython'``.
    """

    python_version: str
    """The Python version as string ``'major.minor'``."""

    sys_platform: str
    """
    This string contains a platform identifier that can be used to append
    platform-specific components to :py:data:`sys.path`, for instance.

    For Unix systems, except on Linux and AIX, this is the lowercased OS name as
    returned by ``uname -s`` with the first part of the version as returned by
    ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python
    was built.
    """
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: list[str] | MarkerAtom | str, first: bool | None = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
# Fallback comparison operators, used by ``_eval_op`` when the right-hand
# side does not form a valid PEP 440 version specifier.
_operators: dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str | AbstractSet[str]) -> bool:
    """Apply one marker comparison operator to its two operands.

    PEP 440 version-specifier semantics are preferred when the operator
    plus right-hand side parse as a valid specifier; otherwise fall back
    to the plain string/set operators in ``_operators``.

    Raises UndefinedComparison for operators with no defined fallback.
    """
    op_text = op.serialize()
    if isinstance(rhs, str):
        try:
            spec = Specifier(op_text + rhs)
        except InvalidSpecifier:
            pass
        else:
            return spec.contains(lhs, prereleases=True)

    comparator: Operator | None = _operators.get(op_text)
    if comparator is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return comparator(lhs, rhs)
def _normalize(
    lhs: str, rhs: str | AbstractSet[str], key: str
) -> tuple[str, str | AbstractSet[str]]:
    """Canonicalize both sides of a comparison when the marker requires it.

    PEP 685 (https://peps.python.org/pep-0685/) mandates that extra names
    be normalized with PEP 503 semantics before comparison; the same is
    applied to the set-valued markers in ``MARKERS_ALLOWING_SET``. All
    other environment markers have no such standard and pass through.
    """
    if key == "extra":
        assert isinstance(rhs, str), "extra value must be a string"
        return canonicalize_name(lhs), canonicalize_name(rhs)

    if key in MARKERS_ALLOWING_SET:
        if isinstance(rhs, str):  # pragma: no cover
            return canonicalize_name(lhs), canonicalize_name(rhs)
        return canonicalize_name(lhs), {canonicalize_name(item) for item in rhs}

    # other environment markers don't have such standards
    return lhs, rhs
def _evaluate_markers(
markers: MarkerList, environment: dict[str, str | AbstractSet[str]]
) -> bool:
groups: list[list[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
assert isinstance(lhs_value, str), "lhs must be a string"
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: sys._version_info) -> str:
version = f"{info.major}.{info.minor}.{info.micro}"
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Environment:
    """Return the marker environment describing the running interpreter.

    Keys and their sources follow PEP 508's environment-marker table.
    """
    implementation = sys.implementation
    return {
        "implementation_name": implementation.name,
        "implementation_version": format_full_version(implementation.version),
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker:
    """A parsed PEP 508 environment marker, e.g. ``python_version >= "3.8"``."""

    def __init__(self, marker: str) -> None:
        # Note: We create a Marker object without calling this constructor in
        # packaging.requirements.Requirement. If any additional logic is
        # added here, make sure to mirror/adapt Requirement.
        try:
            self._markers = _normalize_extra_values(_parse_marker(marker))
            # The attribute `_markers` can be described in terms of a recursive type:
            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
            #
            # For example, the following expression:
            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
            #
            # is parsed into:
            # [
            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
            #     'or',
            #     [
            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
            #         'and',
            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
            #     ]
            # ]
        except ParserSyntaxError as e:
            raise InvalidMarker(str(e)) from e

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        # FIX: the repr text had been stripped to f"" (an empty string);
        # restore the conventional `<Marker('...')>` form.
        return f"<Marker('{self}')>"

    def __hash__(self) -> int:
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Marker):
            return NotImplemented

        return str(self) == str(other)

    def evaluate(
        self,
        environment: dict[str, str] | None = None,
        context: EvaluateContext = "metadata",
    ) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment. The *context* parameter specifies what
        context the markers are being evaluated for, which influences what markers
        are considered valid. Acceptable values are "metadata" (for core metadata;
        default), "lock_file", and "requirement" (i.e. all other situations).

        The environment is determined from the current Python process.
        """
        current_environment = cast(
            "dict[str, str | AbstractSet[str]]", default_environment()
        )
        if context == "lock_file":
            current_environment.update(
                extras=frozenset(), dependency_groups=frozenset()
            )
        elif context == "metadata":
            current_environment["extra"] = ""
        if environment is not None:
            current_environment.update(environment)
            # The API used to allow setting extra to None. We need to handle this
            # case for backwards compatibility.
            if "extra" in current_environment and current_environment["extra"] is None:
                current_environment["extra"] = ""

        return _evaluate_markers(
            self._markers, _repair_python_full_version(current_environment)
        )
def _repair_python_full_version(
env: dict[str, str | AbstractSet[str]],
) -> dict[str, str | AbstractSet[str]]:
"""
Work around platform.python_version() returning something that is not PEP 440
compliant for non-tagged Python builds.
"""
python_full_version = cast(str, env["python_full_version"])
if python_full_version.endswith("+"):
env["python_full_version"] = f"{python_full_version}local"
return env
venv\Lib\site-packages\packaging\metadata.py
from __future__ import annotations
import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import pathlib
import sys
import typing
from typing import (
Any,
Callable,
Generic,
Literal,
TypedDict,
cast,
)
from . import licenses, requirements, specifiers, utils
from . import version as version_module
from .licenses import NormalizedLicenseExpression
# Generic parameter for the ``_Validator`` descriptor defined below.
T = typing.TypeVar("T")

if sys.version_info >= (3, 11):  # pragma: no cover
    ExceptionGroup = ExceptionGroup
else:  # pragma: no cover

    class ExceptionGroup(Exception):
        """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.

        If :external:exc:`ExceptionGroup` is already defined by Python itself,
        that version is used instead.
        """

        message: str
        exceptions: list[Exception]

        def __init__(self, message: str, exceptions: list[Exception]) -> None:
            self.message = message
            self.exceptions = exceptions

        def __repr__(self) -> str:
            return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"
class InvalidMetadata(ValueError):
    """Raised when a core-metadata field holds invalid data."""

    # Name of the core metadata field whose value failed validation.
    field: str

    def __init__(self, field: str, message: str) -> None:
        super().__init__(message)
        self.field = field
# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# formats offer some very basic primitives in *some* way then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
    """A dictionary of raw core metadata.

    Each field in core metadata maps to a key of this dictionary (when data is
    provided). The key is lower-case and underscores are used instead of dashes
    compared to the equivalent core metadata field. Any core metadata field that
    can be specified multiple times or can hold multiple values in a single
    field have a key with a plural name. See :class:`Metadata` whose attributes
    match the keys of this dictionary.

    Core metadata fields that can be specified multiple times are stored as a
    list or dict depending on which is appropriate for the field. Any fields
    which hold multiple values in a single field are stored as a list.
    """

    # Metadata 1.0 - PEP 241
    metadata_version: str
    name: str
    version: str
    platforms: list[str]
    summary: str
    description: str
    keywords: list[str]
    home_page: str
    author: str
    author_email: str
    license: str

    # Metadata 1.1 - PEP 314
    supported_platforms: list[str]
    download_url: str
    classifiers: list[str]
    requires: list[str]
    provides: list[str]
    obsoletes: list[str]

    # Metadata 1.2 - PEP 345
    maintainer: str
    maintainer_email: str
    requires_dist: list[str]
    provides_dist: list[str]
    obsoletes_dist: list[str]
    requires_python: str
    requires_external: list[str]
    project_urls: dict[str, str]

    # Metadata 2.0
    # PEP 426 attempted to completely revamp the metadata format
    # but got stuck without ever being able to build consensus on
    # it and ultimately ended up withdrawn.
    #
    # However, a number of tools had started emitting METADATA with
    # `2.0` Metadata-Version, so for historical reasons, this version
    # was skipped.

    # Metadata 2.1 - PEP 566
    description_content_type: str
    provides_extra: list[str]

    # Metadata 2.2 - PEP 643
    dynamic: list[str]

    # Metadata 2.3 - PEP 685
    # No new fields were added in PEP 685, just some edge case were
    # tightened up to provide better interoperability.

    # Metadata 2.4 - PEP 639
    license_expression: str
    license_files: list[str]
# RawMetadata keys whose raw value is a single string.
_STRING_FIELDS = {
    "author",
    "author_email",
    "description",
    "description_content_type",
    "download_url",
    "home_page",
    "license",
    "license_expression",
    "maintainer",
    "maintainer_email",
    "metadata_version",
    "name",
    "requires_python",
    "summary",
    "version",
}

# RawMetadata keys that may appear multiple times; stored as lists of strings.
_LIST_FIELDS = {
    "classifiers",
    "dynamic",
    "license_files",
    "obsoletes",
    "obsoletes_dist",
    "platforms",
    "provides",
    "provides_dist",
    "provides_extra",
    "requires",
    "requires_dist",
    "requires_external",
    "supported_platforms",
}

# RawMetadata keys stored as mappings (parsed from "label, URL" strings).
_DICT_FIELDS = {
    "project_urls",
}
def _parse_keywords(data: str) -> list[str]:
"""Split a string of comma-separated keywords into a list of keywords."""
return [k.strip() for k in data.split(",")]
def _parse_project_urls(data: list[str]) -> dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
# Our logic is slightly tricky here as we want to try and do
# *something* reasonable with malformed data.
#
# The main thing that we have to worry about, is data that does
# not have a ',' at all to split the label from the Value. There
# isn't a singular right answer here, and we will fail validation
# later on (if the caller is validating) so it doesn't *really*
# matter, but since the missing value has to be an empty str
# and our return value is dict[str, str], if we let the key
# be the missing value, then they'd have multiple '' values that
# overwrite each other in a accumulating dict.
#
# The other potentional issue is that it's possible to have the
# same label multiple times in the metadata, with no solid "right"
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
# be case-preserving and case-insensitive, but doing that
# would open up more cases where we might have duplicate
# entries.
label, url = parts
if label in urls:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
urls[label] = url
return urls
def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
if isinstance(source, str):
payload = msg.get_payload()
assert isinstance(payload, str)
return payload
# If our source is a bytes, then we're managing the encoding and we need
# to deal with it.
else:
bpayload = msg.get_payload(decode=True)
assert isinstance(bpayload, bytes)
try:
return bpayload.decode("utf8", "strict")
except UnicodeDecodeError as exc:
raise ValueError("payload in an invalid encoding") from exc
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.

# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "license-expression": "license_expression",
    "license-file": "license_files",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}

# Inverse mapping: RawMetadata key -> canonical METADATA header name.
_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}
def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:
    """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.
    """
    raw: dict[str, str | list[str] | dict[str, str]] = {}
    unparsed: dict[str, list[str]] = {}

    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name) or []

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all() ),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibacked happens, this key is
            # going into the unparsed dict anyways.
            if isinstance(h, email.header.Header):
                # The Header object stores it's data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: list[tuple[bytes, str | None]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation, we've encountered a key that
            # we don't know what it means, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls is implemented in the metadata spec as a list of
        # specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))  # type: ignore[call-overload]
        )
    else:
        if payload:
            # Check to see if we've already got a description, if so then both
            # it, and this body move to unparseable.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to a metadata, because a TypedDict only support
    # literal key names, but we're computing our key names on purpose, but the
    # way this function is implemented, our `TypedDict` can only have valid key
    # names.
    return cast(RawMetadata, raw), unparsed
# Sentinel distinguishing "attribute absent" from "attribute is None".
_NOT_FOUND = object()

# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]

# Fields that must always be present for metadata to be considered valid.
_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
class _Validator(Generic[T]):
    """Validate a metadata field.

    All _process_*() methods correspond to a core metadata field. The method is
    called with the field's raw value. If the raw value is valid it is returned
    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
    as appropriate).
    """

    name: str  # Attribute name on the owning Metadata class (via __set_name__).
    raw_name: str  # Corresponding email-header field name.
    added: _MetadataVersion  # Metadata version that introduced this field.

    def __init__(
        self,
        *,
        added: _MetadataVersion = "1.0",
    ) -> None:
        self.added = added

    def __set_name__(self, _owner: Metadata, name: str) -> None:
        self.name = name
        self.raw_name = _RAW_TO_EMAIL_MAPPING[name]

    def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
        """Return the enriched value for this field, caching it on the instance."""
        # With Python 3.8, the caching can be replaced with functools.cached_property().
        # No need to check the cache as attribute lookup will resolve into the
        # instance's __dict__ before __get__ is called.
        cache = instance.__dict__
        value = instance._raw.get(self.name)

        # To make the _process_* methods easier, we'll check if the value is None
        # and if this field is NOT a required attribute, and if both of those
        # things are true, we'll skip the converter. This will mean that the
        # converters never have to deal with the None union.
        if self.name in _REQUIRED_ATTRS or value is not None:
            try:
                converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
            except AttributeError:
                pass
            else:
                value = converter(value)

        cache[self.name] = value
        try:
            # Drop the raw entry now that the enriched value is cached.
            del instance._raw[self.name]  # type: ignore[misc]
        except KeyError:
            pass

        return cast(T, value)

    def _invalid_metadata(
        self, msg: str, cause: Exception | None = None
    ) -> InvalidMetadata:
        """Build an InvalidMetadata for this field, substituting ``{field}`` in *msg*."""
        exc = InvalidMetadata(
            self.raw_name, msg.format_map({"field": repr(self.raw_name)})
        )
        exc.__cause__ = cause
        return exc

    def _process_metadata_version(self, value: str) -> _MetadataVersion:
        # Implicitly makes Metadata-Version required.
        if value not in _VALID_METADATA_VERSIONS:
            raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
        return cast(_MetadataVersion, value)

    def _process_name(self, value: str) -> str:
        if not value:
            raise self._invalid_metadata("{field} is a required field")
        # Validate the name as a side-effect.
        try:
            utils.canonicalize_name(value, validate=True)
        except utils.InvalidName as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            ) from exc
        else:
            return value

    def _process_version(self, value: str) -> version_module.Version:
        if not value:
            raise self._invalid_metadata("{field} is a required field")
        try:
            return version_module.parse(value)
        except version_module.InvalidVersion as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            ) from exc

    def _process_summary(self, value: str) -> str:
        """Check the field contains no newlines."""
        if "\n" in value:
            raise self._invalid_metadata("{field} must be a single line")
        return value

    def _process_description_content_type(self, value: str) -> str:
        content_types = {"text/plain", "text/x-rst", "text/markdown"}
        message = email.message.EmailMessage()
        message["content-type"] = value

        content_type, parameters = (
            # Defaults to `text/plain` if parsing failed.
            message.get_content_type().lower(),
            message["content-type"].params,
        )
        # Check if content-type is valid or defaulted to `text/plain` and thus was
        # not parseable.
        if content_type not in content_types or content_type not in value.lower():
            raise self._invalid_metadata(
                f"{{field}} must be one of {list(content_types)}, not {value!r}"
            )

        charset = parameters.get("charset", "UTF-8")
        if charset != "UTF-8":
            # BUGFIX: previously `list(charset)` split the charset string into
            # individual characters in the error message; report it verbatim.
            raise self._invalid_metadata(
                f"{{field}} can only specify the UTF-8 charset, not {charset!r}"
            )

        markdown_variants = {"GFM", "CommonMark"}
        variant = parameters.get("variant", "GFM")  # Use an acceptable default.
        if content_type == "text/markdown" and variant not in markdown_variants:
            raise self._invalid_metadata(
                f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
                f"not {variant!r}",
            )
        return value

    def _process_dynamic(self, value: list[str]) -> list[str]:
        # Lowercase once up front; dynamic field names compare case-insensitively.
        lowered = [dynamic_field.lower() for dynamic_field in value]
        for dynamic_field in lowered:
            if dynamic_field in {"name", "version", "metadata-version"}:
                raise self._invalid_metadata(
                    f"{dynamic_field!r} is not allowed as a dynamic field"
                )
            elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
                raise self._invalid_metadata(
                    f"{dynamic_field!r} is not a valid dynamic field"
                )
        return lowered

    def _process_provides_extra(
        self,
        value: list[str],
    ) -> list[utils.NormalizedName]:
        normalized_names = []
        try:
            for name in value:
                normalized_names.append(utils.canonicalize_name(name, validate=True))
        except utils.InvalidName as exc:
            # `name` is the offending entry (loop variable survives the raise).
            raise self._invalid_metadata(
                f"{name!r} is invalid for {{field}}", cause=exc
            ) from exc
        else:
            return normalized_names

    def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:
        try:
            return specifiers.SpecifierSet(value)
        except specifiers.InvalidSpecifier as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            ) from exc

    def _process_requires_dist(
        self,
        value: list[str],
    ) -> list[requirements.Requirement]:
        reqs = []
        try:
            for req in value:
                reqs.append(requirements.Requirement(req))
        except requirements.InvalidRequirement as exc:
            # `req` is the offending entry (loop variable survives the raise).
            raise self._invalid_metadata(
                f"{req!r} is invalid for {{field}}", cause=exc
            ) from exc
        else:
            return reqs

    def _process_license_expression(
        self, value: str
    ) -> NormalizedLicenseExpression | None:
        try:
            return licenses.canonicalize_license_expression(value)
        except ValueError as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            ) from exc

    def _process_license_files(self, value: list[str]) -> list[str]:
        """Validate License-File entries: relative, resolved, '/'-delimited paths."""
        paths = []
        for path in value:
            if ".." in path:
                raise self._invalid_metadata(
                    f"{path!r} is invalid for {{field}}, "
                    "parent directory indicators are not allowed"
                )
            if "*" in path:
                raise self._invalid_metadata(
                    f"{path!r} is invalid for {{field}}, paths must be resolved"
                )
            if (
                pathlib.PurePosixPath(path).is_absolute()
                or pathlib.PureWindowsPath(path).is_absolute()
            ):
                raise self._invalid_metadata(
                    f"{path!r} is invalid for {{field}}, paths must be relative"
                )
            if pathlib.PureWindowsPath(path).as_posix() != path:
                raise self._invalid_metadata(
                    f"{path!r} is invalid for {{field}}, paths must use '/' delimiter"
                )
            paths.append(path)
        return paths
class Metadata:
    """Representation of distribution metadata.

    Compared to :class:`RawMetadata`, this class provides objects representing
    metadata fields instead of only using built-in types. Any invalid metadata
    will cause :exc:`InvalidMetadata` to be raised (with a
    :py:attr:`~BaseException.__cause__` attribute as appropriate).
    """

    # Raw, unprocessed metadata; entries migrate into the instance __dict__ as
    # the _Validator descriptors cache their enriched forms on first access.
    _raw: RawMetadata

    @classmethod
    def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:
        """Create an instance from :class:`RawMetadata`.

        If *validate* is true, all metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        """
        ins = cls()
        ins._raw = data.copy()  # Mutations occur due to caching enriched values.

        if validate:
            exceptions: list[Exception] = []
            try:
                # Validate Metadata-Version first; field-age checks below
                # depend on knowing which version the metadata declares.
                metadata_version = ins.metadata_version
                metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
            except InvalidMetadata as metadata_version_exc:
                exceptions.append(metadata_version_exc)
                metadata_version = None

            # Make sure to check for the fields that are present, the required
            # fields (so their absence can be reported).
            fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS
            # Remove fields that have already been checked.
            fields_to_check -= {"metadata_version"}

            for key in fields_to_check:
                try:
                    if metadata_version:
                        # Can't use getattr() as that triggers descriptor protocol which
                        # will fail due to no value for the instance argument.
                        try:
                            field_metadata_version = cls.__dict__[key].added
                        except KeyError:
                            exc = InvalidMetadata(key, f"unrecognized field: {key!r}")
                            exceptions.append(exc)
                            continue
                        field_age = _VALID_METADATA_VERSIONS.index(
                            field_metadata_version
                        )
                        if field_age > metadata_age:
                            # Field postdates the declared metadata version.
                            field = _RAW_TO_EMAIL_MAPPING[key]
                            exc = InvalidMetadata(
                                field,
                                f"{field} introduced in metadata version "
                                f"{field_metadata_version}, not {metadata_version}",
                            )
                            exceptions.append(exc)
                            continue
                    # Trigger the descriptor so per-field validation runs.
                    getattr(ins, key)
                except InvalidMetadata as exc:
                    exceptions.append(exc)

            if exceptions:
                raise ExceptionGroup("invalid metadata", exceptions)

        return ins

    @classmethod
    def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata:
        """Parse metadata from email headers.

        If *validate* is true, the metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        """
        raw, unparsed = parse_email(data)

        if validate:
            exceptions: list[Exception] = []
            # Anything left unparsed is either a known field with bad data or
            # an unknown field entirely; report both kinds.
            for unparsed_key in unparsed:
                if unparsed_key in _EMAIL_TO_RAW_MAPPING:
                    message = f"{unparsed_key!r} has invalid data"
                else:
                    message = f"unrecognized field: {unparsed_key!r}"
                exceptions.append(InvalidMetadata(unparsed_key, message))

            if exceptions:
                raise ExceptionGroup("unparsed", exceptions)

        try:
            return cls.from_raw(raw, validate=validate)
        except ExceptionGroup as exc_group:
            # Re-wrap so callers get one group regardless of which stage failed.
            raise ExceptionGroup(
                "invalid or unparsed metadata", exc_group.exceptions
            ) from None

    metadata_version: _Validator[_MetadataVersion] = _Validator()
    """:external:ref:`core-metadata-metadata-version`
    (required; validated to be a valid metadata version)"""
    # `name` is not normalized/typed to NormalizedName so as to provide access to
    # the original/raw name.
    name: _Validator[str] = _Validator()
    """:external:ref:`core-metadata-name`
    (required; validated using :func:`~packaging.utils.canonicalize_name` and its
    *validate* parameter)"""
    version: _Validator[version_module.Version] = _Validator()
    """:external:ref:`core-metadata-version` (required)"""
    dynamic: _Validator[list[str] | None] = _Validator(
        added="2.2",
    )
    """:external:ref:`core-metadata-dynamic`
    (validated against core metadata field names and lowercased)"""
    platforms: _Validator[list[str] | None] = _Validator()
    """:external:ref:`core-metadata-platform`"""
    supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-supported-platform`"""
    summary: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-summary` (validated to contain no newlines)"""
    description: _Validator[str | None] = _Validator()  # TODO 2.1: can be in body
    """:external:ref:`core-metadata-description`"""
    description_content_type: _Validator[str | None] = _Validator(added="2.1")
    """:external:ref:`core-metadata-description-content-type` (validated)"""
    keywords: _Validator[list[str] | None] = _Validator()
    """:external:ref:`core-metadata-keywords`"""
    home_page: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-home-page`"""
    download_url: _Validator[str | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-download-url`"""
    author: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-author`"""
    author_email: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-author-email`"""
    maintainer: _Validator[str | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-maintainer`"""
    maintainer_email: _Validator[str | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-maintainer-email`"""
    license: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-license`"""
    license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator(
        added="2.4"
    )
    """:external:ref:`core-metadata-license-expression`"""
    license_files: _Validator[list[str] | None] = _Validator(added="2.4")
    """:external:ref:`core-metadata-license-file`"""
    classifiers: _Validator[list[str] | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-classifier`"""
    requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(
        added="1.2"
    )
    """:external:ref:`core-metadata-requires-dist`"""
    requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(
        added="1.2"
    )
    """:external:ref:`core-metadata-requires-python`"""
    # Because `Requires-External` allows for non-PEP 440 version specifiers, we
    # don't do any processing on the values.
    requires_external: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-requires-external`"""
    project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-project-url`"""
    # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation
    # regardless of metadata version.
    provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(
        added="2.1",
    )
    """:external:ref:`core-metadata-provides-extra`"""
    provides_dist: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-provides-dist`"""
    obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-obsoletes-dist`"""
    requires: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Requires`` (deprecated)"""
    provides: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Provides`` (deprecated)"""
    obsoletes: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Obsoletes`` (deprecated)"""
# ---- File: venv\Lib\site-packages\packaging\requirements.py ----
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
from typing import Any, Iterator
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
from .utils import canonicalize_name
class InvalidRequirement(ValueError):
    """Raised when a requirement string cannot be parsed.

    The expected grammar for requirement strings is defined by PEP 508.
    """
class Requirement:
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string: str) -> None:
        try:
            parsed = _parse_requirement(requirement_string)
        except ParserSyntaxError as e:
            raise InvalidRequirement(str(e)) from e

        self.name: str = parsed.name
        self.url: str | None = parsed.url or None
        self.extras: set[str] = set(parsed.extras or [])
        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
        self.marker: Marker | None = None
        if parsed.marker is not None:
            # Reuse the already-parsed marker tree instead of re-parsing its
            # string form, hence bypassing Marker.__init__ via __new__.
            self.marker = Marker.__new__(Marker)
            self.marker._markers = _normalize_extra_values(parsed.marker)

    def _iter_parts(self, name: str) -> Iterator[str]:
        """Yield the string pieces of this requirement, using *name* as the name."""
        yield name

        if self.extras:
            formatted_extras = ",".join(sorted(self.extras))
            yield f"[{formatted_extras}]"

        if self.specifier:
            yield str(self.specifier)

        if self.url:
            yield f"@ {self.url}"
            if self.marker:
                # BUGFIX (restored nesting): the separating space is only
                # needed between a URL and the marker's "; " — without a URL,
                # str() must produce "name; marker" with no leading space.
                yield " "

        if self.marker:
            yield f"; {self.marker}"

    def __str__(self) -> str:
        return "".join(self._iter_parts(self.name))

    def __repr__(self) -> str:
        # Restored: the original f-string's contents were lost in extraction.
        return f"<Requirement('{self}')>"

    def __hash__(self) -> int:
        # Hash on the canonicalized name so "Foo" and "foo" requirements with
        # identical parts hash equally, matching __eq__ below.
        return hash(
            (
                self.__class__.__name__,
                *self._iter_parts(canonicalize_name(self.name)),
            )
        )

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Requirement):
            return NotImplemented

        return (
            canonicalize_name(self.name) == canonicalize_name(other.name)
            and self.extras == other.extras
            and self.specifier == other.specifier
            and self.url == other.url
            and self.marker == other.marker
        )
# ---- File: venv\Lib\site-packages\packaging\specifiers.py ----
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
from packaging.version import Version
"""
from __future__ import annotations
import abc
import itertools
import re
from typing import Callable, Iterable, Iterator, TypeVar, Union
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]
def _coerce_version(version: UnparsedVersion) -> Version:
    """Return *version* as a :class:`Version`, parsing it if it is a string."""
    if isinstance(version, Version):
        return version
    return Version(version)
class InvalidSpecifier(ValueError):
    """Raised for a syntactically invalid specifier string.

    >>> Specifier("lolwat")
    Traceback (most recent call last):
        ...
    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
    """
class BaseSpecifier(metaclass=abc.ABCMeta):
    """Abstract interface shared by :class:`Specifier` and :class:`SpecifierSet`."""

    @abc.abstractmethod
    def __str__(self) -> str:
        """Return a string form representative of this Specifier-like object."""

    @abc.abstractmethod
    def __hash__(self) -> int:
        """Return a hash value for this Specifier-like object."""

    @abc.abstractmethod
    def __eq__(self, other: object) -> bool:
        """Return whether this Specifier-like object equals *other*.

        :param other: The other object to check against.
        """

    @property
    @abc.abstractmethod
    def prereleases(self) -> bool | None:
        """Whether or not pre-releases as a whole are allowed.

        ``True``/``False`` explicitly enable or disable prereleases; ``None``
        (the default) selects the default semantics.
        """

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        """Setter for :attr:`prereleases`.

        :param value: The value to set.
        """

    @abc.abstractmethod
    def contains(self, item: str, prereleases: bool | None = None) -> bool:
        """Return whether *item* is contained within this specifier."""

    @abc.abstractmethod
    def filter(
        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
    ) -> Iterator[UnparsedVersionVar]:
        """Filter *iterable*, keeping only the items contained in this specifier."""
class Specifier(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should instead
prefer to work with :class:`SpecifierSet` instead, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: bool | None = None) -> None:
    """Initialize a Specifier instance.

    :param spec:
        The string representation of a specifier which will be parsed and
        normalized before use.
    :param prereleases:
        Whether prerelease versions should be accepted; ``None`` (the
        default) autodetects it from the given specifier.
    :raises InvalidSpecifier:
        If the given specifier is invalid (i.e. bad syntax).
    """
    parsed = self._regex.search(spec)
    if parsed is None:
        raise InvalidSpecifier(f"Invalid specifier: {spec!r}")

    # Keep only the normalized (operator, version) pair.
    self._spec: tuple[str, str] = (
        parsed.group("operator").strip(),
        parsed.group("version").strip(),
    )

    # Store whether or not this Specifier should accept prereleases
    self._prereleases = prereleases
# https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
@property  # type: ignore[override]
def prereleases(self) -> bool:
    # If there is an explicit prereleases set for this, then we'll just
    # blindly use that.
    if self._prereleases is not None:
        return self._prereleases

    # Look at all of our specifiers and determine if they are inclusive
    # operators, and if they are if they are including an explicit
    # prerelease.
    operator, version = self._spec
    if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]:
        # The == specifier can include a trailing .*, if it does we
        # want to remove before parsing.
        if operator == "==" and version.endswith(".*"):
            version = version[:-2]

        # Parse the version, and if it is a pre-release than this
        # specifier allows pre-releases.
        if Version(version).is_prerelease:
            return True

    return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
    # Explicitly override whatever would otherwise be auto-detected.
    self._prereleases = value
@property
def operator(self) -> str:
    """The comparison operator of this specifier.

    >>> Specifier("==1.2.3").operator
    '=='
    """
    op, _ = self._spec
    return op
@property
def version(self) -> str:
    """The version component of this specifier.

    >>> Specifier("==1.2.3").version
    '1.2.3'
    """
    _, ver = self._spec
    return ver
def __repr__(self) -> str:
    """A representation of the Specifier that shows all internal state.

    Doctest examples restored (angle-bracket content was lost in extraction):

    >>> Specifier('>=1.0.0')
    <Specifier('>=1.0.0')>
    >>> Specifier('>=1.0.0', prereleases=False)
    <Specifier('>=1.0.0', prereleases=False)>
    >>> Specifier('>=1.0.0', prereleases=True)
    <Specifier('>=1.0.0', prereleases=True)>
    """
    # Only show prereleases when it was explicitly set by the caller.
    pre = (
        f", prereleases={self.prereleases!r}"
        if self._prereleases is not None
        else ""
    )

    return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
    """A string representation of the Specifier that can be round-tripped.

    >>> str(Specifier('>=1.0.0'))
    '>=1.0.0'
    >>> str(Specifier('>=1.0.0', prereleases=False))
    '>=1.0.0'
    """
    op, ver = self._spec
    return f"{op}{ver}"
@property
def _canonical_spec(self) -> tuple[str, str]:
    # Canonicalized form used for hashing and equality. Trailing zeros are
    # only stripped when the operator is not ~=.
    op, raw_version = self._spec
    return op, canonicalize_version(
        raw_version, strip_trailing_zero=(op != "~=")
    )
def __hash__(self) -> int:
    # Hash the canonicalized spec so equivalent specifiers hash equally,
    # matching __eq__.
    return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
    """Whether or not the two Specifier-like objects are equal.

    :param other: The other object to check against.

    The value of :attr:`prereleases` is ignored.

    >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
    True
    >>> (Specifier("==1.2.3", prereleases=False) ==
    ...  Specifier("==1.2.3", prereleases=True))
    True
    >>> Specifier("==1.2.3") == "==1.2.3"
    True
    >>> Specifier("==1.2.3") == Specifier("==1.2.4")
    False
    >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
    False
    """
    if isinstance(other, str):
        try:
            # Allow comparison directly against plain specifier strings.
            other = self.__class__(str(other))
        except InvalidSpecifier:
            return NotImplemented
    elif not isinstance(other, self.__class__):
        return NotImplemented

    # Compare canonicalized forms so spelling differences don't matter.
    return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
    """Map an operator string (e.g. ``>=``) to its ``_compare_*`` method."""
    method_name = f"_compare_{self._operators[op]}"
    operator_callable: CallableOperator = getattr(self, method_name)
    return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
    """Implement the ``~=`` (compatible release) operator."""
    # Compatible releases have an equivalent combination of >= and ==. That
    # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
    # implement this in terms of the other specifiers instead of
    # implementing it ourselves. The only thing we need to do is construct
    # the other specifiers.

    # We want everything but the last item in the version, but we want to
    # ignore suffix segments.
    prefix = _version_join(
        list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
    )

    # Add the prefix notation to the end of our string
    prefix += ".*"

    return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
        prospective, prefix
    )
def _compare_equal(self, prospective: Version, spec: str) -> bool:
    """Implement ``==``, including ``.*`` prefix matching."""
    # We need special logic to handle prefix matching
    if spec.endswith(".*"):
        # In the case of prefix matching we want to ignore local segment.
        normalized_prospective = canonicalize_version(
            prospective.public, strip_trailing_zero=False
        )
        # Get the normalized version string ignoring the trailing .*
        normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
        # Split the spec out by bangs and dots, and pretend that there is
        # an implicit dot in between a release segment and a pre-release segment.
        split_spec = _version_split(normalized_spec)

        # Split the prospective version out by bangs and dots, and pretend
        # that there is an implicit dot in between a release segment and
        # a pre-release segment.
        split_prospective = _version_split(normalized_prospective)

        # 0-pad the prospective version before shortening it to get the correct
        # shortened version.
        padded_prospective, _ = _pad_version(split_prospective, split_spec)

        # Shorten the prospective version to be the same length as the spec
        # so that we can determine if the specifier is a prefix of the
        # prospective version or not.
        shortened_prospective = padded_prospective[: len(split_spec)]

        return shortened_prospective == split_spec
    else:
        # Convert our spec string into a Version
        spec_version = Version(spec)

        # If the specifier does not have a local segment, then we want to
        # act as if the prospective version also does not have a local
        # segment.
        if not spec_version.local:
            prospective = Version(prospective.public)

        return prospective == spec_version
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
    """Implement ``!=`` as the negation of ``==``."""
    is_equal = self._compare_equal(prospective, spec)
    return not is_equal
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
    """Implement the inclusive ``<=`` operator."""
    # NB: Local version identifiers are NOT permitted in the version
    # specifier, so local version labels can be universally removed from
    # the prospective version.
    return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
    """Implement the inclusive ``>=`` operator."""
    # NB: Local version identifiers are NOT permitted in the version
    # specifier, so local version labels can be universally removed from
    # the prospective version.
    return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
    """Implement the exclusive ``<`` operator."""
    # Convert our spec to a Version instance, since we'll want to work with
    # it as a version.
    spec = Version(spec_str)

    # Check to see if the prospective version is less than the spec
    # version. If it's not we can short circuit and just return False now
    # instead of doing extra unneeded work.
    if not prospective < spec:
        return False

    # This special case is here so that, unless the specifier itself
    # includes is a pre-release version, that we do not accept pre-release
    # versions for the version mentioned in the specifier (e.g. <3.1 should
    # not match 3.1.dev0, but should match 3.0.dev0).
    if not spec.is_prerelease and prospective.is_prerelease:
        if Version(prospective.base_version) == Version(spec.base_version):
            return False

    # If we've gotten to here, it means that prospective version is both
    # less than the spec version *and* it's not a pre-release of the same
    # version in the spec.
    return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
    """Implement the exclusive ``>`` operator."""
    # Convert our spec to a Version instance, since we'll want to work with
    # it as a version.
    spec = Version(spec_str)

    # Check to see if the prospective version is greater than the spec
    # version. If it's not we can short circuit and just return False now
    # instead of doing extra unneeded work.
    if not prospective > spec:
        return False

    # This special case is here so that, unless the specifier itself
    # includes is a post-release version, that we do not accept
    # post-release versions for the version mentioned in the specifier
    # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
    if not spec.is_postrelease and prospective.is_postrelease:
        if Version(prospective.base_version) == Version(spec.base_version):
            return False

    # Ensure that we do not allow a local version of the version mentioned
    # in the specifier, which is technically greater than, to match.
    if prospective.local is not None:
        if Version(prospective.base_version) == Version(spec.base_version):
            return False

    # If we've gotten to here, it means that prospective version is both
    # greater than the spec version *and* it's not a pre-release of the
    # same version in the spec.
    return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
    """Implement ``===``: case-insensitive comparison of the exact string forms."""
    lhs = str(prospective).lower()
    rhs = str(spec).lower()
    return lhs == rhs
def __contains__(self, item: str | Version) -> bool:
    """Support the ``in`` operator; same as :meth:`contains` with defaults.

    :param item: The item to check for.

    >>> "1.2.3" in Specifier(">=1.2.3")
    True
    >>> Version("1.2.3") in Specifier(">=1.2.3")
    True
    >>> "1.0.0" in Specifier(">=1.2.3")
    False
    >>> "1.3.0a1" in Specifier(">=1.2.3")
    False
    >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
    True
    """
    return self.contains(item)
def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:
    """Return whether or not the item is contained in this specifier.

    :param item:
        The item to check for, which can be a version string or a
        :class:`Version` instance.
    :param prereleases:
        Whether or not to match prereleases with this Specifier. If set to
        ``None`` (the default), it uses :attr:`prereleases` to determine
        whether or not prereleases are allowed.

    >>> Specifier(">=1.2.3").contains("1.2.3")
    True
    >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
    True
    >>> Specifier(">=1.2.3").contains("1.0.0")
    False
    >>> Specifier(">=1.2.3").contains("1.3.0a1")
    False
    >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
    True
    >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
    True
    """
    # Determine if prereleases are to be allowed or not.
    if prereleases is None:
        prereleases = self.prereleases

    # Normalize item to a Version, this allows us to have a shortcut for
    # "2.0" in Specifier(">=2")
    normalized_item = _coerce_version(item)

    # Determine if we should be supporting prereleases in this specifier
    # or not, if we do not support prereleases than we can short circuit
    # logic if this version is a prereleases.
    if normalized_item.is_prerelease and not prereleases:
        return False

    # Actually do the comparison to determine if this item is contained
    # within this Specifier or not.
    operator_callable: CallableOperator = self._get_operator(self.operator)
    return operator_callable(normalized_item, self.version)
def filter(
    self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
    """Filter items in the given iterable, that match the specifier.

    :param iterable:
        An iterable that can contain version strings and :class:`Version` instances.
        The items in the iterable will be filtered according to the specifier.
    :param prereleases:
        Whether or not to allow prereleases in the returned iterator. If set to
        ``None`` (the default), it will be intelligently decide whether to allow
        prereleases or not (based on the :attr:`prereleases` attribute, and
        whether the only versions matching are prereleases).

    This method is smarter than just ``filter(Specifier().contains, [...])``
    because it implements the rule from :pep:`440` that a prerelease item
    SHOULD be accepted if no other versions match the given specifier.

    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
    ['1.3']
    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
    ['1.2.3', '1.3', <Version('1.4')>]
    >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
    ['1.5a1']
    >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
    ['1.3', '1.5a1']
    >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
    ['1.3', '1.5a1']
    """
    yielded = False
    found_prereleases = []

    # Ask `contains` to accept prereleases when the caller passed None, and
    # sort out whether to actually emit them below — this is what lets the
    # "accept prereleases when nothing else matches" rule work.
    kw = {"prereleases": prereleases if prereleases is not None else True}

    # Attempt to iterate over all the values in the iterable and if any of
    # them match, yield them.
    for version in iterable:
        parsed_version = _coerce_version(version)

        if self.contains(parsed_version, **kw):
            # If our version is a prerelease, and we were not set to allow
            # prereleases, then we'll store it for later in case nothing
            # else matches this specifier.
            if parsed_version.is_prerelease and not (
                prereleases or self.prereleases
            ):
                found_prereleases.append(version)
            # Either this is not a prerelease, or we should have been
            # accepting prereleases from the beginning.
            else:
                yielded = True
                yield version

    # Now that we've iterated over everything, determine if we've yielded
    # any values, and if we have not and we have any prereleases stored up
    # then we will go ahead and yield the prereleases.
    if not yielded and found_prereleases:
        for version in found_prereleases:
            yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> list[str]:
"""Split version into components.
The split components are intended for version comparison. The logic does
not attempt to retain the original version string, so joining the
components back with :func:`_version_join` may not produce the original
version string.
"""
result: list[str] = []
epoch, _, rest = version.rpartition("!")
result.append(epoch or "0")
for item in rest.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _version_join(components: list[str]) -> str:
"""Join split version components into a version string.
This function assumes the input came from :func:`_version_split`, where the
first component must be the epoch (either empty or numeric), and all other
components numeric.
"""
epoch, *rest = components
return f"{epoch}!{'.'.join(rest)}"
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (
list(itertools.chain.from_iterable(left_split)),
list(itertools.chain.from_iterable(right_split)),
)
class SpecifierSet(BaseSpecifier):
    """This class abstracts handling of a set of version specifiers.

    It can be passed a single specifier (``>=3.0``), a comma-separated list of
    specifiers (``>=3.0,!=3.1``), or no specifier at all.
    """

    def __init__(
        self,
        specifiers: str | Iterable[Specifier] = "",
        prereleases: bool | None = None,
    ) -> None:
        """Initialize a SpecifierSet instance.

        :param specifiers:
            The string representation of a specifier or a comma-separated list of
            specifiers which will be parsed and normalized before use.
            May also be an iterable of ``Specifier`` instances, which will be used
            as is.
        :param prereleases:
            This tells the SpecifierSet if it should accept prerelease versions if
            applicable or not. The default of ``None`` will autodetect it from the
            given specifiers.

        :raises InvalidSpecifier:
            If the given ``specifiers`` are not parseable, then this exception
            will be raised.
        """
        if isinstance(specifiers, str):
            # Split on `,` to break each individual specifier into its own item, and
            # strip each item to remove leading/trailing whitespace.
            split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

            # Make each individual specifier a Specifier and save in a frozen set
            # for later.
            self._specs = frozenset(map(Specifier, split_specifiers))
        else:
            # Save the supplied specifiers in a frozen set.
            self._specs = frozenset(specifiers)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    @property
    def prereleases(self) -> bool | None:
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __repr__(self) -> str:
        """A representation of the specifier set that shows all internal state.

        Note that the ordering of the individual specifiers within the set may not
        match the input string.

        >>> SpecifierSet('>=1.0.0,!=2.0.0')
        <SpecifierSet('!=2.0.0,>=1.0.0')>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
        """
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        # Fix: this copy had lost the repr text entirely (it returned f"");
        # the repr must include the class name, the normalized specifier
        # string, and any explicit prereleases override.
        return f"<SpecifierSet({str(self)!r}{pre})>"

    def __str__(self) -> str:
        """A string representation of the specifier set that can be round-tripped.

        Note that the ordering of the individual specifiers within the set may not
        match the input string.

        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
        '!=1.0.1,>=1.0.0'
        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
        '!=1.0.1,>=1.0.0'
        """
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
        """Return a SpecifierSet which is a combination of the two sets.

        :param other: The other object to combine with.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        """
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        # An explicit prereleases override survives the combination only when
        # the two sets do not contradict each other.
        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        """Whether or not the two SpecifierSet-like objects are equal.

        :param other: The other object to check against.

        The value of :attr:`prereleases` is ignored.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
        False
        """
        if isinstance(other, (str, Specifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __len__(self) -> int:
        """Returns the number of specifiers in this specifier set."""
        return len(self._specs)

    def __iter__(self) -> Iterator[Specifier]:
        """
        Returns an iterator over all the underlying :class:`Specifier` instances
        in this specifier set.

        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
        """
        return iter(self._specs)

    def __contains__(self, item: UnparsedVersion) -> bool:
        """Return whether or not the item is contained in this specifier.

        :param item: The item to check for.

        This is used for the ``in`` operator and behaves the same as
        :meth:`contains` with no ``prereleases`` argument passed.

        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
        False
        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
        False
        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
        True
        """
        return self.contains(item)

    def contains(
        self,
        item: UnparsedVersion,
        prereleases: bool | None = None,
        installed: bool | None = None,
    ) -> bool:
        """Return whether or not the item is contained in this SpecifierSet.

        :param item:
            The item to check for, which can be a version string or a
            :class:`Version` instance.
        :param prereleases:
            Whether or not to match prereleases with this SpecifierSet. If set to
            ``None`` (the default), it uses :attr:`prereleases` to determine
            whether or not prereleases are allowed.

        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
        True
        """
        # Ensure that our item is a Version instance.
        if not isinstance(item, Version):
            item = Version(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # When checking an already-installed prerelease, compare only its
        # base version so the release specifiers still match it.
        if installed and item.is_prerelease:
            item = Version(item.base_version)

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
    ) -> Iterator[UnparsedVersionVar]:
        """Filter items in the given iterable, that match the specifiers in this set.

        :param iterable:
            An iterable that can contain version strings and :class:`Version` instances.
            The items in the iterable will be filtered according to the specifier.
        :param prereleases:
            Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will be intelligently decide whether to allow
            prereleases or not (based on the :attr:`prereleases` attribute, and
            whether the only versions matching are prereleases).

        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
        because it implements the rule from :pep:`440` that a prerelease item
        SHOULD be accepted if no other versions match the given specifier.

        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
        ['1.3']
        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
        ['1.3', <Version('1.4')>]
        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
        []
        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
        ['1.3', '1.5a1']
        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
        ['1.3', '1.5a1']

        An "empty" SpecifierSet will filter items based on the presence of prerelease
        versions in the set.

        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
        ['1.3']
        >>> list(SpecifierSet("").filter(["1.5a1"]))
        ['1.5a1']
        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
        ['1.3', '1.5a1']
        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
        ['1.3', '1.5a1']
        """
        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iter(iterable)
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases.
        else:
            filtered: list[UnparsedVersionVar] = []
            found_prereleases: list[UnparsedVersionVar] = []

            for item in iterable:
                parsed_version = _coerce_version(item)

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return iter(found_prereleases)

            return iter(filtered)
# venv\Lib\site-packages\packaging\tags.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import logging
import platform
import re
import struct
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Iterable,
Iterator,
Sequence,
Tuple,
cast,
)
from . import _manylinux, _musllinux
logger = logging.getLogger(__name__)

# A Python version as a sequence of release numbers, e.g. (3, 11) or just (3,).
PythonVersion = Sequence[int]
# A macOS/iOS version as a (major, minor) pair.
AppleVersion = Tuple[int, int]

# Maps interpreter implementation names to the short forms used in wheel tags.
INTERPRETER_SHORT_NAMES: dict[str, str] = {
    "python": "py",  # Generic.
    "cpython": "cp",
    "pypy": "pp",
    "ironpython": "ip",
    "jython": "jy",
}

# True when pointers are 4 bytes wide, i.e. a 32-bit interpreter build.
_32_BIT_INTERPRETER = struct.calcsize("P") == 4
class Tag:
    """
    A representation of the tag triple for a wheel.

    Instances are considered immutable and thus are hashable. Equality checking
    is also supported.
    """

    __slots__ = ["_abi", "_hash", "_interpreter", "_platform"]

    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
        self._interpreter = interpreter.lower()
        self._abi = abi.lower()
        self._platform = platform.lower()
        # Sets of tags evaluate __hash__ on every element during operations
        # such as .isdisjoint(), which may run hundreds of times while
        # scanning a page of package links. Computing the hash once up front
        # is a significant speedup for those consumers.
        self._hash = hash((self._interpreter, self._abi, self._platform))

    @property
    def interpreter(self) -> str:
        return self._interpreter

    @property
    def abi(self) -> str:
        return self._abi

    @property
    def platform(self) -> str:
        return self._platform

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Tag):
            return NotImplemented
        # Compare the precomputed hashes first so unequal tags bail out fast.
        if self._hash != other._hash:
            return False
        return (
            self._platform == other._platform
            and self._abi == other._abi
            and self._interpreter == other._interpreter
        )

    def __hash__(self) -> int:
        return self._hash

    def __str__(self) -> str:
        return f"{self._interpreter}-{self._abi}-{self._platform}"

    def __repr__(self) -> str:
        return f"<{self} @ {id(self)}>"
def parse_tag(tag: str) -> frozenset[Tag]:
    """
    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.

    Returning a set is required due to the possibility that the tag is a
    compressed tag set.
    """
    interpreters, abis, platforms = tag.split("-")
    # Each dash-separated field may itself be a dot-separated list; the full
    # tag set is the cross product of the three fields.
    return frozenset(
        Tag(interpreter, abi, platform_)
        for interpreter in interpreters.split(".")
        for abi in abis.split(".")
        for platform_ in platforms.split(".")
    )
def _get_config_var(name: str, warn: bool = False) -> int | str | None:
value: int | str | None = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_").replace(" ", "_")
def _is_threaded_cpython(abis: list[str]) -> bool:
"""
Determine if the ABI corresponds to a threaded (`--disable-gil`) build.
The threaded builds are indicated by a "t" in the abiflags.
"""
if len(abis) == 0:
return False
# expect e.g., cp313
m = re.match(r"cp\d+(.*)", abis[0])
if not m:
return False
abiflags = m.group(1)
return "t" in abiflags
def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`)
builds do not support abi3.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]:
    """Return the ABI tags for a CPython of the given version, most specific first.

    The primary tag encodes the build flags — debug ("d"), free-threading
    ("t"), pymalloc ("m", pre-3.8 only), UCS-4 ("u", pre-3.3 only) — e.g.
    ``cp313t``. A debug build additionally gets the flag-free tag because it
    can also load "normal" extension modules.
    """
    py_version = tuple(py_version)  # To allow for version comparison.
    abis = []
    version = _version_nodot(py_version[:2])
    threading = debug = pymalloc = ucs4 = ""
    with_debug = _get_config_var("Py_DEBUG", warn)
    has_refcount = hasattr(sys, "gettotalrefcount")
    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
    # extension modules is the best option.
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    # Free-threaded (`--disable-gil`) builds carry a trailing "t" flag.
    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
        threading = "t"
    if py_version < (3, 8):
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append(f"cp{version}{threading}")
    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
    return abis
def cpython_tags(
    python_version: PythonVersion | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = f"cp{_version_nodot(python_version[:2])}"

    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            abis = []

    abis = list(abis)

    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass

    platforms = list(platforms or platform_tags())
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)

    # abi3 only applies to non-threaded builds of CPython >= 3.2.
    threading = _is_threaded_cpython(abis)
    use_abi3 = _abi3_applies(python_version, threading)
    if use_abi3:
        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)

    if use_abi3:
        # abi3 wheels built for any older CPython down to 3.2 also work here.
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                version = _version_nodot((python_version[0], minor_version))
                interpreter = f"cp{version}"
                yield Tag(interpreter, "abi3", platform_)
def _generic_abi() -> list[str]:
    """
    Return the ABI tag based on EXT_SUFFIX.

    Falls back to :func:`_cpython_abis` when EXT_SUFFIX carries no SOABI
    information (old Windows CPython), and returns an empty list when the
    SOABI field is empty.
    """
    # The following are examples of `EXT_SUFFIX`.
    # We want to keep the parts which are related to the ABI and remove the
    # parts which are related to the platform:
    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
    # - mac:     '.cpython-310-darwin.so'           => cp310
    # - win:     '.cp310-win_amd64.pyd'             => cp310
    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
    #            => graalpy_38_native
    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
    parts = ext_suffix.split(".")
    if len(parts) < 3:
        # CPython3.7 and earlier uses ".pyd" on Windows.
        return _cpython_abis(sys.version_info[:2])
    soabi = parts[1]
    if soabi.startswith("cpython"):
        # non-windows
        abi = "cp" + soabi.split("-")[1]
    elif soabi.startswith("cp"):
        # windows
        abi = soabi.split("-")[0]
    elif soabi.startswith("pypy"):
        abi = "-".join(soabi.split("-")[:2])
    elif soabi.startswith("graalpy"):
        abi = "-".join(soabi.split("-")[:3])
    elif soabi:
        # pyston, ironpython, others?
        abi = soabi
    else:
        # An empty SOABI gives us nothing to derive an ABI tag from.
        return []
    return [_normalize_string(abi)]
def generic_tags(
    interpreter: str | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    if not interpreter:
        interp_name = interpreter_name()
        interp_version = interpreter_version(warn=warn)
        interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    else:
        abis = list(abis)
    platforms = list(platforms or platform_tags())
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    major = py_version[0]
    has_minor = len(py_version) > 1

    if has_minor:
        yield f"py{_version_nodot(py_version[:2])}"
    yield f"py{major}"
    if has_minor:
        for minor in range(py_version[1] - 1, -1, -1):
            yield f"py{_version_nodot((major, minor))}"
def compatible_tags(
    python_version: PythonVersion | None = None,
    interpreter: str | None = None,
    platforms: Iterable[str] | None = None,
) -> Iterator[Tag]:
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    if not python_version:
        python_version = sys.version_info[:2]
    platforms = list(platforms or platform_tags())
    for version in _py_interpreter_range(python_version):
        for platform_ in platforms:
            yield Tag(version, "none", platform_)
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for version in _py_interpreter_range(python_version):
        yield Tag(version, "none", "any")
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
    """Map *arch* to the architecture a 32-bit interpreter actually runs as."""
    if not is_32bit:
        return arch
    # A 32-bit interpreter on PowerPC runs as "ppc"; on anything else, "i386".
    return "ppc" if arch.startswith("ppc") else "i386"
def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
def mac_platforms(
    version: AppleVersion | None = None, arch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for. Both parameters default to the appropriate value
    for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
        if version == (10, 16):
            # When built against an older macOS SDK, Python will report macOS 10.16
            # instead of the real version. Ask a fresh subprocess, with
            # SYSTEM_VERSION_COMPAT disabled, what the OS really is.
            version_str = subprocess.run(
                [
                    sys.executable,
                    "-sS",
                    "-c",
                    "import platform; print(platform.mac_ver()[0])",
                ],
                check=True,
                env={"SYSTEM_VERSION_COMPAT": "0"},
                stdout=subprocess.PIPE,
                text=True,
            ).stdout
            version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        # No-op reassignment; mirrors the branch above (presumably so type
        # checkers narrow `version` identically in both branches — confirm).
        version = version
    if arch is None:
        arch = _mac_arch(cpu_arch)
    else:
        # No-op reassignment, as above.
        arch = arch

    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number. The major version was always 10.
        major_version = 10
        for minor_version in range(version[1], -1, -1):
            compat_version = major_version, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number. The minor versions are now the midyear updates.
        minor_version = 0
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"

    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        major_version = 10
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = major_version, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield f"macosx_{major_version}_{minor_version}_{binary_format}"
        else:
            for minor_version in range(16, 3, -1):
                compat_version = major_version, minor_version
                binary_format = "universal2"
                yield f"macosx_{major_version}_{minor_version}_{binary_format}"
def ios_platforms(
    version: AppleVersion | None = None, multiarch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for an iOS system.

    :param version: A two-item tuple specifying the iOS version to generate
        platform tags for. Defaults to the current iOS version.
    :param multiarch: The CPU architecture+ABI to generate platform tags for -
        (the value used by `sys.implementation._multiarch` e.g.,
        `arm64_iphoneos` or `x86_64_iphonesimulator`). Defaults to the current
        multiarch value.
    """
    if version is None:
        # if iOS is the current platform, ios_ver *must* be defined. However,
        # it won't exist for CPython versions before 3.13, which causes a mypy
        # error.
        _, release, _, _ = platform.ios_ver()  # type: ignore[attr-defined, unused-ignore]
        version = cast("AppleVersion", tuple(map(int, release.split(".")[:2])))

    if multiarch is None:
        multiarch = sys.implementation._multiarch
    multiarch = multiarch.replace("-", "_")

    ios_platform_template = "ios_{major}_{minor}_{multiarch}"

    # Consider any iOS major.minor version from the version requested, down to
    # 12.0. 12.0 is the first iOS version that is known to have enough features
    # to support CPython. Consider every possible minor release up to X.9. The
    # highest the minor has ever gone is 8 (14.8 and 15.8) but having some extra
    # candidates that won't ever match doesn't really hurt, and it saves us from
    # having to keep an explicit list of known iOS versions in the code. Return
    # the results in descending order of version number.

    # If the requested major version is less than 12, there won't be any matches.
    if version[0] < 12:
        return

    # Consider the actual X.Y version that was requested.
    yield ios_platform_template.format(
        major=version[0], minor=version[1], multiarch=multiarch
    )

    # Consider every minor version from X.0 to the minor version prior to the
    # version requested by the platform.
    for minor in range(version[1] - 1, -1, -1):
        yield ios_platform_template.format(
            major=version[0], minor=minor, multiarch=multiarch
        )

    # Consider every possible minor release of every earlier major version,
    # down to iOS 12.
    for major in range(version[0] - 1, 11, -1):
        for minor in range(9, -1, -1):
            yield ios_platform_template.format(
                major=major, minor=minor, multiarch=multiarch
            )
def android_platforms(
    api_level: int | None = None, abi: str | None = None
) -> Iterator[str]:
    """
    Yields the :attr:`~Tag.platform` tags for Android. If this function is invoked on
    non-Android platforms, the ``api_level`` and ``abi`` arguments are required.

    :param int api_level: The maximum `API level
        <https://developer.android.com/tools/releases/platforms>`__ to return.
        Defaults to the current system's version, as returned by
        ``platform.android_ver``.
    :param str abi: The `Android ABI
        <https://developer.android.com/ndk/guides/abis>`__, e.g. ``arm64_v8a``.
        Defaults to the current system's ABI, as returned by
        ``sysconfig.get_platform``. Hyphens and periods will be replaced with
        underscores.
    """
    if platform.system() != "Android" and (api_level is None or abi is None):
        raise TypeError(
            "on non-Android platforms, the api_level and abi arguments are required"
        )

    if api_level is None:
        # Python 3.13 was the first version to return platform.system() == "Android",
        # and also the first version to define platform.android_ver().
        api_level = platform.android_ver().api_level  # type: ignore[attr-defined]

    if abi is None:
        abi = sysconfig.get_platform().split("-")[-1]
    abi = _normalize_string(abi)

    # 16 is the minimum API level known to have enough features to support CPython
    # without major patching. Yield every API level from the maximum down to the
    # minimum, inclusive.
    min_api_level = 16
    for ver in range(api_level, min_api_level - 1, -1):
        yield f"android_{ver}_{abi}"
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
    """Yield manylinux/musllinux/plain-linux platform tags for this system."""
    plat = _normalize_string(sysconfig.get_platform())
    if not plat.startswith("linux_"):
        # We should never be here on Linux; fall back to the sysconfig value.
        yield plat
        return
    if is_32bit:
        # A 32-bit interpreter on a 64-bit kernel advertises 32-bit tags.
        plat = {
            "linux_x86_64": "linux_i686",
            "linux_aarch64": "linux_armv8l",
        }.get(plat, plat)
    _, arch = plat.split("_", 1)
    # armv8l binaries also run on armv7l, so offer both.
    archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
    yield from _manylinux.platform_tags(archs)
    yield from _musllinux.platform_tags(archs)
    for compatible_arch in archs:
        yield f"linux_{compatible_arch}"
def _generic_platforms() -> Iterator[str]:
    """Yield the normalized sysconfig platform tag as the only candidate."""
    yield _normalize_string(sysconfig.get_platform())
def platform_tags() -> Iterator[str]:
    """
    Provides the platform tags for this installation.
    """
    # Dispatch on the OS name; anything unrecognized falls back to the
    # generic sysconfig-derived tag.
    per_system = {
        "Darwin": mac_platforms,
        "iOS": ios_platforms,
        "Android": android_platforms,
        "Linux": _linux_platforms,
    }
    return per_system.get(platform.system(), _generic_platforms)()
def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.

    Some implementations have a reserved, two-letter abbreviation which will
    be returned when appropriate.
    """
    full_name = sys.implementation.name
    short_name = INTERPRETER_SHORT_NAMES.get(full_name)
    return short_name if short_name else full_name
def interpreter_version(*, warn: bool = False) -> str:
    """
    Returns the version of the running interpreter, e.g. ``"311"``.
    """
    configured = _get_config_var("py_version_nodot", warn=warn)
    if not configured:
        # Config var missing/empty: derive it from sys.version_info instead.
        return _version_nodot(sys.version_info[:2])
    return str(configured)
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    """
    Returns the sequence of tag triples for the running interpreter.

    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """
    name = interpreter_name()

    # Interpreter-specific tags come first (highest priority).
    if name == "cp":
        yield from cpython_tags(warn=warn)
    else:
        yield from generic_tags()

    # Then the broadly-compatible tags, pinning the interpreter string where
    # the implementation has a stable convention for it.
    if name == "pp":
        interp = "pp3"
    elif name == "cp":
        interp = "cp" + interpreter_version(warn=warn)
    else:
        interp = None
    yield from compatible_tags(interpreter=interp)
venv\Lib\site-packages\packaging\utils.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations
import functools
import re
from typing import NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version, _TrimmedRelease
# A wheel build tag: either empty, or (leading build number, remainder).
BuildTag = Union[Tuple[()], Tuple[int, str]]
# Marker type for project names already normalized per PEP 503.
NormalizedName = NewType("NormalizedName", str)
class InvalidName(ValueError):
    """
    An invalid distribution name; users should refer to the packaging user guide.

    Raised by :func:`canonicalize_name` when called with ``validate=True``.
    """
class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found, users should refer to PEP 427.

    Raised by :func:`parse_wheel_filename`.
    """
class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found, users should refer to the packaging user guide.

    Raised by :func:`parse_sdist_filename`.
    """
# Core metadata spec for `Name`
# (see the "Name" field of the core metadata specification).
_validate_regex = re.compile(
    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
)
# PEP 503 normalization: runs of "-", "_" and "." collapse to a single "-".
_canonicalize_regex = re.compile(r"[-_.]+")
# Matches names that are already in normalized (lowercase, single-dash) form.
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
    """Normalize *name* per PEP 503 (lowercase; runs of ``-_.`` become ``-``).

    :param validate: When true, first check *name* against the core-metadata
        ``Name`` specification.
    :raises InvalidName: If ``validate`` is true and the name is not valid.
    """
    if validate and _validate_regex.match(name) is None:
        raise InvalidName(f"name is invalid: {name!r}")
    # This is taken from PEP 503.
    normalized = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, normalized)
def is_normalized_name(name: str) -> bool:
    """Return whether *name* is already in PEP 503 normalized form."""
    return bool(_normalized_regex.match(name))
@functools.singledispatch
def canonicalize_version(
    version: Version | str, *, strip_trailing_zero: bool = True
) -> str:
    """
    Return a canonical form of a version as a string.

    >>> canonicalize_version('1.0.1')
    '1.0.1'

    Per PEP 625, versions may have multiple canonical forms, differing
    only by trailing zeros.

    >>> canonicalize_version('1.0.0')
    '1'
    >>> canonicalize_version('1.0.0', strip_trailing_zero=False)
    '1.0.0'

    Invalid versions are returned unaltered.

    >>> canonicalize_version('foo bar baz')
    'foo bar baz'
    """
    if strip_trailing_zero:
        # Re-render through _TrimmedRelease so trailing zeros are dropped.
        return str(_TrimmedRelease(str(version)))
    return str(version)
@canonicalize_version.register
def _(version: str, *, strip_trailing_zero: bool = True) -> str:
    """String overload: parse first so normalization applies."""
    try:
        parsed_version = Version(version)
    except InvalidVersion:
        # Legacy versions cannot be normalized
        return version
    return canonicalize_version(parsed_version, strip_trailing_zero=strip_trailing_zero)
def parse_wheel_filename(
    filename: str,
) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:
    """Split a wheel filename into (name, version, build tag, tag set).

    :raises InvalidWheelFilename: If the filename violates the PEP 427
        naming rules.
    """
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename!r}"
        )

    stem = filename[: -len(".whl")]
    dash_count = stem.count("-")
    if dash_count not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {stem!r}"
        )

    # Split off name/version(/build); the last piece keeps the compound tag.
    parts = stem.split("-", dash_count - 2)

    raw_name = parts[0]
    # See PEP 427 for the rules on escaping the project name.
    if "__" in raw_name or re.match(r"^[\w\d._]*$", raw_name, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {stem!r}")
    name = canonicalize_name(raw_name)

    try:
        version = Version(parts[1])
    except InvalidVersion as e:
        raise InvalidWheelFilename(
            f"Invalid wheel filename (invalid version): {stem!r}"
        ) from e

    build: BuildTag = ()
    if dash_count == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in {stem!r}"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))

    return (name, version, build, parse_tag(parts[-1]))
def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]:
    """Split an sdist filename into (normalized name, version).

    :raises InvalidSdistFilename: If the extension or structure is invalid.
    """
    for extension in (".tar.gz", ".zip"):
        if filename.endswith(extension):
            file_stem = filename[: -len(extension)]
            break
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename!r}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}")

    name = canonicalize_name(name_part)
    try:
        version = Version(version_part)
    except InvalidVersion as e:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (invalid version): {filename!r}"
        ) from e
    return (name, version)
venv\Lib\site-packages\packaging\version.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.version import parse, Version
"""
from __future__ import annotations
import itertools
import re
from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
__all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"]
# Parsed local version segment: a tuple of ints and/or strings.
LocalType = Tuple[Union[int, str], ...]
# Sort-key slot for the pre/post/dev segments; the infinity sentinels stand
# in for "absent" so ordering follows PEP 440.
CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
# Sort-key slot for the local segment.
CmpLocalType = Union[
    NegativeInfinityType,
    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
]
# Full comparison key: (epoch, release, pre, post, dev, local).
CmpKey = Tuple[
    int,
    Tuple[int, ...],
    CmpPrePostDevType,
    CmpPrePostDevType,
    CmpPrePostDevType,
    CmpLocalType,
]
# Signature of a comparison between two version sort keys.
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
class _Version(NamedTuple):
    # Raw parsed components of a PEP 440 version string.
    epoch: int
    release: tuple[int, ...]
    dev: tuple[str, int] | None  # ("dev", N) or None
    pre: tuple[str, int] | None  # e.g. ("a", 1), ("rc", 2) or None
    post: tuple[str, int] | None  # ("post", N) or None
    local: LocalType | None
def parse(version: str) -> Version:
    """Parse the given version string.

    >>> parse('1.0.dev1')
    <Version('1.0.dev1')>

    :param version: The version string to parse.

    :raises InvalidVersion: When the version string is not a valid version.
    """
    return Version(version)
class InvalidVersion(ValueError):
    """Raised when a version string is not a valid version.

    >>> Version("invalid")
    Traceback (most recent call last):
        ...
    packaging.version.InvalidVersion: Invalid version: 'invalid'
    """
class _BaseVersion:
    """Comparison mix-in: subclasses populate ``_key`` and gain total ordering."""

    # Sort key computed by the subclass; every comparison delegates to it.
    _key: tuple[Any, ...]

    def __hash__(self) -> int:
        return hash(self._key)

    # Please keep the duplicated `isinstance` check
    # in the six comparisons hereunder
    # unless you find a way to avoid adding overhead function calls.
    def __lt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key < other._key

    def __le__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key == other._key

    def __ge__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key >= other._key

    def __gt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
# Fix: extraction stripped every "(?P<name>" group name from this pattern,
# which broke both compilation semantics and all match.group("...") callers.
_VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""

VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.

The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.

:meta hide-value:
"""
class Version(_BaseVersion):
    """This class abstracts handling of a project's versions.

    A :class:`Version` instance is comparison aware and can be compared and
    sorted using the standard Python interfaces.

    >>> v1 = Version("1.0a5")
    >>> v2 = Version("1.0")
    >>> v1
    <Version('1.0a5')>
    >>> v2
    <Version('1.0')>
    >>> v1 < v2
    True
    >>> v1 == v2
    False
    >>> v1 > v2
    False
    >>> v1 >= v2
    False
    >>> v1 <= v2
    True
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        """Initialize a Version object.

        :param version:
            The string representation of a version which will be parsed and normalized
            before use.
        :raises InvalidVersion:
            If the ``version`` does not conform to PEP 440 in any way then this
            exception will be raised.
        """
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: {version!r}")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        """A representation of the Version that shows all internal state.

        >>> Version('1.0.0')
        <Version('1.0.0')>
        """
        # Fix: the f-string literal lost its contents during extraction;
        # restore the canonical "<Version('...')>" form shown above.
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        """A string representation of the version that can be round-tripped.

        >>> str(Version("1.0a5"))
        '1.0a5'
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        """The epoch of the version.

        >>> Version("2.0.0").epoch
        0
        >>> Version("1!2.0.0").epoch
        1
        """
        return self._version.epoch

    @property
    def release(self) -> tuple[int, ...]:
        """The components of the "release" segment of the version.

        >>> Version("1.2.3").release
        (1, 2, 3)
        >>> Version("2.0.0").release
        (2, 0, 0)
        >>> Version("1!2.0.0.post0").release
        (2, 0, 0)

        Includes trailing zeroes but not the epoch or any pre-release / development /
        post-release suffixes.
        """
        return self._version.release

    @property
    def pre(self) -> tuple[str, int] | None:
        """The pre-release segment of the version.

        >>> print(Version("1.2.3").pre)
        None
        >>> Version("1.2.3a1").pre
        ('a', 1)
        >>> Version("1.2.3b1").pre
        ('b', 1)
        >>> Version("1.2.3rc1").pre
        ('rc', 1)
        """
        return self._version.pre

    @property
    def post(self) -> int | None:
        """The post-release number of the version.

        >>> print(Version("1.2.3").post)
        None
        >>> Version("1.2.3.post1").post
        1
        """
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> int | None:
        """The development number of the version.

        >>> print(Version("1.2.3").dev)
        None
        >>> Version("1.2.3.dev1").dev
        1
        """
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> str | None:
        """The local version segment of the version.

        >>> print(Version("1.2.3").local)
        None
        >>> Version("1.2.3+abc").local
        'abc'
        """
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        """The public portion of the version.

        >>> Version("1.2.3").public
        '1.2.3'
        >>> Version("1.2.3+abc").public
        '1.2.3'
        >>> Version("1!1.2.3dev1+abc").public
        '1!1.2.3.dev1'
        """
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        """The "base version" of the version.

        >>> Version("1.2.3").base_version
        '1.2.3'
        >>> Version("1.2.3+abc").base_version
        '1.2.3'
        >>> Version("1!1.2.3dev1+abc").base_version
        '1!1.2.3'

        The "base version" is the public version of the project without any pre or post
        release markers.
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        """Whether this version is a pre-release.

        >>> Version("1.2.3").is_prerelease
        False
        >>> Version("1.2.3a1").is_prerelease
        True
        >>> Version("1.2.3b1").is_prerelease
        True
        >>> Version("1.2.3rc1").is_prerelease
        True
        >>> Version("1.2.3dev1").is_prerelease
        True
        """
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        """Whether this version is a post-release.

        >>> Version("1.2.3").is_postrelease
        False
        >>> Version("1.2.3.post1").is_postrelease
        True
        """
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        """Whether this version is a development release.

        >>> Version("1.2.3").is_devrelease
        False
        >>> Version("1.2.3.dev1").is_devrelease
        True
        """
        return self.dev is not None

    @property
    def major(self) -> int:
        """The first item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").major
        1
        """
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        """The second item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").minor
        2
        >>> Version("1").minor
        0
        """
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        """The third item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").micro
        3
        >>> Version("1").micro
        0
        """
        return self.release[2] if len(self.release) >= 3 else 0
class _TrimmedRelease(Version):
    """A Version whose release segment drops trailing zeros when rendered."""

    @property
    def release(self) -> tuple[int, ...]:
        """
        Release segment without any trailing zeros.

        >>> _TrimmedRelease('1.0.0').release
        (1,)
        >>> _TrimmedRelease('0.0').release
        (0,)
        """
        segments = super().release
        cut = len(segments)
        # Walk back over trailing zeros, but always keep at least one segment.
        while cut > 1 and segments[cut - 1] == 0:
            cut -= 1
        return segments[:cut]
def _parse_letter_version(
letter: str | None, number: str | bytes | SupportsInt | None
) -> tuple[str, int] | None:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
assert not letter
if number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str | None) -> LocalType | None:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
def _cmpkey(
    epoch: int,
    release: tuple[int, ...],
    pre: tuple[str, int] | None,
    post: tuple[str, int] | None,
    dev: tuple[str, int] | None,
    local: LocalType | None,
) -> CmpKey:
    """Build the total-ordering sort key for a parsed version (PEP 440 rules)."""
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    _release = tuple(
        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
    )
    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        _pre: CmpPrePostDevType = NegativeInfinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre
    # Versions without a post segment should sort before those with one.
    if post is None:
        _post: CmpPrePostDevType = NegativeInfinity
    else:
        _post = post
    # Versions without a development segment should sort after those with one.
    if dev is None:
        _dev: CmpPrePostDevType = Infinity
    else:
        _dev = dev
    if local is None:
        # Versions without a local segment should sort before those with one.
        _local: CmpLocalType = NegativeInfinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        _local = tuple(
            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        )
    return epoch, _release, _pre, _post, _dev, _local
venv\Lib\site-packages\packaging\_elffile.py
"""
ELF file parser.
This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
from __future__ import annotations
import enum
import os
import struct
from typing import IO
class ELFInvalid(ValueError):
    """Raised when a stream cannot be parsed as a supported ELF executable."""
    pass
class EIClass(enum.IntEnum):
    """ELF ``EI_CLASS`` identification byte: object file bitness."""

    C32 = 1  # 32-bit objects
    C64 = 2  # 64-bit objects
class EIData(enum.IntEnum):
    """ELF ``EI_DATA`` identification byte: data encoding (byte order)."""

    Lsb = 1  # little-endian
    Msb = 2  # big-endian
class EMachine(enum.IntEnum):
    """ELF ``e_machine`` architecture values this parser cares about."""

    I386 = 3
    S390 = 22
    Arm = 40
    X8664 = 62
    AArc64 = 183  # NOTE(review): spelled as-is upstream; value is EM_AARCH64
class ELFFile:
    """
    Representation of an ELF executable.

    Only the read interface needed to locate the program interpreter
    (``PT_INTERP``) is implemented.
    """

    def __init__(self, f: IO[bytes]) -> None:
        """Parse the ELF identification and file header from *f*.

        :raises ELFInvalid: If the stream is not a supported ELF executable.
        """
        self._f = f

        try:
            ident = self._read("16B")
        except struct.error as e:
            raise ELFInvalid("unable to parse identification") from e
        magic = bytes(ident[:4])
        if magic != b"\x7fELF":
            raise ELFInvalid(f"invalid magic: {magic!r}")

        self.capacity = ident[4]  # Format for program header (bitness).
        self.encoding = ident[5]  # Data structure encoding (endianness).

        try:
            # e_fmt: Format for program header.
            # p_fmt: Format for section header.
            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
            # Fix: the table lost its little-endian entries and "<" prefixes
            # during extraction; all four class/endianness combinations are
            # restored below.
            e_fmt, self._p_fmt, self._p_idx = {
                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
            }[(self.capacity, self.encoding)]
        except KeyError as e:
            raise ELFInvalid(
                f"unrecognized capacity ({self.capacity}) or encoding ({self.encoding})"
            ) from e

        try:
            (
                _,
                self.machine,  # Architecture type.
                _,
                _,
                self._e_phoff,  # Offset of program header.
                _,
                self.flags,  # Processor-specific flags.
                _,
                self._e_phentsize,  # Size of section.
                self._e_phnum,  # Number of sections.
            ) = self._read(e_fmt)
        except struct.error as e:
            raise ELFInvalid("unable to parse machine and section information") from e

    def _read(self, fmt: str) -> tuple[int, ...]:
        # Read exactly the bytes the format requires and unpack them.
        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))

    @property
    def interpreter(self) -> str | None:
        """
        The path recorded in the ``PT_INTERP`` section header.
        """
        for index in range(self._e_phnum):
            self._f.seek(self._e_phoff + self._e_phentsize * index)
            try:
                data = self._read(self._p_fmt)
            except struct.error:
                continue
            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
                continue
            self._f.seek(data[self._p_idx[1]])
            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
        return None
venv\Lib\site-packages\packaging\_manylinux.py
from __future__ import annotations
import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Generator, Iterator, NamedTuple, Sequence
from ._elffile import EIClass, EIData, ELFFile, EMachine
# Bits of the ARM ELF e_flags field (AAELF32 spec) used for ABI detection.
EF_ARM_ABIMASK = 0xFF000000  # mask selecting the EABI version bits
EF_ARM_ABI_VER5 = 0x05000000  # EABI version 5
EF_ARM_ABI_FLOAT_HARD = 0x00000400  # hard-float calling convention
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]:
    """Yield an ``ELFFile`` for *path*, or ``None`` if it cannot be parsed."""
    try:
        with open(path, "rb") as handle:
            yield ELFFile(handle)
    except (OSError, TypeError, ValueError):
        # Unreadable file, or not a valid ELF image.
        yield None
def _is_linux_armhf(executable: str) -> bool:
    """Whether *executable* is a 32-bit little-endian ARM hard-float binary.

    The hard-float ABI can be detected from the ELF header of the running
    process: https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    """
    with _parse_elf(executable) as elf:
        if elf is None:
            return False
        return (
            elf.capacity == EIClass.C32
            and elf.encoding == EIData.Lsb
            and elf.machine == EMachine.Arm
            and elf.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
            and elf.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
        )
def _is_linux_i686(executable: str) -> bool:
    """Whether *executable* is a 32-bit little-endian x86 binary."""
    with _parse_elf(executable) as elf:
        if elf is None:
            return False
        return (
            elf.capacity == EIClass.C32
            and elf.encoding == EIData.Lsb
            and elf.machine == EMachine.I386
        )
def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
    """Whether the interpreter's ABI is compatible with any of *archs*."""
    # 32-bit ARM and x86 need an ELF header inspection; the others only need
    # to be in the known-good set.
    if "armv7l" in archs:
        return _is_linux_armhf(executable)
    if "i686" in archs:
        return _is_linux_i686(executable)
    allowed_archs = {
        "x86_64",
        "aarch64",
        "ppc64",
        "ppc64le",
        "s390x",
        "loongarch64",
        "riscv64",
    }
    return not allowed_archs.isdisjoint(archs)
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
# Maps glibc major version -> last minor version released for it.
_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50)
class _GLibCVersion(NamedTuple):
    """A (major, minor) glibc version; tuple ordering gives version ordering."""

    major: int
    minor: int
def _glibc_version_string_confstr() -> str | None:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> str | None:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> str | None:
    """Returns glibc version string, or None if not using glibc."""
    # Try the fast confstr path first, then fall back to ctypes.
    for strategy in (_glibc_version_string_confstr, _glibc_version_string_ctypes):
        version = strategy()
        if version:
            return version
    return None
def _parse_glibc_version(version_str: str) -> tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor, got: {version_str}",
RuntimeWarning,
stacklevel=2,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
@functools.lru_cache
def _get_glibc_version() -> tuple[int, int]:
    """Cached (major, minor) of the running glibc, or (-1, -1) if not glibc."""
    raw = _glibc_version_string()
    return (-1, -1) if raw is None else _parse_glibc_version(raw)
# From PEP 513, PEP 600
def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
    """Whether the running system can use manylinux wheels for *arch*/*version*."""
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux
    except ImportError:
        # No override module installed: glibc version check alone decides.
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        # PEP 600 hook; a None result means "no opinion", fall through to True.
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        return True
    # Legacy per-tag boolean attributes (PEP 513 / 571 / 599).
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True
# glibc (major, minor) -> legacy manylinux tag name (pre-PEP 600 aliases).
_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
    """Generate manylinux tags compatible to the current platform.

    :param archs: Sequence of compatible architectures.
        The first one shall be the closest to the actual architecture and be the part of
        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
        be manylinux-compatible.

    :returns: An iterator of compatible manylinux tags.
    """
    if not _have_compatible_abi(sys.executable, archs):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if set(archs) & {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for arch in archs:
        for glibc_max in glibc_max_list:
            if glibc_max.major == too_old_glibc2.major:
                # `range` bound is exclusive, so this stops just above the
                # oldest supported minor for this major.
                min_minor = too_old_glibc2.minor
            else:
                # For other glibc major versions oldest supported is (x, 0).
                min_minor = -1
            for glibc_minor in range(glibc_max.minor, min_minor, -1):
                glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
                tag = "manylinux_{}_{}".format(*glibc_version)
                if _is_compatible(arch, glibc_version):
                    yield f"{tag}_{arch}"
                # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
                if glibc_version in _LEGACY_MANYLINUX_MAP:
                    legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                    if _is_compatible(arch, glibc_version):
                        yield f"{legacy_tag}_{arch}"
venv\Lib\site-packages\packaging\_musllinux.py
"""PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
from __future__ import annotations
import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Sequence
from ._elffile import ELFFile
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> _MuslVersion | None:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache
def _get_musl_version(executable: str) -> _MuslVersion | None:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader
    """
    try:
        with open(executable, "rb") as f:
            loader = ELFFile(f).interpreter
    except (OSError, TypeError, ValueError):
        return None
    if loader is None or "musl" not in loader:
        return None
    # Running the loader with no arguments prints its banner to stderr.
    proc = subprocess.run([loader], stderr=subprocess.PIPE, text=True)
    return _parse_musl_version(proc.stderr)
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param archs: Sequence of compatible architectures.
        The first one shall be the closest to the actual architecture and be the part of
        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
        be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    version = _get_musl_version(sys.executable)
    if version is None:
        # Python not dynamically linked against musl: no musllinux tags apply.
        return
    for arch in archs:
        # Newest compatible minor first, counting down to _0.
        minor = version.minor
        while minor >= 0:
            yield f"musllinux_{version.major}_{minor}_{arch}"
            minor -= 1
if __name__ == "__main__":  # pragma: no cover
    # Manual smoke test: print the detected platform, musl version, and tags.
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    # BUG FIX: platform_tags() takes a *sequence* of architecture strings;
    # passing the bare string iterated it character-by-character, which would
    # emit one bogus tag per character (e.g. "musllinux_1_2_x").
    for t in platform_tags([re.sub(r"[.-]", "_", plat.split("-", 1)[-1])]):
        print(t, end="\n ")
venv\Lib\site-packages\packaging\_parser.py
"""Handwritten parser of dependency specifiers.
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
from __future__ import annotations
import ast
from typing import NamedTuple, Sequence, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class Node:
    """Base AST node for dependency/marker parsing: wraps one raw string."""

    def __init__(self, value: str) -> None:
        self.value = value

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return f"<{type(self).__name__}('{self.value}')>"

    def serialize(self) -> str:
        # Each concrete subclass decides how its value renders back to
        # marker syntax.
        raise NotImplementedError
class Variable(Node):
    """A marker variable (e.g. ``python_version``); serializes unquoted."""

    def serialize(self) -> str:
        return str(self)
class Value(Node):
    """A literal marker value; serializes wrapped in double quotes."""

    def serialize(self) -> str:
        return f'"{self}"'
class Op(Node):
    """A comparison operator (``==``, ``in``, ``not in``, ...)."""

    def serialize(self) -> str:
        return str(self)
# Type aliases for the parsed environment-marker tree: an "item" is one
# (lhs, op, rhs) comparison; atoms/lists nest via parenthesised groups and
# the "and"/"or" strings inserted by _parse_marker.
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]]
MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]]


class ParsedRequirement(NamedTuple):
    """Structured result of parsing one dependency specifier."""

    name: str
    url: str
    extras: list[str]
    specifier: str
    marker: MarkerList | None
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
    """Parse a dependency specifier string into a ParsedRequirement."""
    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
    """
    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
    """
    tokenizer.consume("WS")

    name_token = tokenizer.expect(
        "IDENTIFIER", expected="package name at the start of dependency specifier"
    )
    name = name_token.text
    tokenizer.consume("WS")

    extras = _parse_extras(tokenizer)
    tokenizer.consume("WS")

    url, specifier, marker = _parse_requirement_details(tokenizer)
    # Anything left over after the details is a syntax error.
    tokenizer.expect("END", expected="end of dependency specifier")

    return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
    tokenizer: Tokenizer,
) -> tuple[str, str, MarkerList | None]:
    """
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?
    """
    specifier = ""
    url = ""
    marker = None

    if tokenizer.check("AT"):
        # URL form: "name @ url [; marker]".
        tokenizer.read()
        tokenizer.consume("WS")

        url_start = tokenizer.position
        url = tokenizer.expect("URL", expected="URL after @").text
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        # A marker after a URL must be separated by whitespace (the URL token
        # itself is greedy up to whitespace).
        tokenizer.expect("WS", expected="whitespace after URL")

        # The input might end after whitespace.
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer, span_start=url_start, after="URL and whitespace"
        )
    else:
        # Version-specifier form: "name [>=1.0[, <2]] [; marker]".
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume("WS")

        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer,
            span_start=specifier_start,
            after=(
                "version specifier"
                if specifier
                else "name and no valid version specifier"
            ),
        )

    return (url, specifier, marker)
def _parse_requirement_marker(
    tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
    """
    requirement_marker = SEMICOLON marker WS?
    """
    if not tokenizer.check("SEMICOLON"):
        # *after* names what was just parsed, yielding a targeted message.
        tokenizer.raise_syntax_error(
            f"Expected end or semicolon (after {after})",
            span_start=span_start,
        )
    tokenizer.read()

    marker = _parse_marker(tokenizer)
    tokenizer.consume("WS")

    return marker
def _parse_extras(tokenizer: Tokenizer) -> list[str]:
    """
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
    """
    # Extras are optional; absence yields an empty list.
    if not tokenizer.check("LEFT_BRACKET", peek=True):
        return []

    with tokenizer.enclosing_tokens(
        "LEFT_BRACKET",
        "RIGHT_BRACKET",
        around="extras",
    ):
        tokenizer.consume("WS")
        extras = _parse_extras_list(tokenizer)
        tokenizer.consume("WS")

    return extras
def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
    """
    extras_list = identifier (wsp* ',' wsp* identifier)*
    """
    extras: list[str] = []

    if not tokenizer.check("IDENTIFIER"):
        return extras

    extras.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            # Two identifiers in a row means a missing comma.
            tokenizer.raise_syntax_error("Expected comma between extra names")
        elif not tokenizer.check("COMMA"):
            break

        tokenizer.read()
        tokenizer.consume("WS")

        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
        extras.append(extra_token.text)

    return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
    """
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    """
    # enclosing_tokens() treats the surrounding parentheses as optional.
    with tokenizer.enclosing_tokens(
        "LEFT_PARENTHESIS",
        "RIGHT_PARENTHESIS",
        around="version specifier",
    ):
        tokenizer.consume("WS")
        parsed_specifiers = _parse_version_many(tokenizer)
        tokenizer.consume("WS")

    return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
    """
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
    """
    # The clauses are re-joined into one comma-separated string, not a list.
    parsed_specifiers = ""
    while tokenizer.check("SPECIFIER"):
        span_start = tokenizer.position
        parsed_specifiers += tokenizer.read().text
        # Reject trailing ".*" / "+local" where the operator disallows them.
        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                ".* suffix can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position + 1,
            )
        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                "Local version label can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position,
            )
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        # Keep the comma text so the result round-trips.
        parsed_specifiers += tokenizer.read().text
        tokenizer.consume("WS")

    return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
    """Parse a standalone environment-marker expression string."""
    return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
    # Like _parse_marker, but requires the whole input to be consumed.
    retval = _parse_marker(tokenizer)
    tokenizer.expect("END", expected="end of marker expression")
    return retval
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """
    marker = marker_atom (BOOLOP marker_atom)+
    """
    # Flat list: [atom, "and"/"or", atom, ...]; no precedence applied here.
    expression = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        token = tokenizer.read()
        expr_right = _parse_marker_atom(tokenizer)
        expression.extend((token.text, expr_right))
    return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    """
    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        # Parenthesised sub-expression: recurse; grouping stays a nested list.
        with tokenizer.enclosing_tokens(
            "LEFT_PARENTHESIS",
            "RIGHT_PARENTHESIS",
            around="marker expression",
        ):
            tokenizer.consume("WS")
            marker: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        marker = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
    """
    # Produces one (lhs, op, rhs) comparison triple.
    tokenizer.consume("WS")
    marker_var_left = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    marker_op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    marker_var_right = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """
    marker_var = VARIABLE | QUOTED_STRING
    """
    if tokenizer.check("VARIABLE"):
        # Dotted spellings (os.name, sys.platform) normalize to underscores.
        return process_env_var(tokenizer.read().text.replace(".", "_"))
    elif tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    else:
        tokenizer.raise_syntax_error(
            message="Expected a marker variable or quoted string"
        )
def process_env_var(env_var: str) -> Variable:
    """Wrap a marker variable name, canonicalizing aliased spellings."""
    # Both names refer to the same marker variable.
    if env_var in ("platform_python_implementation", "python_implementation"):
        return Variable("platform_python_implementation")
    return Variable(env_var)
def process_python_str(python_str: str) -> Value:
    """Evaluate a quoted marker string literal into a Value node."""
    # literal_eval safely handles the quoting; the result is re-stringified.
    value = ast.literal_eval(python_str)
    return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """
    marker_op = IN | NOT IN | OP
    """
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    elif tokenizer.check("NOT"):
        # "not" must be followed by whitespace and then "in".
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    elif tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    else:
        return tokenizer.raise_syntax_error(
            "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in"
        )
venv\Lib\site-packages\packaging\_structures.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
class InfinityType:
    """Singleton sentinel that orders greater than every other value."""

    def __repr__(self) -> str:
        return "Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __eq__(self, other: object) -> bool:
        # Equal only to other instances of this sentinel type.
        return isinstance(other, type(self))

    def __lt__(self, other: object) -> bool:
        return False

    def __le__(self, other: object) -> bool:
        return False

    def __gt__(self, other: object) -> bool:
        return True

    def __ge__(self, other: object) -> bool:
        return True

    def __neg__(self: object) -> "NegativeInfinityType":
        # Negation flips to the opposite sentinel.
        return NegativeInfinity


Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
venv\Lib\site-packages\packaging\_tokenizer.py
from __future__ import annotations
import contextlib
import re
from dataclasses import dataclass
from typing import Iterator, NoReturn
from .specifiers import Specifier
@dataclass
class Token:
    """A single lexed token: rule name, matched text, and source offset."""

    name: str
    text: str
    position: int
class ParserSyntaxError(Exception):
    """The provided source text could not be parsed correctly."""

    def __init__(
        self,
        message: str,
        *,
        source: str,
        span: tuple[int, int],
    ) -> None:
        # Keep all the pieces so __str__ can render a caret diagram.
        self.span = span
        self.message = message
        self.source = source

        super().__init__()

    def __str__(self) -> str:
        start, end = self.span
        pointer = " " * start + "~" * (end - start) + "^"
        # NOTE(review): separator reproduced exactly as extracted; upstream
        # packaging indents the continuation lines more deeply — verify.
        return "\n ".join([self.message, self.source, pointer])
DEFAULT_RULES: dict[str, str | re.Pattern[str]] = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extras?
|dependency_groups
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: dict[str, str | re.Pattern[str]],
    ) -> None:
        self.source = source
        # Pre-compile every rule; re.compile() passes through patterns that
        # are already compiled, so rules may mix strings and patterns.
        self.rules: dict[str, re.Pattern[str]] = {
            name: re.compile(pattern) for name, pattern in rules.items()
        }
        # One-token lookahead buffer, filled by check() and drained by read().
        self.next_token: Token | None = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if self.check(name):
            self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert self.next_token is None, (
            f"Cannot check for {name!r}, already have {self.next_token!r}"
        )
        assert name in self.rules, f"Unknown token name: {name!r}"
        expression = self.rules[name]

        match = expression.match(self.source, self.position)
        if match is None:
            return False
        if not peek:
            self.next_token = Token(name, match[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        The token is *not* read.
        """
        if not self.check(name):
            raise self.raise_syntax_error(f"Expected {expected}")
        return self.read()

    def read(self) -> Token:
        """Consume the next token and return it."""
        token = self.next_token
        assert token is not None

        self.position += len(token.text)
        self.next_token = None

        return token

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: int | None = None,
        span_end: int | None = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position."""
        # Both span ends default to the current position (a zero-width span).
        span = (
            self.position if span_start is None else span_start,
            self.position if span_end is None else span_end,
        )
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=span,
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        # The opening token is optional; the closing token is only demanded
        # when the opening one was actually present.
        if self.check(open_token):
            open_position = self.position
            self.read()
        else:
            open_position = None

        yield

        if open_position is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=open_position,
            )

        self.read()
venv\Lib\site-packages\packaging\__init__.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
# Distribution metadata for the ``packaging`` project.
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "25.0"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = f"2014 {__author__}"
venv\Lib\site-packages\PIL\AvifImagePlugin.py
from __future__ import annotations
import os
from io import BytesIO
from typing import IO
from . import ExifTags, Image, ImageFile
try:
from . import _avif
SUPPORTED = True
except ImportError:
SUPPORTED = False
# Decoder options as module globals, until there is a way to pass parameters
# to Image.open (see https://github.com/python-pillow/Pillow/issues/569)
DECODE_CODEC_CHOICE = "auto"
# 0 means auto-detect (see _get_default_max_threads); any other value is used
# verbatim as the codec thread count.
DEFAULT_MAX_THREADS = 0
def get_codec_version(codec_name: str) -> str | None:
    """Return the version string _avif reports for *codec_name*, or None."""
    # codec_versions() is a ", "-separated list; each entry appears to carry
    # the codec name (optionally followed by " [flags]") and a ":"-separated
    # version — TODO confirm against _avif.
    for entry in _avif.codec_versions().split(", "):
        name = entry.split(" [")[0]
        if name == codec_name:
            return entry.split(":")[-1].split(" ")[0]
    return None
def _accept(prefix: bytes) -> bool | str:
if prefix[4:8] != b"ftyp":
return False
major_brand = prefix[8:12]
if major_brand in (
# coding brands
b"avif",
b"avis",
# We accept files with AVIF container brands; we can't yet know if
# the ftyp box has the correct compatible brands, but if it doesn't
# then the plugin will raise a SyntaxError which Pillow will catch
# before moving on to the next plugin that accepts the file.
#
# Also, because this file might not actually be an AVIF file, we
# don't raise an error if AVIF support isn't properly compiled.
b"mif1",
b"msf1",
):
if not SUPPORTED:
return (
"image file could not be identified because AVIF support not installed"
)
return True
return False
def _get_default_max_threads() -> int:
    """Thread count for the AVIF codec: explicit override or detected CPUs."""
    if DEFAULT_MAX_THREADS:
        return DEFAULT_MAX_THREADS
    # sched_getaffinity honours CPU masks where available (not all platforms
    # have it); cpu_count() may return None, so fall back to 1.
    try:
        return len(os.sched_getaffinity(0))
    except AttributeError:
        return os.cpu_count() or 1
class AvifImageFile(ImageFile.ImageFile):
    """Pillow ImageFile backed by the native _avif decoder."""

    format = "AVIF"
    format_description = "AVIF image"
    # Current frame index; -1 until the initial seek(0) in _open().
    __frame = -1

    def _open(self) -> None:
        if not SUPPORTED:
            msg = "image file could not be opened because AVIF support not installed"
            raise SyntaxError(msg)

        if DECODE_CODEC_CHOICE != "auto" and not _avif.decoder_codec_available(
            DECODE_CODEC_CHOICE
        ):
            msg = "Invalid opening codec"
            raise ValueError(msg)
        # The whole stream is handed to the native decoder up front.
        self._decoder = _avif.AvifDecoder(
            self.fp.read(),
            DECODE_CODEC_CHOICE,
            _get_default_max_threads(),
        )

        # Get info from decoder
        self._size, self.n_frames, self._mode, icc, exif, exif_orientation, xmp = (
            self._decoder.get_info()
        )
        self.is_animated = self.n_frames > 1

        if icc:
            self.info["icc_profile"] = icc
        if xmp:
            self.info["xmp"] = xmp

        # Fold the container-level orientation into the EXIF data if it
        # disagrees with (or is missing from) the embedded EXIF block.
        # NOTE(review): nesting reconstructed — indentation was lost in
        # extraction; verify against upstream Pillow.
        if exif_orientation != 1 or exif:
            exif_data = Image.Exif()
            if exif:
                exif_data.load(exif)
                original_orientation = exif_data.get(ExifTags.Base.Orientation, 1)
            else:
                original_orientation = 1
            if exif_orientation != original_orientation:
                exif_data[ExifTags.Base.Orientation] = exif_orientation
                exif = exif_data.tobytes()
        if exif:
            self.info["exif"] = exif

        self.seek(0)

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return

        # Set tile
        self.__frame = frame
        self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.mode)]

    def load(self) -> Image.core.PixelAccess | None:
        if self.tile:
            # We need to load the image data for this frame
            data, timescale, pts_in_timescales, duration_in_timescales = (
                self._decoder.get_frame(self.__frame)
            )
            # Frame timing is converted from timescale units to milliseconds.
            self.info["timestamp"] = round(1000 * (pts_in_timescales / timescale))
            self.info["duration"] = round(1000 * (duration_in_timescales / timescale))

            # Replace the original fp with the decoded raw frame bytes so the
            # base-class raw loader can consume them.
            if self.fp and self._exclusive_fp:
                self.fp.close()
            self.fp = BytesIO(data)

        return super().load()

    def load_seek(self, pos: int) -> None:
        pass

    def tell(self) -> int:
        return self.__frame
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save every frame of *im* (plus any append_images); see _save."""
    _save(im, fp, filename, save_all=True)
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
) -> None:
    """Encode *im* (and, when *save_all*, its extra frames/append_images) as
    AVIF and write the result to *fp*.

    Options are read from ``im.encoderinfo``. Raises ValueError for invalid
    quality/codec/advanced settings and OSError if the encoder returns None.
    """
    info = im.encoderinfo.copy()
    if save_all:
        append_images = list(info.get("append_images", []))
    else:
        append_images = []

    # Total frame count across the base image and all appended images.
    total = 0
    for ims in [im] + append_images:
        total += getattr(ims, "n_frames", 1)

    quality = info.get("quality", 75)
    if not isinstance(quality, int) or quality < 0 or quality > 100:
        msg = "Invalid quality setting"
        raise ValueError(msg)

    duration = info.get("duration", 0)
    subsampling = info.get("subsampling", "4:2:0")
    speed = info.get("speed", 6)
    max_threads = info.get("max_threads", _get_default_max_threads())
    codec = info.get("codec", "auto")
    if codec != "auto" and not _avif.encoder_codec_available(codec):
        msg = "Invalid saving codec"
        raise ValueError(msg)
    range_ = info.get("range", "full")
    tile_rows_log2 = info.get("tile_rows", 0)
    tile_cols_log2 = info.get("tile_cols", 0)
    alpha_premultiplied = bool(info.get("alpha_premultiplied", False))
    # Autotiling defaults on only when no explicit tiling was requested.
    autotiling = bool(info.get("autotiling", tile_rows_log2 == tile_cols_log2 == 0))

    icc_profile = info.get("icc_profile", im.info.get("icc_profile"))
    exif_orientation = 1
    if exif := info.get("exif"):
        # Orientation is stripped from the EXIF block and passed to the
        # encoder separately.
        if isinstance(exif, Image.Exif):
            exif_data = exif
        else:
            exif_data = Image.Exif()
            exif_data.load(exif)
        if ExifTags.Base.Orientation in exif_data:
            exif_orientation = exif_data.pop(ExifTags.Base.Orientation)
            exif = exif_data.tobytes() if exif_data else b""
        elif isinstance(exif, Image.Exif):
            exif = exif_data.tobytes()

    xmp = info.get("xmp")

    if isinstance(xmp, str):
        xmp = xmp.encode("utf-8")

    advanced = info.get("advanced")
    if advanced is not None:
        # Normalize to a tuple of (key, value) two-tuples; anything else is
        # rejected with a descriptive error.
        if isinstance(advanced, dict):
            advanced = advanced.items()
        try:
            advanced = tuple(advanced)
        except TypeError:
            invalid = True
        else:
            invalid = any(not isinstance(v, tuple) or len(v) != 2 for v in advanced)
        if invalid:
            msg = (
                "advanced codec options must be a dict of key-value string "
                "pairs or a series of key-value two-tuples"
            )
            raise ValueError(msg)

    # Setup the AVIF encoder
    enc = _avif.AvifEncoder(
        im.size,
        subsampling,
        quality,
        speed,
        max_threads,
        codec,
        range_,
        tile_rows_log2,
        tile_cols_log2,
        alpha_premultiplied,
        autotiling,
        icc_profile or b"",
        exif or b"",
        exif_orientation,
        xmp or b"",
        advanced,
    )

    # Add each frame
    frame_idx = 0
    frame_duration = 0
    cur_idx = im.tell()
    is_single_frame = total == 1
    try:
        for ims in [im] + append_images:
            # Get number of frames in this image
            nfr = getattr(ims, "n_frames", 1)

            for idx in range(nfr):
                ims.seek(idx)

                # Make sure image mode is supported
                frame = ims
                rawmode = ims.mode
                if ims.mode not in {"RGB", "RGBA"}:
                    rawmode = "RGBA" if ims.has_transparency_data else "RGB"
                    frame = ims.convert(rawmode)

                # Update frame duration
                if isinstance(duration, (list, tuple)):
                    frame_duration = duration[frame_idx]
                else:
                    frame_duration = duration

                # Append the frame to the animation encoder
                enc.add(
                    frame.tobytes("raw", rawmode),
                    frame_duration,
                    frame.size,
                    rawmode,
                    is_single_frame,
                )

                # Update frame index
                frame_idx += 1

                if not save_all:
                    break
    finally:
        # Restore the caller's frame position even if encoding fails.
        im.seek(cur_idx)

    # Get the final output from the encoder
    data = enc.finish()
    if data is None:
        msg = "cannot write file as AVIF (encoder returned None)"
        raise OSError(msg)

    fp.write(data)
# Register with Pillow's plugin registry. Opening is always registered (so an
# unsupported build still raises a helpful message via _open); saving and the
# extension/MIME mappings only when the native _avif module imported.
Image.register_open(AvifImageFile.format, AvifImageFile, _accept)
if SUPPORTED:
    Image.register_save(AvifImageFile.format, _save)
    Image.register_save_all(AvifImageFile.format, _save_all)
    Image.register_extensions(AvifImageFile.format, [".avif", ".avifs"])
    Image.register_mime(AvifImageFile.format, "image/avif")
venv\Lib\site-packages\PIL\BdfFontFile.py
#
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
"""
Parse X Bitmap Distribution Format (BDF)
"""
from __future__ import annotations
from typing import BinaryIO
from . import FontFile, Image
def bdf_char(
    f: BinaryIO,
) -> (
    tuple[
        str,
        int,
        tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
        Image.Image,
    ]
    | None
):
    """Read one glyph record from a BDF stream.

    Returns (name, encoding, ((dwx, dwy), dst, src), image), or None when no
    further STARTCHAR record is found.
    """
    # skip to STARTCHAR
    while True:
        s = f.readline()
        if not s:
            return None
        if s.startswith(b"STARTCHAR"):
            break
    # NOTE(review): "id" shadows the builtin; left unchanged here.
    id = s[9:].strip().decode("ascii")

    # load symbol properties
    props = {}
    while True:
        s = f.readline()
        if not s or s.startswith(b"BITMAP"):
            break
        i = s.find(b" ")
        props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")

    # load bitmap (hex rows, one line per scanline)
    bitmap = bytearray()
    while True:
        s = f.readline()
        if not s or s.startswith(b"ENDCHAR"):
            break
        bitmap += s[:-1]

    # The word BBX
    # followed by the width in x (BBw), height in y (BBh),
    # and x and y displacement (BBxoff0, BByoff0)
    # of the lower left corner from the origin of the character.
    width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())

    # The word DWIDTH
    # followed by the width in x and y of the character in device pixels.
    dwx, dwy = (int(p) for p in props["DWIDTH"].split())

    bbox = (
        (dwx, dwy),
        (x_disp, -y_disp - height, width + x_disp, -y_disp),
        (0, 0, width, height),
    )

    try:
        im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
    except ValueError:
        # deal with zero-width characters
        im = Image.new("1", (width, height))

    return id, int(props["ENCODING"]), bbox, im
class BdfFontFile(FontFile.FontFile):
    """Font file plugin for the X11 BDF format."""

    def __init__(self, fp: BinaryIO) -> None:
        super().__init__()

        # Only BDF 2.1 is accepted.
        s = fp.readline()
        if not s.startswith(b"STARTFONT 2.1"):
            msg = "not a valid BDF file"
            raise SyntaxError(msg)

        props = {}
        comments = []

        # Read global font properties up to ENDPROPERTIES, collecting
        # COMMENT/COPYRIGHT lines (except logical font descriptions).
        while True:
            s = fp.readline()
            if not s or s.startswith(b"ENDPROPERTIES"):
                break
            i = s.find(b" ")
            props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
            if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
                if s.find(b"LogicalFontDescription") < 0:
                    comments.append(s[i + 1 : -1].decode("ascii"))

        # Read glyphs until end of file; out-of-range encodings are dropped.
        while True:
            c = bdf_char(fp)
            if not c:
                break
            id, ch, (xy, dst, src), im = c

            if 0 <= ch < len(self.glyph):
                self.glyph[ch] = xy, dst, src, im
venv\Lib\site-packages\PIL\BlpImagePlugin.py
"""
Blizzard Mipmap Format (.blp)
Jerome Leclanche
The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/
BLP1 files, used mostly in Warcraft III, are not fully supported.
All types of BLP2 files used in World of Warcraft are supported.
The BLP file structure consists of a header, up to 16 mipmaps of the
texture
Texture sizes must be powers of two, though the two dimensions do
not have to be equal; 512x256 is valid, but 512x200 is not.
The first mipmap (mipmap #0) is the full size image; each subsequent
mipmap halves both dimensions. The final mipmap should be 1x1.
BLP files come in many different flavours:
* JPEG-compressed (type == 0) - only supported for BLP1.
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
array of 8-bit values, one per pixel, left to right, top to bottom.
Each value is an index to the palette.
* DXT-compressed (type == 1, encoding == 2):
- DXT1 compression is used if alpha_encoding == 0.
- An additional alpha bit is used if alpha_depth == 1.
- DXT3 compression is used if alpha_encoding == 1.
- DXT5 compression is used if alpha_encoding == 7.
"""
from __future__ import annotations
import abc
import os
import struct
from enum import IntEnum
from io import BytesIO
from typing import IO
from . import Image, ImageFile
class Format(IntEnum):
    """BLP compression type; only JPEG (type == 0) is named (see module docs)."""

    JPEG = 0
class Encoding(IntEnum):
    """Pixel encoding for type == 1 BLP files (see module docstring)."""

    UNCOMPRESSED = 1
    DXT = 2
    UNCOMPRESSED_RAW_BGRA = 3
class AlphaEncoding(IntEnum):
    """Selects the DXT flavour when Encoding.DXT is used (see module docstring)."""

    DXT1 = 0
    DXT3 = 1
    DXT5 = 7
def unpack_565(i: int) -> tuple[int, int, int]:
    """Expand a packed RGB565 value to an 8-bit (r, g, b) triple."""
    return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3


def decode_dxt1(
    data: bytes, alpha: bool = False
) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """
    # NOTE: several lines of this function (and of decode_dxt3/decode_dxt5)
    # were destroyed by markup stripping in the dump; restored from the
    # standard DXT1/S3TC block layout: two RGB565 endpoints followed by a
    # 32-bit table of 2-bit per-pixel codes.
    blocks = len(data) // 8  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        # Decode next 8-byte block.
        idx = block_index * 8
        color0, color1, bits = struct.unpack_from("<HHI", data, idx)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        # Decode this block into 4x4 pixels, accumulating onto the 4 row
        # accumulators.
        for j in range(4):
            for i in range(4):
                # Get the next 2-bit control code for this pixel.
                control = bits & 3
                bits = bits >> 2

                a = 0xFF
                if control == 0:
                    r, g, b = r0, g0, b0
                elif control == 1:
                    r, g, b = r1, g1, b1
                elif control == 2:
                    # color0 > color1 selects the 4-color mode; otherwise the
                    # 3-color + transparent mode.
                    if color0 > color1:
                        r = (2 * r0 + r1) // 3
                        g = (2 * g0 + g1) // 3
                        b = (2 * b0 + b1) // 3
                    else:
                        r = (r0 + r1) // 2
                        g = (g0 + g1) // 2
                        b = (b0 + b1) // 2
                elif control == 3:
                    if color0 > color1:
                        r = (2 * r1 + r0) // 3
                        g = (2 * g1 + g0) // 3
                        b = (2 * b1 + b0) // 3
                    else:
                        r, g, b, a = 0, 0, 0, 0

                if alpha:
                    ret[j].extend([r, g, b, a])
                else:
                    ret[j].extend([r, g, b])

    return ret


def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """
    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block: 8 bytes of 4-bit alpha values, then a
        # DXT1-style color block (two RGB565 endpoints + 2-bit code table).
        bits = struct.unpack_from("<8B", block)
        color0, color1 = struct.unpack_from("<HH", block, 8)
        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            high = False  # Do we want the higher bits?
            for i in range(4):
                # Two pixels share each alpha byte: low nibble first.
                alphacode_index = (4 * j + i) // 2
                a = bits[alphacode_index]
                if high:
                    high = False
                    a >>= 4
                else:
                    high = True
                    a &= 0xF
                a *= 17  # We get a value between 0 and 15

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret


def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4 * width pixels)
    """
    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block: two alpha endpoints, 48 bits of 3-bit
        # alpha codes, then the DXT1-style color block.
        a0, a1 = struct.unpack_from("<BB", block)
        bits = struct.unpack_from("<6B", block, 2)
        alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
        alphacode2 = bits[0] | (bits[1] << 8)

        color0, color1 = struct.unpack_from("<HH", block, 8)
        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            for i in range(4):
                alphacode_index = 3 * (4 * j + i)

                if alphacode_index <= 12:
                    alphacode = (alphacode2 >> alphacode_index) & 0x07
                elif alphacode_index == 15:
                    # This 3-bit code straddles the 16/32-bit word boundary.
                    alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
                else:  # alphacode_index >= 18 and alphacode_index <= 45
                    alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07

                if alphacode == 0:
                    a = a0
                elif alphacode == 1:
                    a = a1
                elif a0 > a1:
                    a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
                elif alphacode == 6:
                    a = 0
                elif alphacode == 7:
                    a = 255
                else:
                    a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret
class BLPFormatError(NotImplementedError):
    """Raised for BLP variants/encodings this plugin cannot decode."""

    pass
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"BLP1", b"BLP2"))
class BlpImageFile(ImageFile.ImageFile):
"""
Blizzard Mipmap Format
"""
format = "BLP"
format_description = "Blizzard Mipmap Format"
def _open(self) -> None:
self.magic = self.fp.read(4)
if not _accept(self.magic):
msg = f"Bad BLP magic {repr(self.magic)}"
raise BLPFormatError(msg)
compression = struct.unpack(" tuple[int, int]:
try:
self._read_header()
self._load()
except struct.error as e:
msg = "Truncated BLP file"
raise OSError(msg) from e
return -1, 0
@abc.abstractmethod
def _load(self) -> None:
pass
def _read_header(self) -> None:
self._offsets = struct.unpack("<16I", self._safe_read(16 * 4))
self._lengths = struct.unpack("<16I", self._safe_read(16 * 4))
def _safe_read(self, length: int) -> bytes:
assert self.fd is not None
return ImageFile._safe_read(self.fd, length)
def _read_palette(self) -> list[tuple[int, int, int, int]]:
ret = []
for i in range(256):
try:
b, g, r, a = struct.unpack("<4B", self._safe_read(4))
except struct.error:
break
ret.append((b, g, r, a))
return ret
def _read_bgra(
self, palette: list[tuple[int, int, int, int]], alpha: bool
) -> bytearray:
data = bytearray()
_data = BytesIO(self._safe_read(self._lengths[0]))
while True:
try:
(offset,) = struct.unpack(" None:
self._compression, self._encoding, alpha = self.args
if self._compression == Format.JPEG:
self._decode_jpeg_stream()
elif self._compression == 1:
if self._encoding in (4, 5):
palette = self._read_palette()
data = self._read_bgra(palette, alpha)
self.set_as_raw(data)
else:
msg = f"Unsupported BLP encoding {repr(self._encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unsupported BLP compression {repr(self._encoding)}"
raise BLPFormatError(msg)
def _decode_jpeg_stream(self) -> None:
from .JpegImagePlugin import JpegImageFile
(jpeg_header_size,) = struct.unpack(" None:
self._compression, self._encoding, alpha, self._alpha_encoding = self.args
palette = self._read_palette()
assert self.fd is not None
self.fd.seek(self._offsets[0])
if self._compression == 1:
# Uncompressed or DirectX compression
if self._encoding == Encoding.UNCOMPRESSED:
data = self._read_bgra(palette, alpha)
elif self._encoding == Encoding.DXT:
data = bytearray()
if self._alpha_encoding == AlphaEncoding.DXT1:
linesize = (self.state.xsize + 3) // 4 * 8
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt1(self._safe_read(linesize), alpha):
data += d
elif self._alpha_encoding == AlphaEncoding.DXT3:
linesize = (self.state.xsize + 3) // 4 * 16
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt3(self._safe_read(linesize)):
data += d
elif self._alpha_encoding == AlphaEncoding.DXT5:
linesize = (self.state.xsize + 3) // 4 * 16
for yb in range((self.state.ysize + 3) // 4):
for d in decode_dxt5(self._safe_read(linesize)):
data += d
else:
msg = f"Unsupported alpha encoding {repr(self._alpha_encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unknown BLP encoding {repr(self._encoding)}"
raise BLPFormatError(msg)
else:
msg = f"Unknown BLP compression {repr(self._compression)}"
raise BLPFormatError(msg)
self.set_as_raw(data)
class BLPEncoder(ImageFile.PyEncoder):
_pushes_fd = True
def _write_palette(self) -> bytes:
    """Serialize the image palette as 256 fixed BGRA entries (1024 bytes)."""
    data = b""
    assert self.im is not None
    palette = self.im.getpalette("RGBA", "RGBA")
    for i in range(len(palette) // 4):
        r, g, b, a = palette[i * 4 : (i + 1) * 4]
        # BLP stores palette entries in BGRA byte order.
        data += struct.pack("<4B", b, g, r, a)
    while len(data) < 256 * 4:
        # Pad short palettes out to the full 256 entries.
        data += b"\x00" * 4
    return data
def encode(self, bufsize: int) -> tuple[int, int, bytes]:
palette_data = self._write_palette()
offset = 20 + 16 * 4 * 2 + len(palette_data)
data = struct.pack("<16I", offset, *((0,) * 15))
assert self.im is not None
w, h = self.im.size
data += struct.pack("<16I", w * h, *((0,) * 15))
data += palette_data
for y in range(h):
for x in range(w):
data += struct.pack(" None:
if im.mode != "P":
msg = "Unsupported BLP image mode"
raise ValueError(msg)
magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
fp.write(magic)
assert im.palette is not None
fp.write(struct.pack("
venv\Lib\site-packages\PIL\BmpImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# BMP file handler
#
# Windows (and OS/2) native bitmap storage format.
#
# history:
# 1995-09-01 fl Created
# 1996-04-30 fl Added save
# 1997-08-27 fl Fixed save of 1-bit images
# 1998-03-06 fl Load P images as L where possible
# 1998-07-03 fl Load P images as 1 where possible
# 1998-12-29 fl Handle small palettes
# 2002-12-30 fl Fixed load of 1-bit palette images
# 2003-04-21 fl Fixed load of 1-bit monochrome images
# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import IO, Any
from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8
from ._binary import o16le as o16
from ._binary import o32le as o32
#
# --------------------------------------------------------------------
# Read BMP file
# Map BMP bit depth -> (Pillow mode, rawmode) handed to the raw decoder.
BIT2MODE = {
    # bits => mode, rawmode
    1: ("P", "P;1"),
    4: ("P", "P;4"),
    8: ("P", "P"),
    16: ("RGB", "BGR;15"),
    24: ("RGB", "BGR"),
    32: ("RGB", "BGRX"),
}
# When True, uncompressed 32-bit BMPs keep their 4th channel as alpha
# instead of treating it as padding (see the RAW branch in _bitmap).
USE_RAW_ALPHA = False
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"BM")
def _dib_accept(prefix: bytes) -> bool:
    """Return True when the leading little-endian u32 is a known DIB header size.

    Note: callers (Image.open) always supply at least 16 bytes of prefix.
    """
    header_size = int.from_bytes(prefix[:4], "little")
    return header_size in (12, 40, 52, 56, 64, 108, 124)
# =============================================================================
# Image plugin for the Windows BMP format.
# =============================================================================
class BmpImageFile(ImageFile.ImageFile):
    """Image plugin for the Windows Bitmap format (BMP)"""

    # ------------------------------------------------------------- Description
    format_description = "Windows Bitmap"
    format = "BMP"

    # -------------------------------------------------- BMP Compression values
    COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
    # Also expose each compression value as a class attribute (e.g. self.RAW).
    for k, v in COMPRESSIONS.items():
        vars()[k] = v

    def _bitmap(self, header: int = 0, offset: int = 0) -> None:
        """Read relevant info about the BMP"""
        read, seek = self.fp.read, self.fp.seek
        if header:
            seek(header)
        # read bmp header size @offset 14 (this is part of the header size)
        file_info: dict[str, bool | int | tuple[int, ...]] = {
            "header_size": i32(read(4)),
            "direction": -1,
        }

        # -------------------- If requested, read header at a specific position
        # read the rest of the bmp header, without its size
        assert isinstance(file_info["header_size"], int)
        header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)

        # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1
        # ----- This format has different offsets because of width/height types
        # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER
        if file_info["header_size"] == 12:
            file_info["width"] = i16(header_data, 0)
            file_info["height"] = i16(header_data, 2)
            file_info["planes"] = i16(header_data, 4)
            file_info["bits"] = i16(header_data, 6)
            file_info["compression"] = self.COMPRESSIONS["RAW"]
            file_info["palette_padding"] = 3

        # --------------------------------------------- Windows Bitmap v3 to v5
        # 40: BITMAPINFOHEADER
        # 52: BITMAPV2HEADER
        # 56: BITMAPV3HEADER
        # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER
        # 108: BITMAPV4HEADER
        # 124: BITMAPV5HEADER
        elif file_info["header_size"] in (40, 52, 56, 64, 108, 124):
            # A 0xFF high byte of the stored height means a negative
            # (two's-complement) height, i.e. a top-down bitmap.
            file_info["y_flip"] = header_data[7] == 0xFF
            file_info["direction"] = 1 if file_info["y_flip"] else -1
            file_info["width"] = i32(header_data, 0)
            file_info["height"] = (
                i32(header_data, 4)
                if not file_info["y_flip"]
                else 2**32 - i32(header_data, 4)
            )
            file_info["planes"] = i16(header_data, 8)
            file_info["bits"] = i16(header_data, 10)
            file_info["compression"] = i32(header_data, 12)
            # byte size of pixel data
            file_info["data_size"] = i32(header_data, 16)
            file_info["pixels_per_meter"] = (
                i32(header_data, 20),
                i32(header_data, 24),
            )
            file_info["colors"] = i32(header_data, 28)
            file_info["palette_padding"] = 4
            assert isinstance(file_info["pixels_per_meter"], tuple)
            # 1 meter == 39.3701 inches
            self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
            if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
                masks = ["r_mask", "g_mask", "b_mask"]
                if len(header_data) >= 48:
                    if len(header_data) >= 52:
                        masks.append("a_mask")
                    else:
                        file_info["a_mask"] = 0x0
                    for idx, mask in enumerate(masks):
                        file_info[mask] = i32(header_data, 36 + idx * 4)
                else:
                    # 40 byte headers only have the three components in the
                    # bitfields masks, ref:
                    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
                    # See also
                    # https://github.com/python-pillow/Pillow/issues/1293
                    # There is a 4th component in the RGBQuad, in the alpha
                    # location, but it is listed as a reserved component,
                    # and it is not generally an alpha channel
                    file_info["a_mask"] = 0x0
                    for mask in masks:
                        file_info[mask] = i32(read(4))
                assert isinstance(file_info["r_mask"], int)
                assert isinstance(file_info["g_mask"], int)
                assert isinstance(file_info["b_mask"], int)
                assert isinstance(file_info["a_mask"], int)
                file_info["rgb_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                )
                file_info["rgba_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                    file_info["a_mask"],
                )
        else:
            msg = f"Unsupported BMP header type ({file_info['header_size']})"
            raise OSError(msg)

        # ------------------ Special case : header is reported 40, which
        # ---------------------- is shorter than real size for bpp >= 16
        assert isinstance(file_info["width"], int)
        assert isinstance(file_info["height"], int)
        self._size = file_info["width"], file_info["height"]

        # ------- If color count was not found in the header, compute from bits
        assert isinstance(file_info["bits"], int)
        file_info["colors"] = (
            file_info["colors"]
            if file_info.get("colors", 0)
            else (1 << file_info["bits"])
        )
        assert isinstance(file_info["colors"], int)
        # If the pixel data directly follows the header + palette, skip the
        # palette bytes here so `offset` points at the raster data.
        if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
            offset += 4 * file_info["colors"]

        # ---------------------- Check bit depth for unusual unsupported values
        self._mode, raw_mode = BIT2MODE.get(file_info["bits"], ("", ""))
        if not self.mode:
            msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
            raise OSError(msg)

        # ---------------- Process BMP with Bitfields compression (not palette)
        decoder_name = "raw"
        if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
            SUPPORTED: dict[int, list[tuple[int, ...]]] = {
                32: [
                    (0xFF0000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0x0),
                    (0xFF000000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
                    (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
                    (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
                    (0xFF000000, 0xFF00, 0xFF, 0xFF0000),
                    (0x0, 0x0, 0x0, 0x0),
                ],
                24: [(0xFF0000, 0xFF00, 0xFF)],
                16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
            }
            # (bits, masks) -> rawmode understood by the raw decoder.
            MASK_MODES = {
                (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
                (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
                (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
                (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
                (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR",
                (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
                (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
                (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
                (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
            }
            if file_info["bits"] in SUPPORTED:
                if (
                    file_info["bits"] == 32
                    and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    assert isinstance(file_info["rgba_mask"], tuple)
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
                    self._mode = "RGBA" if "A" in raw_mode else self.mode
                elif (
                    file_info["bits"] in (24, 16)
                    and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    assert isinstance(file_info["rgb_mask"], tuple)
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
                else:
                    msg = "Unsupported BMP bitfields layout"
                    raise OSError(msg)
            else:
                msg = "Unsupported BMP bitfields layout"
                raise OSError(msg)
        elif file_info["compression"] == self.COMPRESSIONS["RAW"]:
            if file_info["bits"] == 32 and (
                header == 22 or USE_RAW_ALPHA  # 32-bit .cur offset
            ):
                raw_mode, self._mode = "BGRA", "RGBA"
        elif file_info["compression"] in (
            self.COMPRESSIONS["RLE8"],
            self.COMPRESSIONS["RLE4"],
        ):
            decoder_name = "bmp_rle"
        else:
            msg = f"Unsupported BMP compression ({file_info['compression']})"
            raise OSError(msg)

        # --------------- Once the header is processed, process the palette/LUT
        if self.mode == "P":  # Paletted for 1, 4 and 8 bit images
            # ---------------------------------------------------- 1-bit images
            if not (0 < file_info["colors"] <= 65536):
                msg = f"Unsupported BMP Palette size ({file_info['colors']})"
                raise OSError(msg)
            else:
                assert isinstance(file_info["palette_padding"], int)
                padding = file_info["palette_padding"]
                palette = read(padding * file_info["colors"])
                grayscale = True
                indices = (
                    (0, 255)
                    if file_info["colors"] == 2
                    else list(range(file_info["colors"]))
                )

                # ----------------- Check if grayscale and ignore palette if so
                for ind, val in enumerate(indices):
                    rgb = palette[ind * padding : ind * padding + 3]
                    if rgb != o8(val) * 3:
                        grayscale = False

                # ------- If all colors are gray, white or black, ditch palette
                if grayscale:
                    self._mode = "1" if file_info["colors"] == 2 else "L"
                    raw_mode = self.mode
                else:
                    self._mode = "P"
                    self.palette = ImagePalette.raw(
                        "BGRX" if padding == 4 else "BGR", palette
                    )

        # ---------------------------- Finally set the tile data for the plugin
        self.info["compression"] = file_info["compression"]
        args: list[Any] = [raw_mode]
        if decoder_name == "bmp_rle":
            # Second arg tells the RLE decoder whether the stream is RLE4.
            args.append(file_info["compression"] == self.COMPRESSIONS["RLE4"])
        else:
            assert isinstance(file_info["width"], int)
            # Row stride in bytes, padded up to a 4-byte boundary.
            args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
        args.append(file_info["direction"])
        self.tile = [
            ImageFile._Tile(
                decoder_name,
                (0, 0, file_info["width"], file_info["height"]),
                offset or self.fp.tell(),
                tuple(args),
            )
        ]

    def _open(self) -> None:
        """Open file, check magic number and read header"""
        # read 14 bytes: magic number, filesize, reserved, header final offset
        head_data = self.fp.read(14)
        # choke if the file does not have the required magic bytes
        if not _accept(head_data):
            msg = "Not a BMP file"
            raise SyntaxError(msg)
        # read the start position of the BMP image data (u32)
        offset = i32(head_data, 10)
        # load bitmap information (offset=raster info)
        self._bitmap(offset=offset)
class BmpRleDecoder(ImageFile.PyDecoder):
    """Pure-Python decoder for RLE8/RLE4-compressed BMP pixel data.

    The tile args are ``(rawmode, rle4, direction)``: ``rle4`` selects
    4-bit-per-pixel runs, ``direction`` is the row ystep passed on to
    ``set_as_raw``.
    """

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        """Decompress the RLE stream from ``self.fd`` and hand the raw
        pixels to ``set_as_raw``.

        :returns: ``(-1, 0)`` — all data consumed, no error.
        """
        assert self.fd is not None
        rle4 = self.args[1]
        data = bytearray()
        x = 0
        dest_length = self.state.xsize * self.state.ysize
        while len(data) < dest_length:
            pixels = self.fd.read(1)
            byte = self.fd.read(1)
            if not pixels or not byte:
                break
            num_pixels = pixels[0]
            if num_pixels:
                # encoded mode: repeat `byte` num_pixels times
                if x + num_pixels > self.state.xsize:
                    # Too much data for row
                    num_pixels = max(0, self.state.xsize - x)
                if rle4:
                    # one byte holds two 4-bit pixels, alternated over the run
                    first_pixel = o8(byte[0] >> 4)
                    second_pixel = o8(byte[0] & 0x0F)
                    for index in range(num_pixels):
                        if index % 2 == 0:
                            data += first_pixel
                        else:
                            data += second_pixel
                else:
                    data += byte * num_pixels
                x += num_pixels
            else:
                # escape sequences: 0 = EOL, 1 = EOB, 2 = delta, else absolute
                if byte[0] == 0:
                    # end of line: zero-fill the remainder of the row
                    while len(data) % self.state.xsize != 0:
                        data += b"\x00"
                    x = 0
                elif byte[0] == 1:
                    # end of bitmap
                    break
                elif byte[0] == 2:
                    # delta: skip (right, up) pixels, filling with zeros.
                    bytes_read = self.fd.read(2)
                    if len(bytes_read) < 2:
                        break
                    # BUG FIX: the operands were previously re-read with a
                    # second fd.read(2), consuming two extra bytes and
                    # desynchronizing the rest of the RLE stream. Unpack the
                    # pair that was already read and length-checked.
                    right, up = bytes_read
                    data += b"\x00" * (right + up * self.state.xsize)
                    x = len(data) % self.state.xsize
                else:
                    # absolute mode: copy byte[0] literal pixels
                    if rle4:
                        # 2 pixels per byte
                        byte_count = byte[0] // 2
                        bytes_read = self.fd.read(byte_count)
                        for byte_read in bytes_read:
                            data += o8(byte_read >> 4)
                            data += o8(byte_read & 0x0F)
                    else:
                        byte_count = byte[0]
                        bytes_read = self.fd.read(byte_count)
                        data += bytes_read
                    if len(bytes_read) < byte_count:
                        break
                    x += byte[0]

                    # align to 16-bit word boundary
                    if self.fd.tell() % 2 != 0:
                        self.fd.seek(1, os.SEEK_CUR)
        rawmode = "L" if self.mode == "L" else "P"
        self.set_as_raw(bytes(data), rawmode, (0, self.args[-1]))
        return -1, 0
# =============================================================================
# Image plugin for the DIB format (BMP alias)
# =============================================================================
class DibImageFile(BmpImageFile):
    """Device-independent bitmap: a BMP without the 14-byte file header."""

    format = "DIB"
    format_description = "Windows Bitmap"

    def _open(self) -> None:
        # No "BM" file header to skip — the info header starts at the
        # current file position, so read it directly.
        self._bitmap()
#
# --------------------------------------------------------------------
# Write BMP file
# Map Pillow mode -> (rawmode, bits per pixel, palette size) for writing.
# A palette size of 0 means truecolor (no palette is emitted).
SAVE = {
    "1": ("1", 1, 2),
    "L": ("L", 8, 256),
    "P": ("P", 8, 256),
    "RGB": ("BGR", 24, 0),
    "RGBA": ("BGRA", 32, 0),
}
def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # A DIB is a BMP without the 14-byte "BM" file header.
    _save(im, fp, filename, False)
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
) -> None:
    """Write *im* to *fp* as an uncompressed BMP.

    :param bitmap_header: When False, omit the 14-byte file header
        (used by :func:`_dib_save` for the DIB variant).
    :raises OSError: If the image mode cannot be expressed as BMP.
    :raises ValueError: If the result would exceed the u32 file-size field.
    """
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as BMP"
        raise OSError(msg) from e

    info = im.encoderinfo

    dpi = info.get("dpi", (96, 96))

    # 1 meter == 39.3701 inches
    ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)

    # Row stride in bytes, padded up to a 4-byte boundary.
    stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
    header = 40  # or 64 for OS/2 version 2
    image = stride * im.size[1]

    if im.mode == "1":
        palette = b"".join(o8(i) * 3 + b"\x00" for i in (0, 255))
    elif im.mode == "L":
        palette = b"".join(o8(i) * 3 + b"\x00" for i in range(256))
    elif im.mode == "P":
        palette = im.im.getpalette("RGB", "BGRX")
        colors = len(palette) // 4
    else:
        palette = None

    # bitmap header
    if bitmap_header:
        offset = 14 + header + colors * 4
        file_size = offset + image
        if file_size > 2**32 - 1:
            msg = "File size is too large for the BMP format"
            raise ValueError(msg)
        fp.write(
            b"BM"  # file type (magic)
            + o32(file_size)  # file size
            + o32(0)  # reserved
            + o32(offset)  # image data offset
        )

    # bitmap info header
    fp.write(
        o32(header)  # info header size
        + o32(im.size[0])  # width
        + o32(im.size[1])  # height
        + o16(1)  # planes
        + o16(bits)  # depth
        + o32(0)  # compression (0=uncompressed)
        + o32(image)  # size of bitmap
        + o32(ppm[0])  # resolution
        + o32(ppm[1])  # resolution
        + o32(colors)  # colors used
        + o32(colors)  # colors important
    )

    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)

    if palette:
        fp.write(palette)

    # ystep -1: BMP stores rows bottom-up.
    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]
    )
#
# --------------------------------------------------------------------
# Registry
# Register the BMP and DIB plugins: open/save hooks, the pure-Python RLE
# decoder, file extensions and MIME types.
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)
Image.register_extension(BmpImageFile.format, ".bmp")
Image.register_mime(BmpImageFile.format, "image/bmp")
Image.register_decoder("bmp_rle", BmpRleDecoder)
Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
Image.register_save(DibImageFile.format, _dib_save)
Image.register_extension(DibImageFile.format, ".dib")
# DIB has no MIME type of its own; it shares BMP's.
Image.register_mime(DibImageFile.format, "image/bmp")
venv\Lib\site-packages\PIL\BufrStubImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
# Currently installed application handler; None until register_handler is called.
_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific BUFR image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"BUFR", b"ZCZC"))
class BufrStubImageFile(ImageFile.StubImageFile):
    """Stub image for BUFR data; actual decoding is delegated to the
    handler installed via :func:`register_handler`."""

    format = "BUFR"
    format_description = "BUFR"

    def _open(self) -> None:
        if not _accept(self.fp.read(4)):
            msg = "Not a BUFR file"
            raise SyntaxError(msg)

        # rewind so the installed handler also sees the magic bytes
        self.fp.seek(-4, os.SEEK_CUR)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        # Return the application-installed handler, if any.
        return _handler
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Delegate saving to the registered BUFR handler.

    :raises OSError: If no handler with a ``save`` method is installed.
    """
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        msg = "BUFR save handler not installed"
        raise OSError(msg)
    handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
# Register the BUFR stub plugin and its file extension.
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)
Image.register_extension(BufrStubImageFile.format, ".bufr")
venv\Lib\site-packages\PIL\ContainerIO.py
#
# The Python Imaging Library.
# $Id$
#
# a class to read from a container file
#
# History:
# 1995-06-18 fl Created
# 1995-09-07 fl Added readline(), readlines()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
from collections.abc import Iterable
from typing import IO, AnyStr, NoReturn
class ContainerIO(IO[AnyStr]):
    """
    A file object that provides read access to a part of an existing
    file (for example a TAR file).
    """

    def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
        """
        Create file object.

        :param file: Existing file.
        :param offset: Start of region, in bytes.
        :param length: Size of region, in bytes.
        """
        self.fh: IO[AnyStr] = file
        self.pos = 0
        self.offset = offset
        self.length = length
        self.fh.seek(offset)

    ##
    # Always false.

    def isatty(self) -> bool:
        # A region of a file is never an interactive terminal.
        return False

    def seekable(self) -> bool:
        return True

    def seek(self, offset: int, mode: int = io.SEEK_SET) -> int:
        """
        Move file pointer.

        :param offset: Offset in bytes.
        :param mode: Starting position. Use 0 for beginning of region, 1
           for current offset, and 2 for end of region.  The pointer
           cannot leave the defined region.
        :returns: Offset from start of region, in bytes.
        """
        if mode == 1:
            base = self.pos
        elif mode == 2:
            base = self.length
        else:
            base = 0
        # clamp the result into [0, length]
        self.pos = min(max(base + offset, 0), self.length)
        self.fh.seek(self.offset + self.pos)
        return self.pos

    def tell(self) -> int:
        """
        Get current file pointer.

        :returns: Offset from start of region, in bytes.
        """
        return self.pos

    def readable(self) -> bool:
        return True

    def read(self, n: int = -1) -> AnyStr:
        """
        Read data.

        :param n: Number of bytes to read. If omitted, zero or negative,
            read until end of region.
        :returns: An 8-bit string.
        """
        remaining = self.length - self.pos
        count = min(n, remaining) if n > 0 else remaining
        if count <= 0:  # EOF
            return b"" if "b" in self.fh.mode else ""  # type: ignore[return-value]
        self.pos += count
        return self.fh.read(count)

    def readline(self, n: int = -1) -> AnyStr:
        """
        Read a line of text.

        :param n: Number of bytes to read. If omitted, zero or negative,
            read until end of line.
        :returns: An 8-bit string.
        """
        is_bytes = "b" in self.fh.mode
        line: AnyStr = b"" if is_bytes else ""  # type: ignore[assignment]
        newline = b"\n" if is_bytes else "\n"
        while c := self.read(1):
            line = line + c
            if c == newline or len(line) == n:
                break
        return line

    def readlines(self, n: int | None = -1) -> list[AnyStr]:
        """
        Read multiple lines of text.

        :param n: Number of lines to read. If omitted, zero, negative or None,
            read until end of region.
        :returns: A list of 8-bit strings.
        """
        lines: list[AnyStr] = []
        while line := self.readline():
            lines.append(line)
            if len(lines) == n:
                break
        return lines

    def writable(self) -> bool:
        return False

    def write(self, b: AnyStr) -> NoReturn:
        raise NotImplementedError()

    def writelines(self, lines: Iterable[AnyStr]) -> NoReturn:
        raise NotImplementedError()

    def truncate(self, size: int | None = None) -> int:
        raise NotImplementedError()

    def __enter__(self) -> ContainerIO[AnyStr]:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def __iter__(self) -> ContainerIO[AnyStr]:
        return self

    def __next__(self) -> AnyStr:
        line = self.readline()
        if line:
            return line
        msg = "end of region"
        raise StopIteration(msg)

    def fileno(self) -> int:
        return self.fh.fileno()

    def flush(self) -> None:
        self.fh.flush()

    def close(self) -> None:
        self.fh.close()
venv\Lib\site-packages\PIL\CurImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# Windows Cursor support for PIL
#
# notes:
# uses BmpImagePlugin.py to read the bitmap data.
#
# history:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import BmpImagePlugin, Image, ImageFile
from ._binary import i16le as i16
from ._binary import i32le as i32
#
# --------------------------------------------------------------------
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\0\0\2\0")
##
# Image plugin for Windows Cursor files.
class CurImageFile(BmpImagePlugin.BmpImageFile):
    """Image plugin for Windows Cursor files (.cur)."""

    format = "CUR"
    format_description = "Windows Cursor"

    def _open(self) -> None:
        offset = self.fp.tell()

        # check magic
        s = self.fp.read(6)
        if not _accept(s):
            msg = "not a CUR file"
            raise SyntaxError(msg)

        # pick the largest cursor in the file
        # (each 16-byte directory entry starts with width, height bytes)
        m = b""
        for i in range(i16(s, 4)):
            s = self.fp.read(16)
            if not m:
                m = s
            elif s[0] > m[0] and s[1] > m[1]:
                m = s
        if not m:
            msg = "No cursors were found"
            raise TypeError(msg)

        # load as bitmap
        self._bitmap(i32(m, 12) + offset)

        # patch up the bitmap height
        # (the stored bitmap stacks the XOR image above the AND mask)
        self._size = self.size[0], self.size[1] // 2
        d, e, o, a = self.tile[0]
        self.tile[0] = ImageFile._Tile(d, (0, 0) + self.size, o, a)
#
# --------------------------------------------------------------------
# Register the cursor plugin (read-only: no save support).
Image.register_open(CurImageFile.format, CurImageFile, _accept)
Image.register_extension(CurImageFile.format, ".cur")
venv\Lib\site-packages\PIL\DcxImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications. Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl Created
# 1996-03-20 fl Properly derived from PcxImageFile.
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 2002-07-30 fl Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image
from ._binary import i32le as i32
from ._util import DeferredError
from .PcxImagePlugin import PcxImageFile
MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?
def _accept(prefix: bytes) -> bool:
return len(prefix) >= 4 and i32(prefix) == MAGIC
##
# Image plugin for the Intel DCX format.
class DcxImageFile(PcxImageFile):
    """Image plugin for Intel DCX, a multi-page container of PCX images."""

    format = "DCX"
    format_description = "Intel DCX"
    # the shared file pointer is reused when seeking between frames
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # Header
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a DCX file"
            raise SyntaxError(msg)

        # Component directory: up to 1024 file offsets, zero-terminated
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        self._fp = self.fp
        self.frame = -1
        self.n_frames = len(self._offset)
        self.is_animated = self.n_frames > 1
        self.seek(0)

    def seek(self, frame: int) -> None:
        """Seek to the given frame and re-open it as a PCX image."""
        if not self._seek_check(frame):
            return
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        self.frame = frame
        self.fp = self._fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self) -> int:
        # Index of the current frame.
        return self.frame
# Register the DCX plugin (read-only).
Image.register_open(DcxImageFile.format, DcxImageFile, _accept)
Image.register_extension(DcxImageFile.format, ".dcx")
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import os
import re
import subprocess
import sys
import tempfile
from typing import IO
from . import Image, ImageFile
from ._binary import i32le as i32
# --------------------------------------------------------------------
# DSC comment matchers: "%%Key: value" pairs and bare "%%Key" / "%!Key" fields.
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")

# Cached Ghostscript binary name: None = not probed yet, False = not found.
gs_binary: str | bool | None = None
gs_windows_binary = None
def has_ghostscript() -> bool:
    """Return True if a Ghostscript executable can be located (result is cached)."""
    global gs_binary, gs_windows_binary
    if gs_binary is None:
        if sys.platform.startswith("win"):
            if gs_windows_binary is None:
                import shutil

                # Probe the usual Windows binary names; False if none found.
                gs_windows_binary = False
                for binary in ("gswin32c", "gswin64c", "gs"):
                    if shutil.which(binary) is not None:
                        gs_windows_binary = binary
                        break
            gs_binary = gs_windows_binary
        else:
            try:
                subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
            except OSError:
                # No "gs" on PATH (or it could not be executed).
                gs_binary = False
            else:
                gs_binary = "gs"
    return gs_binary is not False
def Ghostscript(
    tile: list[ImageFile._Tile],
    size: tuple[int, int],
    fp: IO[bytes],
    scale: int = 1,
    transparency: bool = False,
) -> Image.core.ImagingCore:
    """Render an image using Ghostscript.

    :param tile: Single-tile list whose args carry (length, bounding box).
    :param size: Output size, in points, before scaling.
    :param fp: Open EPS file.
    :param scale: Supersampling factor for hi-res rendering.
    :param transparency: Render with an alpha channel (pngalpha device).
    :returns: The rendered image core.
    :raises OSError: If no Ghostscript binary can be located.
    """
    global gs_binary
    if not has_ghostscript():
        msg = "Unable to locate Ghostscript on paths"
        raise OSError(msg)
    assert isinstance(gs_binary, str)

    # Unpack decoder tile
    args = tile[0].args
    assert isinstance(args, tuple)
    length, bbox = args

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    width = size[0] * scale
    height = size[1] * scale
    # resolution is dependent on bbox and size
    res_x = 72.0 * width / (bbox[2] - bbox[0])
    res_y = 72.0 * height / (bbox[3] - bbox[1])

    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, "name") and os.path.exists(fp.name):
        # Ghostscript can read the file directly from disk.
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # Ignore length and offset!
        # Ghostscript can read it
        # Copy whole file to read in Ghostscript
        with open(infile_temp, "wb") as f:
            # fetch length of fp
            fp.seek(0, io.SEEK_END)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    if transparency:
        # "RGBA"
        device = "pngalpha"
    else:
        # "pnmraw" automatically chooses between
        # PBM ("1"), PGM ("L"), and PPM ("RGB").
        device = "pnmraw"

    # Build Ghostscript command
    command = [
        gs_binary,
        "-q",  # quiet mode
        f"-g{width:d}x{height:d}",  # set output geometry (pixels)
        f"-r{res_x:f}x{res_y:f}",  # set input DPI (dots per inch)
        "-dBATCH",  # exit after processing
        "-dNOPAUSE",  # don't pause between pages
        "-dSAFER",  # safe mode
        f"-sDEVICE={device}",
        f"-sOutputFile={outfile}",  # output file
        # adjust for image origin
        "-c",
        f"{-bbox[0]} {-bbox[1]} translate",
        "-f",
        infile,  # input file
        # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
        "-c",
        "showpage",
    ]

    # push data through Ghostscript
    try:
        startupinfo = None
        if sys.platform.startswith("win"):
            # Suppress the console window Ghostscript would otherwise open.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.check_call(command, startupinfo=startupinfo)
        with Image.open(outfile) as out_im:
            out_im.load()
            return out_im.im.copy()
    finally:
        # Always clean up the temporary files, even on failure.
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"%!PS") or (
len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5
)
##
# Image plugin for Encapsulated PostScript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    # Maps the "%ImageData:" mode identifier to a PIL mode string.
    mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}

    def _open(self) -> None:
        # Locate the PostScript payload (handles the DOS EPS binary wrapper).
        (length, offset) = self._find_offset(self.fp)

        # go to offset - start of "%!PS"
        self.fp.seek(offset)

        self._mode = "RGB"

        # When reading header comments, the first comment is used.
        # When reading trailer comments, the last comment is used.
        bounding_box: list[int] | None = None
        imagedata_size: tuple[int, int] | None = None

        # Reusable 255-byte line buffer; DSC lines may not exceed 255 chars.
        byte_arr = bytearray(255)
        bytes_mv = memoryview(byte_arr)
        bytes_read = 0
        reading_header_comments = True
        reading_trailer_comments = False
        trailer_reached = False

        def check_required_header_comments() -> None:
            """
            The EPS specification requires that some headers exist.
            This should be checked when the header comments formally end,
            when image data starts, or when the file ends, whichever comes first.
            """
            if "PS-Adobe" not in self.info:
                msg = 'EPS header missing "%!PS-Adobe" comment'
                raise SyntaxError(msg)
            if "BoundingBox" not in self.info:
                msg = 'EPS header missing "%%BoundingBox" comment'
                raise SyntaxError(msg)

        def read_comment(s: str) -> bool:
            # Parse one "%%Key: value" DSC comment; returns True if it matched.
            nonlocal bounding_box, reading_trailer_comments
            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an EPS file"
                raise SyntaxError(msg) from e

            if not m:
                return False

            k, v = m.group(1, 2)
            self.info[k] = v
            if k == "BoundingBox":
                if v == "(atend)":
                    # Actual box is deferred to the trailer section.
                    reading_trailer_comments = True
                elif not bounding_box or (trailer_reached and reading_trailer_comments):
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        bounding_box = [int(float(i)) for i in v.split()]
                    except Exception:
                        pass
            return True

        while True:
            byte = self.fp.read(1)
            if byte == b"":
                # if we didn't read a byte we must be at the end of the file
                if bytes_read == 0:
                    if reading_header_comments:
                        check_required_header_comments()
                    break
            elif byte in b"\r\n":
                # if we read a line ending character, ignore it and parse what
                # we have already read. if we haven't read any other characters,
                # continue reading
                if bytes_read == 0:
                    continue
            else:
                # ASCII/hexadecimal lines in an EPS file must not exceed
                # 255 characters, not including line ending characters
                if bytes_read >= 255:
                    # only enforce this for lines starting with a "%",
                    # otherwise assume it's binary data
                    if byte_arr[0] == ord("%"):
                        msg = "not an EPS file"
                        raise SyntaxError(msg)
                    else:
                        if reading_header_comments:
                            check_required_header_comments()
                            reading_header_comments = False
                        # reset bytes_read so we can keep reading
                        # data until the end of the line
                        bytes_read = 0
                byte_arr[bytes_read] = byte[0]
                bytes_read += 1
                continue

            # At this point a complete line sits in byte_arr[:bytes_read].
            if reading_header_comments:
                # Load EPS header

                # if this line doesn't start with a "%",
                # or does start with "%%EndComments",
                # then we've reached the end of the header/comments
                if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
                    check_required_header_comments()
                    reading_header_comments = False
                    continue

                s = str(bytes_mv[:bytes_read], "latin-1")
                if not read_comment(s):
                    m = field.match(s)
                    if m:
                        k = m.group(1)
                        if k.startswith("PS-Adobe"):
                            self.info["PS-Adobe"] = k[9:]
                        else:
                            self.info[k] = ""
                    elif s[0] == "%":
                        # handle non-DSC PostScript comments that some
                        # tools mistakenly put in the Comments section
                        pass
                    else:
                        msg = "bad EPS header"
                        raise OSError(msg)
            elif bytes_mv[:11] == b"%ImageData:":
                # Check for an "ImageData" descriptor
                # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096

                # If we've already read an "ImageData" descriptor,
                # don't read another one.
                if imagedata_size:
                    bytes_read = 0
                    continue

                # Values:
                # columns
                # rows
                # bit depth (1 or 8)
                # mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
                # number of padding channels
                # block size (number of bytes per row per channel)
                # binary/ascii (1: binary, 2: ascii)
                # data start identifier (the image data follows after a single line
                #   consisting only of this quoted value)
                image_data_values = byte_arr[11:bytes_read].split(None, 7)
                columns, rows, bit_depth, mode_id = (
                    int(value) for value in image_data_values[:4]
                )

                if bit_depth == 1:
                    self._mode = "1"
                elif bit_depth == 8:
                    try:
                        self._mode = self.mode_map[mode_id]
                    # NOTE(review): a dict lookup raises KeyError, not ValueError;
                    # an unknown mode_id would propagate rather than break — confirm intent.
                    except ValueError:
                        break
                else:
                    break

                # Parse the columns and rows after checking the bit depth and mode
                # in case the bit depth and/or mode are invalid.
                imagedata_size = columns, rows
            elif bytes_mv[:5] == b"%%EOF":
                break
            elif trailer_reached and reading_trailer_comments:
                # Load EPS trailer
                s = str(bytes_mv[:bytes_read], "latin-1")
                read_comment(s)
            elif bytes_mv[:9] == b"%%Trailer":
                trailer_reached = True
            bytes_read = 0

        # A "BoundingBox" is always required,
        # even if an "ImageData" descriptor size exists.
        if not bounding_box:
            msg = "cannot determine EPS bounding box"
            raise OSError(msg)

        # An "ImageData" size takes precedence over the "BoundingBox".
        self._size = imagedata_size or (
            bounding_box[2] - bounding_box[0],
            bounding_box[3] - bounding_box[1],
        )

        self.tile = [
            ImageFile._Tile("eps", (0, 0) + self.size, offset, (length, bounding_box))
        ]

    def _find_offset(self, fp: IO[bytes]) -> tuple[int, int]:
        """Return (length, offset) of the PostScript section within *fp*."""
        s = fp.read(4)

        if s == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, io.SEEK_END)
            length = fp.tell()
            offset = 0
        elif i32(s) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            s = fp.read(8)
            offset = i32(s)
            length = i32(s, 4)
        else:
            msg = "not an EPS file"
            raise SyntaxError(msg)

        return length, offset

    def load(
        self, scale: int = 1, transparency: bool = False
    ) -> Image.core.PixelAccess | None:
        # Load EPS via Ghostscript; afterwards the tile list is cleared so
        # repeated load() calls are no-ops.
        if self.tile:
            self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
            self._mode = self.im.mode
            self._size = self.im.size
            self.tile = []
        return Image.Image.load(self)

    def load_seek(self, pos: int) -> None:
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass
# --------------------------------------------------------------------
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None:
    """EPS Writer for the Python Imaging Library."""
    # make sure image data is available
    im.load()

    # mode -> (bits per sample, band count, PostScript rendering operator)
    ps_operators = {
        "L": (8, 1, b"image"),
        "RGB": (8, 3, b"false 3 colorimage"),
        "CMYK": (8, 4, b"false 4 colorimage"),
    }
    if im.mode not in ps_operators:
        msg = "image mode is not supported"
        raise ValueError(msg)
    ps_operator = ps_operators[im.mode]

    if eps:
        # write EPS header
        fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write(b"%%Pages: 1\n")
        fp.write(b"%%EndComments\n")
        fp.write(b"%%Page: 1 1\n")
        fp.write(b"%%ImageData: %d %d " % im.size)
        fp.write(b'%d %d 0 1 1 "%s"\n' % ps_operator)

    # image header
    fp.write(b"gsave\n")
    fp.write(b"10 dict begin\n")
    fp.write(b"/buf %d string def\n" % (im.size[0] * ps_operator[1]))
    fp.write(b"%d %d scale\n" % im.size)
    fp.write(b"%d %d 8\n" % im.size)  # <= bits
    fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write(b"{ currentfile buf readhexstring pop } bind\n")
    fp.write(ps_operator[2] + b"\n")
    if hasattr(fp, "flush"):
        fp.flush()

    ImageFile._save(im, fp, [ImageFile._Tile("eps", (0, 0) + im.size)])

    fp.write(b"\n%%%%EndBinary\n")
    fp.write(b"grestore end\n")
    if hasattr(fp, "flush"):
        fp.flush()
# --------------------------------------------------------------------
# Register the EPS plugin: reader, writer, file extensions and MIME type.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])
Image.register_mime(EpsImageFile.format, "application/postscript")
from __future__ import annotations
import collections
import os
import sys
import warnings
from typing import IO
import PIL
from . import Image
from ._deprecate import deprecate
# feature name -> (module import path, attribute on that module holding its version)
modules = {
    "pil": ("PIL._imaging", "PILLOW_VERSION"),
    "tkinter": ("PIL._tkinter_finder", "tk_version"),
    "freetype2": ("PIL._imagingft", "freetype2_version"),
    "littlecms2": ("PIL._imagingcms", "littlecms_version"),
    "webp": ("PIL._webp", "webpdecoder_version"),
    "avif": ("PIL._avif", "libavif_version"),
}
def check_module(feature: str) -> bool:
    """
    Checks if a module is available.

    :param feature: The module to check for.
    :returns: ``True`` if available, ``False`` otherwise.
    :raises ValueError: If the module is not defined in this version of Pillow.
    """
    if feature not in modules:
        msg = f"Unknown module {feature}"
        raise ValueError(msg)

    module_name, _ = modules[feature]

    try:
        __import__(module_name)
    except ModuleNotFoundError:
        return False
    except ImportError as ex:
        # Present but broken (e.g. missing shared library): surface the reason.
        warnings.warn(str(ex))
        return False
    else:
        return True
def version_module(feature: str) -> str | None:
    """
    :param feature: The module to check for.
    :returns:
        The loaded version number as a string, or ``None`` if unknown or not available.
    :raises ValueError: If the module is not defined in this version of Pillow.
    """
    if not check_module(feature):
        return None

    module_name, version_attr = modules[feature]
    loaded = __import__(module_name, fromlist=[version_attr])
    return getattr(loaded, version_attr)
def get_supported_modules() -> list[str]:
    """
    :returns: A list of all supported modules.
    """
    supported = []
    for module_feature in modules:
        if check_module(module_feature):
            supported.append(module_feature)
    return supported
# codec feature name -> (Image.core encoder name prefix, Image.core version attribute prefix)
codecs = {
    "jpg": ("jpeg", "jpeglib"),
    "jpg_2000": ("jpeg2k", "jp2klib"),
    "zlib": ("zip", "zlib"),
    "libtiff": ("libtiff", "libtiff"),
}
def check_codec(feature: str) -> bool:
    """
    Checks if a codec is available.

    :param feature: The codec to check for.
    :returns: ``True`` if available, ``False`` otherwise.
    :raises ValueError: If the codec is not defined in this version of Pillow.
    """
    if feature not in codecs:
        msg = f"Unknown codec {feature}"
        raise ValueError(msg)

    codec_name = codecs[feature][0]
    # A codec is compiled in exactly when its encoder symbol exists on Image.core.
    return f"{codec_name}_encoder" in dir(Image.core)
def version_codec(feature: str) -> str | None:
    """
    :param feature: The codec to check for.
    :returns:
        The version number as a string, or ``None`` if not available.
        Checked at compile time for ``jpg``, run-time otherwise.
    :raises ValueError: If the codec is not defined in this version of Pillow.
    """
    if not check_codec(feature):
        return None

    lib_name = codecs[feature][1]
    raw_version = getattr(Image.core, f"{lib_name}_version")

    if feature != "libtiff":
        return raw_version
    # libtiff reports a multi-line banner; extract the bare version number.
    return raw_version.split("\n")[0].split("Version ")[1]
def get_supported_codecs() -> list[str]:
    """
    :returns: A list of all supported codecs.
    """
    return list(filter(check_codec, codecs))
# feature name -> (module path, True for always-on or the HAVE_* flag attribute,
# version attribute name or None when no version is exposed)
features: dict[str, tuple[str, str | bool, str | None]] = {
    "webp_anim": ("PIL._webp", True, None),
    "webp_mux": ("PIL._webp", True, None),
    "transp_webp": ("PIL._webp", True, None),
    "raqm": ("PIL._imagingft", "HAVE_RAQM", "raqm_version"),
    "fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"),
    "harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"),
    "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"),
    "mozjpeg": ("PIL._imaging", "HAVE_MOZJPEG", "libjpeg_turbo_version"),
    "zlib_ng": ("PIL._imaging", "HAVE_ZLIBNG", "zlib_ng_version"),
    "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"),
    "xcb": ("PIL._imaging", "HAVE_XCB", None),
}
def check_feature(feature: str) -> bool | None:
    """
    Checks if a feature is available.

    :param feature: The feature to check for.
    :returns: ``True`` if available, ``False`` if unavailable, ``None`` if unknown.
    :raises ValueError: If the feature is not defined in this version of Pillow.
    """
    if feature not in features:
        msg = f"Unknown feature {feature}"
        raise ValueError(msg)

    module_name, flag, _ = features[feature]
    flag_is_static = isinstance(flag, bool)
    if flag_is_static:
        # Boolean flags are deprecated; emit the warning before any import work.
        deprecate(f'check_feature("{feature}")', 12)

    try:
        imported = __import__(module_name, fromlist=["PIL"])
    except ModuleNotFoundError:
        return None
    except ImportError as ex:
        warnings.warn(str(ex))
        return None

    return flag if flag_is_static else getattr(imported, flag)
def version_feature(feature: str) -> str | None:
    """
    :param feature: The feature to check for.
    :returns: The version number as a string, or ``None`` if not available.
    :raises ValueError: If the feature is not defined in this version of Pillow.
    """
    if not check_feature(feature):
        return None

    module_name, _, version_attr = features[feature]
    if version_attr is None:
        return None
    return getattr(__import__(module_name, fromlist=[version_attr]), version_attr)
def get_supported_features() -> list[str]:
    """
    :returns: A list of all supported features.
    """
    supported: list[str] = []
    for name, (module_path, flag, _) in features.items():
        if flag is True:
            # Static flag: availability mirrors that of the owning module
            # (avoids the deprecated boolean check_feature() path).
            owner = next(
                (m for m, (path, _) in modules.items() if path == module_path),
                None,
            )
            if owner is not None and check_module(owner):
                supported.append(name)
        elif check_feature(name):
            supported.append(name)
    return supported
def check(feature: str) -> bool | None:
    """
    :param feature: A module, codec, or feature name.
    :returns:
        ``True`` if the module, codec, or feature is available,
        ``False`` or ``None`` otherwise.
    """
    # Probe each registry in turn; first match wins.
    for registry, probe in (
        (modules, check_module),
        (codecs, check_codec),
        (features, check_feature),
    ):
        if feature in registry:
            return probe(feature)
    warnings.warn(f"Unknown feature '{feature}'.", stacklevel=2)
    return False
def version(feature: str) -> str | None:
    """
    :param feature:
        The module, codec, or feature to check for.
    :returns:
        The version number as a string, or ``None`` if unknown or not available.
    """
    for registry, probe in (
        (modules, version_module),
        (codecs, version_codec),
        (features, version_feature),
    ):
        if feature in registry:
            return probe(feature)
    return None
def get_supported() -> list[str]:
    """
    :returns: A list of all supported modules, features, and codecs.
    """
    # Order matches the historical output: modules, then features, then codecs.
    return [
        *get_supported_modules(),
        *get_supported_features(),
        *get_supported_codecs(),
    ]
def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None:
    """
    Prints information about this installation of Pillow.
    This function can be called with ``python3 -m PIL``.
    It can also be called with ``python3 -m PIL.report`` or ``python3 -m PIL --report``
    to have "supported_formats" set to ``False``, omitting the list of all supported
    image file formats.

    :param out:
        The output stream to print to. Defaults to ``sys.stdout`` if ``None``.
    :param supported_formats:
        If ``True``, a list of all supported image file formats will be printed.
    """
    if out is None:
        out = sys.stdout

    # Ensure all plugins are registered before reporting on them.
    Image.init()

    print("-" * 68, file=out)
    print(f"Pillow {PIL.__version__}", file=out)
    py_version_lines = sys.version.splitlines()
    print(f"Python {py_version_lines[0].strip()}", file=out)
    for py_version in py_version_lines[1:]:
        print(f" {py_version.strip()}", file=out)
    print("-" * 68, file=out)
    print(f"Python executable is {sys.executable or 'unknown'}", file=out)
    if sys.prefix != sys.base_prefix:
        # Running inside a virtual environment: show both locations.
        print(f"Environment Python files loaded from {sys.prefix}", file=out)
    print(f"System Python files loaded from {sys.base_prefix}", file=out)
    print("-" * 68, file=out)
    print(
        f"Python Pillow modules loaded from {os.path.dirname(Image.__file__)}",
        file=out,
    )
    print(
        f"Binary Pillow modules loaded from {os.path.dirname(Image.core.__file__)}",
        file=out,
    )
    print("-" * 68, file=out)

    # One line per optional dependency, with version details where known.
    for name, feature in [
        ("pil", "PIL CORE"),
        ("tkinter", "TKINTER"),
        ("freetype2", "FREETYPE2"),
        ("littlecms2", "LITTLECMS2"),
        ("webp", "WEBP"),
        ("avif", "AVIF"),
        ("jpg", "JPEG"),
        ("jpg_2000", "OPENJPEG (JPEG2000)"),
        ("zlib", "ZLIB (PNG/ZIP)"),
        ("libtiff", "LIBTIFF"),
        ("raqm", "RAQM (Bidirectional Text)"),
        ("libimagequant", "LIBIMAGEQUANT (Quantization method)"),
        ("xcb", "XCB (X protocol)"),
    ]:
        if check(name):
            v: str | None = None
            if name == "jpg":
                # Prefer reporting the actual turbo/mozjpeg backend when present.
                libjpeg_turbo_version = version_feature("libjpeg_turbo")
                if libjpeg_turbo_version is not None:
                    v = "mozjpeg" if check_feature("mozjpeg") else "libjpeg-turbo"
                    v += " " + libjpeg_turbo_version
            if v is None:
                v = version(name)
            if v is not None:
                version_static = name in ("pil", "jpg")
                if name == "littlecms2":
                    # this check is also in src/_imagingcms.c:setup_module()
                    version_static = tuple(int(x) for x in v.split(".")) < (2, 7)
                t = "compiled for" if version_static else "loaded"
                if name == "zlib":
                    zlib_ng_version = version_feature("zlib_ng")
                    if zlib_ng_version is not None:
                        v += ", compiled for zlib-ng " + zlib_ng_version
                elif name == "raqm":
                    for f in ("fribidi", "harfbuzz"):
                        v2 = version_feature(f)
                        if v2 is not None:
                            v += f", {f} {v2}"
                print("---", feature, "support ok,", t, v, file=out)
            else:
                print("---", feature, "support ok", file=out)
        else:
            print("***", feature, "support not installed", file=out)
    print("-" * 68, file=out)

    if supported_formats:
        # Invert the extension registry: format id -> list of extensions.
        extensions = collections.defaultdict(list)
        for ext, i in Image.EXTENSION.items():
            extensions[i].append(ext)
        for i in sorted(Image.ID):
            line = f"{i}"
            if i in Image.MIME:
                line = f"{line} {Image.MIME[i]}"
            print(line, file=out)
            if i in extensions:
                print(
                    "Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out
                )
            # NOTE: shadows the module-level ``features`` dict for the rest of
            # this loop body (local capability list per format).
            features = []
            if i in Image.OPEN:
                features.append("open")
            if i in Image.SAVE:
                features.append("save")
            if i in Image.SAVE_ALL:
                features.append("save_all")
            if i in Image.DECODERS:
                features.append("decode")
            if i in Image.ENCODERS:
                features.append("encode")
            print("Features: {}".format(", ".join(features)), file=out)
            print("-" * 68, file=out)
venv\Lib\site-packages\PIL\FitsImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# FITS file handling
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import gzip
import math
from . import Image, ImageFile
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"SIMPLE")
class FitsImageFile(ImageFile.ImageFile):
    """Reader for FITS images (plain HDUs and GZIP_1 tile-compressed BINTABLEs)."""

    format = "FITS"
    format_description = "FITS"

    def _open(self) -> None:
        # Parse fixed 80-byte header cards until the data unit is reached.
        assert self.fp is not None

        headers: dict[bytes, bytes] = {}
        header_in_progress = False
        decoder_name = ""
        while True:
            header = self.fp.read(80)
            if not header:
                msg = "Truncated FITS file"
                raise OSError(msg)
            keyword = header[:8].strip()
            if keyword in (b"SIMPLE", b"XTENSION"):
                header_in_progress = True
            elif headers and not header_in_progress:
                # This is now a data unit
                break
            elif keyword == b"END":
                # Seek to the end of the header unit
                # (headers are padded to 2880-byte blocks).
                self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
                if not decoder_name:
                    decoder_name, offset, args = self._parse_headers(headers)

                header_in_progress = False
                continue

            if decoder_name:
                # Keep going to read past the headers
                continue

            # "KEYWORD = value / comment" — strip comment and the "=" marker.
            value = header[8:].split(b"/")[0].strip()
            if value.startswith(b"="):
                value = value[1:].strip()
            if not headers and (not _accept(keyword) or value != b"T"):
                msg = "Not a FITS file"
                raise SyntaxError(msg)
            headers[keyword] = value

        if not decoder_name:
            msg = "No image data"
            raise ValueError(msg)

        # fp is positioned one card (80 bytes) past the start of the data unit.
        offset += self.fp.tell() - 80
        self.tile = [ImageFile._Tile(decoder_name, (0, 0) + self.size, offset, args)]

    def _get_size(
        self, headers: dict[bytes, bytes], prefix: bytes
    ) -> tuple[int, int] | None:
        """Return (width, height) from (Z)NAXIS* cards, or None if no axes."""
        naxis = int(headers[prefix + b"NAXIS"])
        if naxis == 0:
            return None

        if naxis == 1:
            return 1, int(headers[prefix + b"NAXIS1"])
        else:
            return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])

    def _parse_headers(
        self, headers: dict[bytes, bytes]
    ) -> tuple[str, int, tuple[str | int, ...]]:
        """Derive (decoder_name, data offset, decoder args) from header cards."""
        prefix = b""
        decoder_name = "raw"
        offset = 0
        if (
            headers.get(b"XTENSION") == b"'BINTABLE'"
            and headers.get(b"ZIMAGE") == b"T"
            and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
        ):
            # Tile-compressed image: real dimensions live in the Z-prefixed cards,
            # and the compressed stream starts after the uncompressed-size gap.
            no_prefix_size = self._get_size(headers, prefix) or (0, 0)
            number_of_bits = int(headers[b"BITPIX"])
            offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)

            prefix = b"Z"
            decoder_name = "fits_gzip"

        size = self._get_size(headers, prefix)
        if not size:
            return "", 0, ()

        self._size = size

        # Map BITPIX to a PIL mode (negative values are IEEE floats).
        number_of_bits = int(headers[prefix + b"BITPIX"])
        if number_of_bits == 8:
            self._mode = "L"
        elif number_of_bits == 16:
            self._mode = "I;16"
        elif number_of_bits == 32:
            self._mode = "I"
        elif number_of_bits in (-32, -64):
            self._mode = "F"

        args: tuple[str | int, ...]
        if decoder_name == "raw":
            args = (self.mode, 0, -1)
        else:
            args = (number_of_bits,)
        return decoder_name, offset, args
class FitsGzipDecoder(ImageFile.PyDecoder):
    """Decoder for GZIP_1-compressed FITS image data."""

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None
        decompressed = gzip.decompress(self.fd.read())

        # Each pixel occupies 4 bytes in the decompressed stream; keep only
        # the trailing bytes_per_pixel bytes of each pixel slot.
        bytes_per_pixel = min(self.args[0] // 8, 4)
        skip = 4 - bytes_per_pixel

        scanlines = []
        position = 0
        for _ in range(self.state.ysize):
            line = bytearray()
            for _ in range(self.state.xsize):
                line += decompressed[position + skip : position + 4]
                position += 4
            scanlines.append(line)

        # Rows are stored bottom-up; flip vertically before handing off.
        self.set_as_raw(b"".join(reversed(scanlines)))
        return -1, 0
# --------------------------------------------------------------------
# Registry
# Register the FITS reader, its gzip tile decoder, and file extensions.
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
Image.register_decoder("fits_gzip", FitsGzipDecoder)
Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
venv\Lib\site-packages\PIL\FliImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
# 95-09-01 fl Created
# 97-01-03 fl Fixed parser, setup decoder tile
# 98-07-15 fl Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8
from ._util import DeferredError
#
# decoder
def _accept(prefix: bytes) -> bool:
    """Sniff an FLI/FLC header: magic word at offset 4, flags word at offset 14.

    ``i16(prefix, 14)`` reads bytes 14-15, so at least 16 bytes are required;
    the previous ``len(prefix) >= 6`` guard only covered the magic word and
    let a 6-15 byte prefix raise ``struct.error`` instead of returning False.
    """
    return (
        len(prefix) >= 16
        and i16(prefix, 4) in [0xAF11, 0xAF12]
        and i16(prefix, 14) in [0, 3]  # flags
    )
##
# Image plugin for the FLI/FLC animation format. Use the seek
# method to load individual frames.
class FliImageFile(ImageFile.ImageFile):
    """Reader for Autodesk FLI/FLC animations; use seek() to select frames."""

    format = "FLI"
    format_description = "Autodesk FLI/FLC Animation"
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # HEAD
        s = self.fp.read(128)
        if not (_accept(s) and s[20:22] == b"\x00\x00"):
            msg = "not an FLI/FLC file"
            raise SyntaxError(msg)

        # frames
        self.n_frames = i16(s, 6)
        self.is_animated = self.n_frames > 1

        # image characteristics
        self._mode = "P"
        self._size = i16(s, 8), i16(s, 10)

        # animation speed
        duration = i32(s, 16)
        magic = i16(s, 4)
        if magic == 0xAF11:
            # Old-style FLI stores speed in 1/70ths of a second; convert to ms.
            duration = (duration * 1000) // 70
        self.info["duration"] = duration

        # look for palette (default to greyscale if no palette chunk is found)
        palette = [(a, a, a) for a in range(256)]

        s = self.fp.read(16)

        self.__offset = 128

        if i16(s, 4) == 0xF100:
            # prefix chunk; ignore it
            self.__offset = self.__offset + i32(s)
            self.fp.seek(self.__offset)
            s = self.fp.read(16)

        if i16(s, 4) == 0xF1FA:
            # look for palette chunk
            number_of_subchunks = i16(s, 6)
            chunk_size: int | None = None
            for _ in range(number_of_subchunks):
                if chunk_size is not None:
                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
                s = self.fp.read(6)
                chunk_type = i16(s, 4)
                if chunk_type in (4, 11):
                    # 4 = FLC 8-bit palette, 11 = FLI 6-bit palette (shift by 2).
                    self._palette(palette, 2 if chunk_type == 11 else 0)
                    break
                chunk_size = i32(s)
                if not chunk_size:
                    break

        self.palette = ImagePalette.raw(
            "RGB", b"".join(o8(r) + o8(g) + o8(b) for (r, g, b) in palette)
        )

        # set things up to decode first frame
        self.__frame = -1
        self._fp = self.fp
        self.__rewind = self.fp.tell()
        self.seek(0)

    def _palette(self, palette: list[tuple[int, int, int]], shift: int) -> None:
        # load palette: packets of (skip count, copy count, RGB triples)
        i = 0
        for e in range(i16(self.fp.read(2))):
            s = self.fp.read(2)

            i = i + s[0]
            n = s[1]
            if n == 0:
                # A copy count of 0 means all 256 entries.
                n = 256

            s = self.fp.read(n * 3)
            for n in range(0, len(s), 3):
                r = s[n] << shift
                g = s[n + 1] << shift
                b = s[n + 2] << shift
                palette[i] = (r, g, b)
                i += 1

    def seek(self, frame: int) -> None:
        # Frames are delta-encoded, so seeking backwards rewinds to frame 0
        # and replays forward.
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            self._seek(0)

        for f in range(self.__frame + 1, frame + 1):
            self._seek(f)

    def _seek(self, frame: int) -> None:
        """Advance decoder state by exactly one frame (or rewind to frame 0)."""
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        if frame == 0:
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128
        else:
            # ensure that the previous frame was loaded
            self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)
        self.__frame = frame

        # move to next frame
        self.fp = self._fp
        self.fp.seek(self.__offset)

        s = self.fp.read(4)
        if not s:
            msg = "missing frame size"
            raise EOFError(msg)

        framesize = i32(s)

        self.decodermaxblock = framesize
        self.tile = [ImageFile._Tile("fli", (0, 0) + self.size, self.__offset)]

        self.__offset += framesize

    def tell(self) -> int:
        # Index of the current frame (0-based).
        return self.__frame
#
# registry
# Register the FLI/FLC reader and its file extensions (read-only plugin).
Image.register_open(FliImageFile.format, FliImageFile, _accept)
Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
venv\Lib\site-packages\PIL\FontFile.py
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import BinaryIO
from . import Image, _binary
# Maximum bitmap-strip width in pixels; FontFile.compile() wraps glyphs
# onto a new row once a line of glyphs would exceed this.
WIDTH = 800
def puti16(
    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
) -> None:
    """Write network order (big-endian) 16-bit sequence"""
    for value in values:
        # two's-complement fold negatives into the unsigned 16-bit range
        fp.write(_binary.o16be(value + 65536 if value < 0 else value))
class FontFile:
    """Base class for raster font file handlers."""

    # Packed glyph bitmap, built lazily by compile().
    bitmap: Image.Image | None = None

    def __init__(self) -> None:
        self.info: dict[bytes, bytes | int] = {}
        # One slot per character code: (delta, dst box, src box, glyph image).
        self.glyph: list[
            tuple[
                tuple[int, int],
                tuple[int, int, int, int],
                tuple[int, int, int, int],
                Image.Image,
            ]
            | None
        ] = [None] * 256

    def __getitem__(self, ix: int) -> (
        tuple[
            tuple[int, int],
            tuple[int, int, int, int],
            tuple[int, int, int, int],
            Image.Image,
        ]
        | None
    ):
        return self.glyph[ix]

    def compile(self) -> None:
        """Create metrics and bitmap"""
        if self.bitmap:
            return

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self.glyph:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    # wrap to a new strip of glyphs
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            return

        self.ysize = h

        # paste glyphs into bitmap
        self.bitmap = Image.new("1", (xsize, ysize))
        self.metrics: list[
            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
            | None
        ] = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    # same wrapping rule as the measuring pass above
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = d, dst, s

    def save(self, filename: str) -> None:
        """Save font"""
        self.compile()

        # font data
        if not self.bitmap:
            msg = "No bitmap created"
            raise ValueError(msg)
        # NOTE: the bitmap is written PNG-encoded despite the ".pbm" extension.
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
            fp.write(b"PILfont\n")
            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
            fp.write(b"DATA\n")
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    # zero-filled record for characters with no glyph
                    puti16(fp, (0,) * 10)
                else:
                    puti16(fp, m[0] + m[1] + m[2])
venv\Lib\site-packages\PIL\FpxImagePlugin.py
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import olefile
from . import Image, ImageFile
from ._binary import i32le as i32
# we map from colour field tuples to (mode, rawmode) descriptors
# Tuples of per-channel FlashPix colour field ids -> (PIL mode, decoder rawmode).
MODES = {
    # opacity
    (0x00007FFE,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017FFE): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
}
#
# --------------------------------------------------------------------
def _accept(prefix: bytes) -> bool:
    """Detect an OLE compound file, the container format used by FlashPix."""
    return prefix[: len(olefile.MAGIC)] == olefile.MAGIC
##
# Image plugin for the FlashPix images.
class FpxImageFile(ImageFile.ImageFile):
format = "FPX"
format_description = "FlashPix"
def _open(self) -> None:
#
# read the OLE directory and see if this is a likely
# to be a FlashPix file
try:
self.ole = olefile.OleFileIO(self.fp)
except OSError as e:
msg = "not an FPX file; invalid OLE file"
raise SyntaxError(msg) from e
root = self.ole.root
if not root or root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
msg = "not an FPX file; bad root CLSID"
raise SyntaxError(msg)
self._open_index(1)
def _open_index(self, index: int = 1) -> None:
#
# get the Image Contents Property Set
prop = self.ole.getproperties(
[f"Data Object Store {index:06d}", "\005Image Contents"]
)
# size (highest resolution)
assert isinstance(prop[0x1000002], int)
assert isinstance(prop[0x1000003], int)
self._size = prop[0x1000002], prop[0x1000003]
size = max(self.size)
i = 1
while size > 64:
size = size // 2
i += 1
self.maxid = i - 1
# mode. instead of using a single field for this, flashpix
# requires you to specify the mode for each channel in each
# resolution subimage, and leaves it to the decoder to make
# sure that they all match. for now, we'll cheat and assume
# that this is always the case.
id = self.maxid << 16
s = prop[0x2000002 | id]
if not isinstance(s, bytes) or (bands := i32(s, 4)) > 4:
msg = "Invalid number of bands"
raise OSError(msg)
# note: for now, we ignore the "uncalibrated" flag
colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))
self._mode, self.rawmode = MODES[colors]
# load JPEG tables, if any
self.jpeg = {}
for i in range(256):
id = 0x3000001 | (i << 16)
if id in prop:
self.jpeg[i] = prop[id]
self._open_subimage(1, self.maxid)
def _open_subimage(self, index: int = 1, subimage: int = 0) -> None:
#
# setup tile descriptors for a given subimage
stream = [
f"Data Object Store {index:06d}",
f"Resolution {subimage:04d}",
"Subimage 0000 Header",
]
fp = self.ole.openstream(stream)
# skip prefix
fp.read(28)
# header stream
s = fp.read(36)
size = i32(s, 4), i32(s, 8)
# tilecount = i32(s, 12)
tilesize = i32(s, 16), i32(s, 20)
# channels = i32(s, 24)
offset = i32(s, 28)
length = i32(s, 32)
if size != self.size:
msg = "subimage mismatch"
raise OSError(msg)
# get tile descriptors
fp.seek(28 + offset)
s = fp.read(i32(s, 12) * length)
x = y = 0
xsize, ysize = size
xtile, ytile = tilesize
self.tile = []
for i in range(0, len(s), length):
x1 = min(xsize, x + xtile)
y1 = min(ysize, y + ytile)
compression = i32(s, i + 8)
if compression == 0:
self.tile.append(
ImageFile._Tile(
"raw",
(x, y, x1, y1),
i32(s, i) + 28,
self.rawmode,
)
)
elif compression == 1:
# FIXME: the fill decoder is not implemented
self.tile.append(
ImageFile._Tile(
"fill",
(x, y, x1, y1),
i32(s, i) + 28,
(self.rawmode, s[12:16]),
)
)
elif compression == 2:
internal_color_conversion = s[14]
jpeg_tables = s[15]
rawmode = self.rawmode
if internal_color_conversion:
# The image is stored as usual (usually YCbCr).
if rawmode == "RGBA":
# For "RGBA", data is stored as YCbCrA based on
# negative RGB. The following trick works around
# this problem :
jpegmode, rawmode = "YCbCrK", "CMYK"
else:
jpegmode = None # let the decoder decide
else:
# The image is stored as defined by rawmode
jpegmode = rawmode
self.tile.append(
ImageFile._Tile(
"jpeg",
(x, y, x1, y1),
i32(s, i) + 28,
(rawmode, jpegmode),
)
)
# FIXME: jpeg tables are tile dependent; the prefix
# data must be placed in the tile descriptor itself!
if jpeg_tables:
self.tile_prefix = self.jpeg[jpeg_tables]
else:
msg = "unknown/invalid compression"
raise OSError(msg)
x = x + xtile
if x >= xsize:
x, y = 0, y + ytile
if y >= ysize:
break # isn't really required
self.stream = stream
self._fp = self.fp
self.fp = None
def load(self) -> Image.core.PixelAccess | None:
if not self.fp:
self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])
return ImageFile.ImageFile.load(self)
def close(self) -> None:
self.ole.close()
super().close()
    def __exit__(self, *args: object) -> None:
        # Mirror close(): release the OLE container on context-manager exit.
        self.ole.close()
        super().__exit__()
#
# --------------------------------------------------------------------

# Register the FlashPix plugin and its file extension with the Image registry.
Image.register_open(FpxImageFile.format, FpxImageFile, _accept)

Image.register_extension(FpxImageFile.format, ".fpx")
venv\Lib\site-packages\PIL\FtexImagePlugin.py
"""
A Pillow loader for .ftc and .ftu files (FTEX)
Jerome Leclanche
The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/
Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
packed custom format called FTEX. This file format uses file extensions FTC
and FTU.
* FTC files are compressed textures (using standard texture compression).
* FTU files are not compressed.
Texture File Format
The FTC and FTU texture files both use the same format. This
has the following structure:
{header}
{format_directory}
{data}
Where:
{header} = {
u32:magic,
u32:version,
u32:width,
u32:height,
u32:mipmap_count,
u32:format_count
}
* The "magic" number is "FTEX".
* "width" and "height" are the dimensions of the texture.
* "mipmap_count" is the number of mipmaps in the texture.
* "format_count" is the number of texture formats (different versions of the
same texture) in this file.
{format_directory} = format_count * { u32:format, u32:where }
The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
uncompressed textures.
The texture data for a format starts at the position "where" in the file.
Each set of texture data in the file has the following structure:
{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
* "mipmap_size" is the number of bytes in that mip level. For compressed
textures this is the size of the texture data compressed with DXT1. For 24 bit
uncompressed textures, this is 3 * width * height. Following this are the image
bytes for that mipmap level.
Note: All data is stored in little-endian (Intel) byte order.
"""
from __future__ import annotations
import struct
from enum import IntEnum
from io import BytesIO
from . import Image, ImageFile
# 4-byte signature at the start of every FTEX file.
MAGIC = b"FTEX"
class Format(IntEnum):
    """Texture data formats listed in the FTEX format directory."""

    DXT1 = 0
    UNCOMPRESSED = 1
class FtexImageFile(ImageFile.ImageFile):
    """Image plugin for FTEX textures (.ftc / .ftu)."""

    format = "FTEX"
    format_description = "Texture File Format (IW2:EOC)"

    def _open(self) -> None:
        """Parse the FTEX header and set up a tile for the first format entry.

        NOTE(review): the original text of this block was truncated in the
        dump (everything between ``struct.unpack("`` and the trailing
        ``load_seek`` stub was missing, apparently eaten with the ``<``
        characters); restored from the upstream Pillow FtexImagePlugin.
        """
        if not _accept(self.fp.read(4)):
            msg = "not an FTEX file"
            raise SyntaxError(msg)
        struct.unpack("<i", self.fp.read(4))  # version
        self._size = struct.unpack("<2i", self.fp.read(8))
        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))

        self._mode = "RGB"

        # Only support single-format files.
        # I don't know of any multi-format file.
        assert format_count == 1

        format, where = struct.unpack("<2i", self.fp.read(8))
        self.fp.seek(where)
        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))

        data = self.fp.read(mipmap_size)

        if format == Format.DXT1:
            self._mode = "RGBA"
            self.tile = [ImageFile._Tile("bcn", (0, 0) + self.size, 0, (1,))]
        elif format == Format.UNCOMPRESSED:
            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, "RGB")]
        else:
            msg = f"Invalid texture compression format: {repr(format)}"
            raise SyntaxError(msg)

        # All pixel data is already buffered; the real file can be closed.
        self.fp.close()
        self.fp = BytesIO(data)

    def load_seek(self, pos: int) -> None:
        # Data is held in an in-memory BytesIO; nothing to seek.
        pass
def _accept(prefix: bytes) -> bool:
    # An FTEX file begins with the 4-byte magic b"FTEX".
    return prefix[: len(MAGIC)] == MAGIC
# Register the FTEX plugin and its two file extensions.
Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
venv\Lib\site-packages\PIL\GbrImagePlugin.py
#
# The Python Imaging Library
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
# 16-01-08 es Version 2
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
# Copyright (c) Eric Soroos 2016.
#
# See the README file for information on usage and redistribution.
#
#
# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
# format documentation.
#
# This code Interprets version 1 and 2 .gbr files.
# Version 1 files are obsolete, and should not be used for new
# brushes.
# Version 2 files are saved by GIMP v2.8 (at least)
# Version 3 files have a format specifier of 18 for 16bit floats in
# the color depth field. This is currently unsupported by Pillow.
from __future__ import annotations
from . import Image, ImageFile
from ._binary import i32be as i32
def _accept(prefix: bytes) -> bool:
    # A GIMP brush header starts with two big-endian u32s:
    # a header size of at least 20 bytes, then a version of 1 or 2.
    if len(prefix) < 8:
        return False
    return i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
    """Image plugin for GIMP brush (.gbr) files, versions 1 and 2."""

    format = "GBR"
    format_description = "GIMP brush file"

    def _open(self) -> None:
        # Header layout (big-endian u32s): header_size, version, width,
        # height, color_depth; version 2 additionally carries a b"GIMP"
        # magic number and a spacing field.
        header_size = i32(self.fp.read(4))
        if header_size < 20:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        version = i32(self.fp.read(4))
        if version not in (1, 2):
            msg = f"Unsupported GIMP brush version: {version}"
            raise SyntaxError(msg)

        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        color_depth = i32(self.fp.read(4))
        if width <= 0 or height <= 0:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        if color_depth not in (1, 4):
            msg = f"Unsupported GIMP brush color depth: {color_depth}"
            raise SyntaxError(msg)

        if version == 1:
            # v1 header is exactly 20 bytes plus the name.
            comment_length = header_size - 20
        else:
            # v2 header has 8 extra bytes (magic + spacing) before the name.
            comment_length = header_size - 28
            magic_number = self.fp.read(4)
            if magic_number != b"GIMP":
                msg = "not a GIMP brush, bad magic number"
                raise SyntaxError(msg)
            self.info["spacing"] = i32(self.fp.read(4))

        # Brush name is NUL-terminated; strip the trailing NUL byte.
        comment = self.fp.read(comment_length)[:-1]

        if color_depth == 1:
            self._mode = "L"
        else:
            self._mode = "RGBA"

        self._size = width, height

        self.info["comment"] = comment

        # Image might not be small
        Image._decompression_bomb_check(self.size)

        # Data is an uncompressed block of w * h * bytes/pixel
        self._data_size = width * height * color_depth

    def load(self) -> Image.core.PixelAccess | None:
        # Decode lazily: materialize the raw pixel block on first access.
        if self._im is None:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self._data_size))
        return Image.Image.load(self)
#
# registry

# Register the GIMP brush plugin and its file extension.
Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
Image.register_extension(GbrImageFile.format, ".gbr")
venv\Lib\site-packages\PIL\GdImageFile.py
#
# The Python Imaging Library.
# $Id$
#
# GD file handling
#
# History:
# 1996-04-12 fl Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
"""
.. note::
This format cannot be automatically recognized, so the
class is not registered for use with :py:func:`PIL.Image.open()`. To open a
gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.
.. warning::
THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
implementation is provided for convenience and demonstrational
purposes only.
"""
from __future__ import annotations
from typing import IO
from . import ImageFile, ImagePalette, UnidentifiedImageError
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._typing import StrOrBytesPath
class GdImageFile(ImageFile.ImageFile):
    """
    Image plugin for the GD uncompressed format. Note that this format
    is not supported by the standard :py:func:`PIL.Image.open()` function. To use
    this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
    use the :py:func:`PIL.GdImageFile.open()` function.
    """

    format = "GD"
    format_description = "GD uncompressed images"

    def _open(self) -> None:
        # Header
        assert self.fp is not None

        # Fixed-size header: signature, size, truecolor flag, transparency
        # index, then a 256-entry RGBX palette.
        s = self.fp.read(1037)

        if i16(s) not in [65534, 65535]:
            msg = "Not a valid GD 2.x .gd file"
            raise SyntaxError(msg)

        self._mode = "P"
        self._size = i16(s, 2), i16(s, 4)

        true_color = s[6]
        # Truecolor files carry two extra header bytes before the
        # transparency field.
        true_color_offset = 2 if true_color else 0

        # transparency index
        tindex = i32(s, 7 + true_color_offset)
        if tindex < 256:
            self.info["transparency"] = tindex

        self.palette = ImagePalette.raw(
            "RGBX", s[7 + true_color_offset + 6 : 7 + true_color_offset + 6 + 256 * 4]
        )

        self.tile = [
            ImageFile._Tile(
                "raw",
                (0, 0) + self.size,
                7 + true_color_offset + 6 + 256 * 4,
                "L",
            )
        ]
def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile:
    """
    Load texture from a GD image file.

    :param fp: GD file name, or an opened file handle.
    :param mode: Optional mode. In this version, if the mode argument
        is given, it must be "r".
    :returns: An image instance.
    :raises OSError: If the image could not be read.
    """
    if mode != "r":
        raise ValueError("bad mode")

    try:
        return GdImageFile(fp)
    except SyntaxError as e:
        raise UnidentifiedImageError("cannot identify this image file") from e
venv\Lib\site-packages\PIL\GifImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# GIF file handling
#
# History:
# 1995-09-01 fl Created
# 1996-12-14 fl Added interlace support
# 1996-12-30 fl Added animation support
# 1997-01-05 fl Added write support, fixed local colour map bug
# 1997-02-23 fl Make sure to load raster data in getdata()
# 1997-07-05 fl Support external decoder (0.4)
# 1998-07-09 fl Handle all modes when saving (0.5)
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6)
# 2001-04-17 fl Added palette optimization (0.7)
# 2002-06-06 fl Added transparency support for save (0.8)
# 2004-02-24 fl Disable interlacing for small images
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import itertools
import math
import os
import subprocess
from enum import IntEnum
from functools import cached_property
from typing import IO, Any, Literal, NamedTuple, Union, cast
from . import (
Image,
ImageChops,
ImageFile,
ImageMath,
ImageOps,
ImagePalette,
ImageSequence,
)
from ._binary import i16le as i16
from ._binary import o8
from ._binary import o16le as o16
from ._util import DeferredError
TYPE_CHECKING = False
if TYPE_CHECKING:
from . import _imaging
from ._typing import Buffer
class LoadingStrategy(IntEnum):
    """.. versionadded:: 9.1.0"""

    # Controls whether frames after the first are kept in "P" mode or
    # promoted to RGB/RGBA when seeking through an animation.
    RGB_AFTER_FIRST = 0
    RGB_AFTER_DIFFERENT_PALETTE_ONLY = 1
    RGB_ALWAYS = 2
#: .. versionadded:: 9.1.0
# Module-level knob consulted by GifImageFile when decoding frames.
LOADING_STRATEGY = LoadingStrategy.RGB_AFTER_FIRST
# --------------------------------------------------------------------
# Identify/read GIF files
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"GIF87a", b"GIF89a"))
##
# Image plugin for GIF images. This plugin supports both GIF87 and
# GIF89 images.
class GifImageFile(ImageFile.ImageFile):
    """Image plugin for GIF87a and GIF89a images, including animations."""

    format = "GIF"
    format_description = "Compuserve GIF"
    _close_exclusive_fp_after_loading = False

    # Shared palette parsed from the logical screen descriptor (if any).
    global_palette = None

    def data(self) -> bytes | None:
        """Read one GIF data sub-block; None on a zero-length terminator."""
        s = self.fp.read(1)
        if s and s[0]:
            return self.fp.read(s[0])
        return None

    def _is_palette_needed(self, p: bytes) -> bool:
        # A palette that maps every index i to gray (i, i, i) is redundant.
        for i in range(0, len(p), 3):
            if not (i // 3 == p[i] == p[i + 1] == p[i + 2]):
                return True
        return False

    def _open(self) -> None:
        # Screen
        s = self.fp.read(13)
        if not _accept(s):
            msg = "not a GIF file"
            raise SyntaxError(msg)

        self.info["version"] = s[:6]
        self._size = i16(s, 6), i16(s, 8)
        flags = s[10]
        bits = (flags & 7) + 1

        if flags & 128:
            # get global palette
            self.info["background"] = s[11]
            # check if palette contains colour indices
            p = self.fp.read(3 << bits)
            if self._is_palette_needed(p):
                p = ImagePalette.raw("RGB", p)
                self.global_palette = self.palette = p

        self._fp = self.fp  # FIXME: hack
        self.__rewind = self.fp.tell()
        self._n_frames: int | None = None
        self._seek(0)  # get ready to read first frame

    @property
    def n_frames(self) -> int:
        # Counted lazily by seeking to EOF once, then cached.
        if self._n_frames is None:
            current = self.tell()
            try:
                while True:
                    self._seek(self.tell() + 1, False)
            except EOFError:
                self._n_frames = self.tell() + 1
            self.seek(current)
        return self._n_frames

    @cached_property
    def is_animated(self) -> bool:
        # Cheaper than n_frames: only needs to know if a second frame exists.
        if self._n_frames is not None:
            return self._n_frames != 1

        current = self.tell()
        if current:
            return True

        try:
            self._seek(1, False)
            is_animated = True
        except EOFError:
            is_animated = False

        self.seek(current)
        return is_animated

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return
        # GIF frames are delta-encoded, so seeking backwards restarts
        # from frame 0 and replays forward.
        if frame < self.__frame:
            self._im = None
            self._seek(0)

        last_frame = self.__frame
        for f in range(self.__frame + 1, frame + 1):
            try:
                self._seek(f)
            except EOFError as e:
                self.seek(last_frame)
                msg = "no more images in GIF file"
                raise EOFError(msg) from e

    def _seek(self, frame: int, update_image: bool = True) -> None:
        """Advance to the next frame, parsing extensions and the image
        descriptor; with update_image=False only the stream position and
        frame counter are advanced (used for counting frames)."""
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        if frame == 0:
            # rewind
            self.__offset = 0
            self.dispose: _imaging.ImagingCore | None = None
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.disposal_method = 0
            if "comment" in self.info:
                del self.info["comment"]
        else:
            # ensure that the previous frame was loaded
            if self.tile and update_image:
                self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)

        self.fp = self._fp
        if self.__offset:
            # backup to last frame
            self.fp.seek(self.__offset)
            while self.data():
                pass
            self.__offset = 0

        s = self.fp.read(1)
        if not s or s == b";":
            msg = "no more images in GIF file"
            raise EOFError(msg)

        palette: ImagePalette.ImagePalette | Literal[False] | None = None

        info: dict[str, Any] = {}
        frame_transparency = None
        interlace = None
        frame_dispose_extent = None
        while True:
            if not s:
                s = self.fp.read(1)
            if not s or s == b";":
                break

            elif s == b"!":
                #
                # extensions
                #
                s = self.fp.read(1)
                block = self.data()
                if s[0] == 249 and block is not None:
                    #
                    # graphic control extension
                    #
                    flags = block[0]
                    if flags & 1:
                        frame_transparency = block[3]
                    info["duration"] = i16(block, 1) * 10

                    # disposal method - find the value of bits 4 - 6
                    dispose_bits = 0b00011100 & flags
                    dispose_bits = dispose_bits >> 2
                    if dispose_bits:
                        # only set the dispose if it is not
                        # unspecified. I'm not sure if this is
                        # correct, but it seems to prevent the last
                        # frame from looking odd for some animations
                        self.disposal_method = dispose_bits
                elif s[0] == 254:
                    #
                    # comment extension
                    #
                    comment = b""

                    # Read this comment block
                    while block:
                        comment += block
                        block = self.data()

                    if "comment" in info:
                        # If multiple comment blocks in frame, separate with \n
                        info["comment"] += b"\n" + comment
                    else:
                        info["comment"] = comment
                    s = None
                    continue
                elif s[0] == 255 and frame == 0 and block is not None:
                    #
                    # application extension
                    #
                    info["extension"] = block, self.fp.tell()
                    if block.startswith(b"NETSCAPE2.0"):
                        block = self.data()
                        if block and len(block) >= 3 and block[0] == 1:
                            self.info["loop"] = i16(block, 1)
                # skip any remaining sub-blocks of this extension
                while self.data():
                    pass

            elif s == b",":
                #
                # local image
                #
                s = self.fp.read(9)

                # extent
                x0, y0 = i16(s, 0), i16(s, 2)
                x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6)
                if (x1 > self.size[0] or y1 > self.size[1]) and update_image:
                    self._size = max(x1, self.size[0]), max(y1, self.size[1])
                    Image._decompression_bomb_check(self._size)
                frame_dispose_extent = x0, y0, x1, y1
                flags = s[8]

                interlace = (flags & 64) != 0
                if flags & 128:
                    bits = (flags & 7) + 1
                    p = self.fp.read(3 << bits)
                    if self._is_palette_needed(p):
                        palette = ImagePalette.raw("RGB", p)
                    else:
                        palette = False

                # image data
                bits = self.fp.read(1)[0]
                self.__offset = self.fp.tell()
                break
            s = None

        if interlace is None:
            msg = "image not found in GIF frame"
            raise EOFError(msg)

        self.__frame = frame
        if not update_image:
            return

        self.tile = []

        if self.dispose:
            self.im.paste(self.dispose, self.dispose_extent)

        self._frame_palette = palette if palette is not None else self.global_palette
        self._frame_transparency = frame_transparency
        if frame == 0:
            if self._frame_palette:
                if LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
                    self._mode = "RGBA" if frame_transparency is not None else "RGB"
                else:
                    self._mode = "P"
            else:
                self._mode = "L"

            if palette:
                self.palette = palette
            elif self.global_palette:
                from copy import copy

                self.palette = copy(self.global_palette)
            else:
                self.palette = None
        else:
            if self.mode == "P":
                if (
                    LOADING_STRATEGY != LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
                    or palette
                ):
                    if "transparency" in self.info:
                        self.im.putpalettealpha(self.info["transparency"], 0)
                        self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
                        self._mode = "RGBA"
                        del self.info["transparency"]
                    else:
                        self._mode = "RGB"
                        self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)

        def _rgb(color: int) -> tuple[int, int, int]:
            # Resolve a palette index to an RGB triple; out-of-range
            # indices fall back to entry 0.
            if self._frame_palette:
                if color * 3 + 3 > len(self._frame_palette.palette):
                    color = 0
                return cast(
                    tuple[int, int, int],
                    tuple(self._frame_palette.palette[color * 3 : color * 3 + 3]),
                )
            else:
                return (color, color, color)

        self.dispose = None
        self.dispose_extent: tuple[int, int, int, int] | None = frame_dispose_extent
        if self.dispose_extent and self.disposal_method >= 2:
            try:
                if self.disposal_method == 2:
                    # replace with background colour

                    # only dispose the extent in this frame
                    x0, y0, x1, y1 = self.dispose_extent
                    dispose_size = (x1 - x0, y1 - y0)

                    Image._decompression_bomb_check(dispose_size)

                    # by convention, attempt to use transparency first
                    dispose_mode = "P"
                    color = self.info.get("transparency", frame_transparency)
                    if color is not None:
                        if self.mode in ("RGB", "RGBA"):
                            dispose_mode = "RGBA"
                            color = _rgb(color) + (0,)
                    else:
                        color = self.info.get("background", 0)
                        if self.mode in ("RGB", "RGBA"):
                            dispose_mode = "RGB"
                            color = _rgb(color)
                    self.dispose = Image.core.fill(dispose_mode, dispose_size, color)
                else:
                    # replace with previous contents
                    if self._im is not None:
                        # only dispose the extent in this frame
                        self.dispose = self._crop(self.im, self.dispose_extent)
                    elif frame_transparency is not None:
                        x0, y0, x1, y1 = self.dispose_extent
                        dispose_size = (x1 - x0, y1 - y0)

                        Image._decompression_bomb_check(dispose_size)
                        dispose_mode = "P"
                        color = frame_transparency
                        if self.mode in ("RGB", "RGBA"):
                            dispose_mode = "RGBA"
                            color = _rgb(frame_transparency) + (0,)
                        self.dispose = Image.core.fill(
                            dispose_mode, dispose_size, color
                        )
            except AttributeError:
                pass

        if interlace is not None:
            transparency = -1
            if frame_transparency is not None:
                if frame == 0:
                    if LOADING_STRATEGY != LoadingStrategy.RGB_ALWAYS:
                        self.info["transparency"] = frame_transparency
                elif self.mode not in ("RGB", "RGBA"):
                    transparency = frame_transparency
            self.tile = [
                ImageFile._Tile(
                    "gif",
                    (x0, y0, x1, y1),
                    self.__offset,
                    (bits, interlace, transparency),
                )
            ]

        if info.get("comment"):
            self.info["comment"] = info["comment"]
        for k in ["duration", "extension"]:
            if k in info:
                self.info[k] = info[k]
            elif k in self.info:
                del self.info[k]

    def load_prepare(self) -> None:
        # Set up the decode buffer for this frame, remembering the previous
        # composited image so load_end() can paste the delta over it.
        temp_mode = "P" if self._frame_palette else "L"
        self._prev_im = None
        if self.__frame == 0:
            if self._frame_transparency is not None:
                self.im = Image.core.fill(
                    temp_mode, self.size, self._frame_transparency
                )
        elif self.mode in ("RGB", "RGBA"):
            self._prev_im = self.im
            if self._frame_palette:
                self.im = Image.core.fill("P", self.size, self._frame_transparency or 0)
                self.im.putpalette("RGB", *self._frame_palette.getdata())
            else:
                self._im = None
        if not self._prev_im and self._im is not None and self.size != self.im.size:
            expanded_im = Image.core.fill(self.im.mode, self.size)
            if self._frame_palette:
                expanded_im.putpalette("RGB", *self._frame_palette.getdata())
            expanded_im.paste(self.im, (0, 0) + self.im.size)

            self.im = expanded_im
        self._mode = temp_mode
        self._frame_palette = None

        super().load_prepare()

    def load_end(self) -> None:
        # Composite the decoded frame over the previous frame contents.
        if self.__frame == 0:
            if self.mode == "P" and LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
                if self._frame_transparency is not None:
                    self.im.putpalettealpha(self._frame_transparency, 0)
                    self._mode = "RGBA"
                else:
                    self._mode = "RGB"
                self.im = self.im.convert(self.mode, Image.Dither.FLOYDSTEINBERG)
            return
        if not self._prev_im:
            return
        if self.size != self._prev_im.size:
            if self._frame_transparency is not None:
                expanded_im = Image.core.fill("RGBA", self.size)
            else:
                expanded_im = Image.core.fill("P", self.size)
                expanded_im.putpalette("RGB", "RGB", self.im.getpalette())
                expanded_im = expanded_im.convert("RGB")
            expanded_im.paste(self._prev_im, (0, 0) + self._prev_im.size)

            self._prev_im = expanded_im
        assert self._prev_im is not None
        if self._frame_transparency is not None:
            if self.mode == "L":
                frame_im = self.im.convert_transparent("LA", self._frame_transparency)
            else:
                self.im.putpalettealpha(self._frame_transparency, 0)
                frame_im = self.im.convert("RGBA")
        else:
            frame_im = self.im.convert("RGB")

        assert self.dispose_extent is not None
        frame_im = self._crop(frame_im, self.dispose_extent)

        self.im = self._prev_im
        self._mode = self.im.mode
        if frame_im.mode in ("LA", "RGBA"):
            self.im.paste(frame_im, self.dispose_extent, frame_im)
        else:
            self.im.paste(frame_im, self.dispose_extent)

    def tell(self) -> int:
        return self.__frame
# --------------------------------------------------------------------
# Write GIF files

# Map of savable input modes to the raw mode used by the GIF encoder.
RAWMODE = {"1": "L", "L": "L", "P": "P"}
def _normalize_mode(im: Image.Image) -> Image.Image:
    """
    Takes an image (or frame), returns an image in a mode that is appropriate
    for saving in a Gif.

    It may return the original image, or it may return an image converted to
    palette or 'L' mode.

    :param im: Image object
    :returns: Image object
    """
    if im.mode in RAWMODE:
        im.load()
        return im
    if Image.getmodebase(im.mode) != "RGB":
        return im.convert("L")

    # RGB-based input: quantize to an adaptive palette, remembering the
    # first fully transparent palette entry (if any) as the transparency
    # index for the GIF encoder.
    converted = im.convert("P", palette=Image.Palette.ADAPTIVE)
    assert converted.palette is not None
    if converted.palette.mode == "RGBA":
        transparent = next(
            (rgba for rgba in converted.palette.colors if rgba[3] == 0), None
        )
        if transparent is not None:
            converted.info["transparency"] = converted.palette.colors[transparent]
    return converted
# Acceptable palette inputs for the save paths below.
_Palette = Union[bytes, bytearray, list[int], ImagePalette.ImagePalette]
def _normalize_palette(
    im: Image.Image, palette: _Palette | None, info: dict[str, Any]
) -> Image.Image:
    """
    Normalizes the palette for image.
      - Sets the palette to the incoming palette, if provided.
      - Ensures that there's a palette for L mode images
      - Optimizes the palette if necessary/desired.

    :param im: Image object
    :param palette: bytes object containing the source palette, or ....
    :param info: encoderinfo
    :returns: Image object
    """
    source_palette = None
    if palette:
        # a bytes palette
        if isinstance(palette, (bytes, bytearray, list)):
            source_palette = bytearray(palette[:768])
        if isinstance(palette, ImagePalette.ImagePalette):
            source_palette = bytearray(palette.palette)

    if im.mode == "P":
        if not source_palette:
            im_palette = im.getpalette(None)
            assert im_palette is not None
            source_palette = bytearray(im_palette)
    else:  # L-mode
        if not source_palette:
            # default grayscale ramp: entry i -> (i, i, i)
            source_palette = bytearray(i // 3 for i in range(768))
        im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette)
    assert source_palette is not None

    if palette:
        # Remap the image's palette indices onto the explicit source
        # palette, assigning unmatched entries to unused slots.
        used_palette_colors: list[int | None] = []
        assert im.palette is not None
        for i in range(0, len(source_palette), 3):
            source_color = tuple(source_palette[i : i + 3])
            index = im.palette.colors.get(source_color)
            if index in used_palette_colors:
                index = None
            used_palette_colors.append(index)
        for i, index in enumerate(used_palette_colors):
            if index is None:
                for j in range(len(used_palette_colors)):
                    if j not in used_palette_colors:
                        used_palette_colors[i] = j
                        break
        dest_map: list[int] = []
        for index in used_palette_colors:
            assert index is not None
            dest_map.append(index)
        im = im.remap_palette(dest_map)
    else:
        optimized_palette_colors = _get_optimize(im, info)
        if optimized_palette_colors is not None:
            im = im.remap_palette(optimized_palette_colors, source_palette)
            if "transparency" in info:
                # keep the transparency index in sync with the remapping
                try:
                    info["transparency"] = optimized_palette_colors.index(
                        info["transparency"]
                    )
                except ValueError:
                    del info["transparency"]
            return im

    assert im.palette is not None
    im.palette.palette = source_palette
    return im
def _write_single_frame(
    im: Image.Image,
    fp: IO[bytes],
    palette: _Palette | None,
) -> None:
    """Write a non-animated GIF: global header, one local image, pixel data."""
    im_out = _normalize_mode(im)
    for k, v in im_out.info.items():
        if isinstance(k, str):
            im.encoderinfo.setdefault(k, v)
    im_out = _normalize_palette(im_out, palette, im.encoderinfo)

    for s in _get_global_header(im_out, im.encoderinfo):
        fp.write(s)

    # local image header
    flags = 0
    if get_interlace(im):
        flags = flags | 64
    _write_local_header(fp, im, (0, 0), flags)

    im_out.encoderconfig = (8, get_interlace(im))
    ImageFile._save(
        im_out, fp, [ImageFile._Tile("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])]
    )

    fp.write(b"\0")  # end of image data
def _getbbox(
    base_im: Image.Image, im_frame: Image.Image
) -> tuple[Image.Image, tuple[int, int, int, int] | None]:
    """Return the modulo-difference of two frames and its bounding box."""

    def _raw_palette(im: Image.Image) -> bytes:
        return bytes(im.palette.palette) if im.palette else b""

    # Frames with different palettes cannot be compared index-by-index;
    # promote both to RGBA first so the pixel delta is meaningful.
    if _raw_palette(base_im) != _raw_palette(im_frame):
        im_frame = im_frame.convert("RGBA")
        base_im = base_im.convert("RGBA")
    delta = ImageChops.subtract_modulo(im_frame, base_im)
    return delta, delta.getbbox(alpha_only=False)
class _Frame(NamedTuple):
    # A prepared output frame: the image to encode, the changed region
    # (None means "write a full frame preceded by a global header"), and
    # the per-frame encoder options.
    im: Image.Image
    bbox: tuple[int, int, int, int] | None
    encoderinfo: dict[str, Any]
def _write_multiple_frames(
    im: Image.Image, fp: IO[bytes], palette: _Palette | None
) -> bool:
    """Write an animated GIF from *im* plus any append_images.

    Returns False (writing nothing beyond what the caller handles) when
    only a single distinct frame exists, so the caller can fall back to
    the single-frame path.
    """
    duration = im.encoderinfo.get("duration")
    disposal = im.encoderinfo.get("disposal", im.info.get("disposal"))

    im_frames: list[_Frame] = []
    previous_im: Image.Image | None = None
    frame_count = 0
    background_im = None
    for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])):
        for im_frame in ImageSequence.Iterator(imSequence):
            # a copy is required here since seek can still mutate the image
            im_frame = _normalize_mode(im_frame.copy())
            if frame_count == 0:
                for k, v in im_frame.info.items():
                    if k == "transparency":
                        continue
                    if isinstance(k, str):
                        im.encoderinfo.setdefault(k, v)

            encoderinfo = im.encoderinfo.copy()
            if "transparency" in im_frame.info:
                encoderinfo.setdefault("transparency", im_frame.info["transparency"])
            im_frame = _normalize_palette(im_frame, palette, encoderinfo)
            if isinstance(duration, (list, tuple)):
                encoderinfo["duration"] = duration[frame_count]
            elif duration is None and "duration" in im_frame.info:
                encoderinfo["duration"] = im_frame.info["duration"]
            if isinstance(disposal, (list, tuple)):
                encoderinfo["disposal"] = disposal[frame_count]
            frame_count += 1

            diff_frame = None
            if im_frames and previous_im:
                # delta frame
                delta, bbox = _getbbox(previous_im, im_frame)
                if not bbox:
                    # This frame is identical to the previous frame
                    if encoderinfo.get("duration"):
                        im_frames[-1].encoderinfo["duration"] += encoderinfo["duration"]
                    continue
                if im_frames[-1].encoderinfo.get("disposal") == 2:
                    # To appear correctly in viewers using a convention,
                    # only consider transparency, and not background color
                    color = im.encoderinfo.get(
                        "transparency", im.info.get("transparency")
                    )
                    if color is not None:
                        if background_im is None:
                            background = _get_background(im_frame, color)
                            background_im = Image.new("P", im_frame.size, background)
                            first_palette = im_frames[0].im.palette
                            assert first_palette is not None
                            background_im.putpalette(first_palette, first_palette.mode)
                        bbox = _getbbox(background_im, im_frame)[1]
                    else:
                        bbox = (0, 0) + im_frame.size
                elif encoderinfo.get("optimize") and im_frame.mode != "1":
                    if "transparency" not in encoderinfo:
                        assert im_frame.palette is not None
                        try:
                            encoderinfo["transparency"] = (
                                im_frame.palette._new_color_index(im_frame)
                            )
                        except ValueError:
                            pass
                    if "transparency" in encoderinfo:
                        # When the delta is zero, fill the image with transparency
                        diff_frame = im_frame.copy()
                        fill = Image.new("P", delta.size, encoderinfo["transparency"])
                        if delta.mode == "RGBA":
                            r, g, b, a = delta.split()
                            mask = ImageMath.lambda_eval(
                                lambda args: args["convert"](
                                    args["max"](
                                        args["max"](
                                            args["max"](args["r"], args["g"]), args["b"]
                                        ),
                                        args["a"],
                                    )
                                    * 255,
                                    "1",
                                ),
                                r=r,
                                g=g,
                                b=b,
                                a=a,
                            )
                        else:
                            if delta.mode == "P":
                                # Convert to L without considering palette
                                delta_l = Image.new("L", delta.size)
                                delta_l.putdata(delta.getdata())
                                delta = delta_l
                            mask = ImageMath.lambda_eval(
                                lambda args: args["convert"](args["im"] * 255, "1"),
                                im=delta,
                            )
                        diff_frame.paste(fill, mask=ImageOps.invert(mask))
            else:
                bbox = None
            previous_im = im_frame
            im_frames.append(_Frame(diff_frame or im_frame, bbox, encoderinfo))

    if len(im_frames) == 1:
        if "duration" in im.encoderinfo:
            # Since multiple frames will not be written, use the combined duration
            im.encoderinfo["duration"] = im_frames[0].encoderinfo["duration"]
        return False

    for frame_data in im_frames:
        im_frame = frame_data.im
        if not frame_data.bbox:
            # global header
            for s in _get_global_header(im_frame, frame_data.encoderinfo):
                fp.write(s)
            offset = (0, 0)
        else:
            # compress difference
            if not palette:
                frame_data.encoderinfo["include_color_table"] = True

            if frame_data.bbox != (0, 0) + im_frame.size:
                im_frame = im_frame.crop(frame_data.bbox)
            offset = frame_data.bbox[:2]
        _write_frame_data(fp, im_frame, offset, frame_data.encoderinfo)
    return True
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # Entry point used by Image.save(..., save_all=True).
    _save(im, fp, filename, save_all=True)
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
) -> None:
    """Write *im* (and, with save_all, any appended frames) to *fp* as a GIF."""
    # header
    if "palette" in im.encoderinfo or "palette" in im.info:
        palette = im.encoderinfo.get("palette", im.info.get("palette"))
    else:
        palette = None
        # Only default to palette optimization when none was supplied.
        im.encoderinfo.setdefault("optimize", True)

    if not save_all or not _write_multiple_frames(im, fp, palette):
        _write_single_frame(im, fp, palette)

    fp.write(b";")  # end of file

    if hasattr(fp, "flush"):
        fp.flush()
def get_interlace(im: Image.Image) -> int:
    """Return 1 if the frame should be written interlaced, else 0."""
    wants = im.encoderinfo.get("interlace", 1)

    # workaround for @PIL153: never interlace images smaller than 16px
    return 0 if min(im.size) < 16 else wants
def _write_local_header(
    fp: IO[bytes], im: Image.Image, offset: tuple[int, int], flags: int
) -> None:
    """Write a Graphic Control Extension (if needed) and the Image
    Descriptor, plus the local color table when requested."""
    try:
        transparency = im.encoderinfo["transparency"]
    except KeyError:
        transparency = None

    if "duration" in im.encoderinfo:
        # GIF stores duration in centiseconds; encoderinfo is milliseconds.
        duration = int(im.encoderinfo["duration"] / 10)
    else:
        duration = 0

    disposal = int(im.encoderinfo.get("disposal", 0))

    if transparency is not None or duration != 0 or disposal:
        packed_flag = 1 if transparency is not None else 0
        packed_flag |= disposal << 2

        fp.write(
            b"!"
            + o8(249)  # extension intro
            + o8(4)  # length
            + o8(packed_flag)  # packed fields
            + o16(duration)  # duration
            + o8(transparency or 0)  # transparency index
            + o8(0)
        )

    include_color_table = im.encoderinfo.get("include_color_table")
    if include_color_table:
        palette_bytes = _get_palette_bytes(im)
        color_table_size = _get_color_table_size(palette_bytes)
        if color_table_size:
            flags = flags | 128  # local color table flag
            flags = flags | color_table_size

    fp.write(
        b","
        + o16(offset[0])  # offset
        + o16(offset[1])
        + o16(im.size[0])  # size
        + o16(im.size[1])
        + o8(flags)  # flags
    )
    if include_color_table and color_table_size:
        fp.write(_get_header_palette(palette_bytes))
    fp.write(o8(8))  # bits
def _save_netpbm(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save via the external NETPBM/PBMPLUS tools (ppmquant | ppmtogif)."""
    # Unused by default.
    # To use, uncomment the register_save call at the end of the file.
    #
    # If you need real GIF compression and/or RGB quantization, you
    # can use the external NETPBM/PBMPLUS utilities. See comments
    # below for information on how to enable this.
    tempfile = im._dump()

    try:
        with open(filename, "wb") as f:
            if im.mode != "RGB":
                subprocess.check_call(
                    ["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL
                )
            else:
                # Pipe ppmquant output into ppmtogif
                # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename)
                quant_cmd = ["ppmquant", "256", tempfile]
                togif_cmd = ["ppmtogif"]
                quant_proc = subprocess.Popen(
                    quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
                )
                togif_proc = subprocess.Popen(
                    togif_cmd,
                    stdin=quant_proc.stdout,
                    stdout=f,
                    stderr=subprocess.DEVNULL,
                )

                # Allow ppmquant to receive SIGPIPE if ppmtogif exits
                assert quant_proc.stdout is not None
                quant_proc.stdout.close()

                retcode = quant_proc.wait()
                if retcode:
                    raise subprocess.CalledProcessError(retcode, quant_cmd)

                retcode = togif_proc.wait()
                if retcode:
                    raise subprocess.CalledProcessError(retcode, togif_cmd)
    finally:
        # best-effort cleanup of the temporary PPM dump
        try:
            os.unlink(tempfile)
        except OSError:
            pass
# Force optimization so that we can test performance against
# cases where it took lots of memory and time previously.
# (Internal test hook; leave False in production.)
_FORCE_OPTIMIZE = False
def _get_optimize(im: Image.Image, info: dict[str, Any]) -> list[int] | None:
    """
    Palette optimization is a potentially expensive operation.

    This function determines if the palette should be optimized using
    some heuristics, then returns the list of palette entries in use.

    :param im: Image object
    :param info: encoderinfo
    :returns: list of indexes of palette entries in use, or None
    """
    if im.mode in ("P", "L") and info and info.get("optimize"):
        # Potentially expensive operation.

        # The palette saves 3 bytes per color not used, but palette
        # lengths are restricted to 3*(2**N) bytes. Max saving would
        # be 768 -> 6 bytes if we went all the way down to 2 colors.
        # * If we're over 128 colors, we can't save any space.
        # * If there aren't any holes, it's not worth collapsing.
        # * If we have a 'large' image, the palette is in the noise.

        # create the new palette if not every color is used
        optimise = _FORCE_OPTIMIZE or im.mode == "L"
        if optimise or im.width * im.height < 512 * 512:
            # check which colors are used
            used_palette_colors = []
            for i, count in enumerate(im.histogram()):
                if count:
                    used_palette_colors.append(i)

            if optimise or max(used_palette_colors) >= len(used_palette_colors):
                return used_palette_colors

            assert im.palette is not None
            num_palette_colors = len(im.palette.palette) // Image.getmodebands(
                im.palette.mode
            )
            current_palette_size = 1 << (num_palette_colors - 1).bit_length()
            if (
                # check that the palette would become smaller when saved
                len(used_palette_colors) <= current_palette_size // 2
                # check that the palette is not already the smallest possible size
                and current_palette_size > 2
            ):
                return used_palette_colors
    return None
def _get_color_table_size(palette_bytes: bytes) -> int:
# calculate the palette size for the header
if not palette_bytes:
return 0
elif len(palette_bytes) < 9:
return 1
else:
return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1
def _get_header_palette(palette_bytes: bytes) -> bytes:
    """
    Returns the palette, null padded to the next power of 2 (*3) bytes
    suitable for direct inclusion in the GIF header

    :param palette_bytes: Unpadded palette bytes, in RGBRGB form
    :returns: Null padded palette
    """
    color_table_size = _get_color_table_size(palette_bytes)

    # add the missing amount of bytes
    # the palette has to be 2<<n bytes long
    # (this assignment was lost in a garbled merge of these lines;
    # without it the padding branch raised NameError)
    actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3
    if actual_target_size_diff > 0:
        palette_bytes += o8(0) * 3 * actual_target_size_diff
    return palette_bytes
def _get_palette_bytes(im: Image.Image) -> bytes:
"""
Gets the palette for inclusion in the gif header
:param im: Image object
:returns: Bytes, len<=768 suitable for inclusion in gif header
"""
if not im.palette:
return b""
palette = bytes(im.palette.palette)
if im.palette.mode == "RGBA":
palette = b"".join(palette[i * 4 : i * 4 + 3] for i in range(len(palette) // 3))
return palette
def _get_background(
im: Image.Image,
info_background: int | tuple[int, int, int] | tuple[int, int, int, int] | None,
) -> int:
background = 0
if info_background:
if isinstance(info_background, tuple):
# WebPImagePlugin stores an RGBA value in info["background"]
# So it must be converted to the same format as GifImagePlugin's
# info["background"] - a global color table index
assert im.palette is not None
try:
background = im.palette.getcolor(info_background, im)
except ValueError as e:
if str(e) not in (
# If all 256 colors are in use,
# then there is no need for the background color
"cannot allocate more than 256 colors",
# Ignore non-opaque WebP background
"cannot add non-opaque RGBA color to RGB palette",
):
raise
else:
background = info_background
return background
def _get_global_header(im: Image.Image, info: dict[str, Any]) -> list[bytes]:
    """Return a list of strings representing a GIF header"""

    # Header Block
    # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp

    # GIF89a is only required when extension blocks (transparency, loop,
    # duration, comment) will be written; otherwise stay with GIF87a.
    version = b"87a"
    if im.info.get("version") == b"89a" or (
        info
        and (
            "transparency" in info
            or info.get("loop") is not None
            or info.get("duration")
            or info.get("comment")
        )
    ):
        version = b"89a"

    background = _get_background(im, info.get("background"))

    palette_bytes = _get_palette_bytes(im)
    color_table_size = _get_color_table_size(palette_bytes)

    header = [
        b"GIF"  # signature
        + version  # version
        + o16(im.size[0])  # canvas width
        + o16(im.size[1]),  # canvas height
        # Logical Screen Descriptor
        # size of global color table + global color table flag
        o8(color_table_size + 128),  # packed fields
        # background + reserved/aspect
        o8(background) + o8(0),
        # Global Color Table
        _get_header_palette(palette_bytes),
    ]
    if info.get("loop") is not None:
        # NETSCAPE2.0 application extension carries the loop count
        header.append(
            b"!"
            + o8(255)  # extension intro
            + o8(11)
            + b"NETSCAPE2.0"
            + o8(3)
            + o8(1)
            + o16(info["loop"])  # number of loops
            + o8(0)
        )
    if info.get("comment"):
        comment_block = b"!" + o8(254)  # extension intro

        comment = info["comment"]
        if isinstance(comment, str):
            comment = comment.encode()
        # comment data is split into 255-byte sub-blocks per the GIF spec
        for i in range(0, len(comment), 255):
            subblock = comment[i : i + 255]
            comment_block += o8(len(subblock)) + subblock
        comment_block += o8(0)
        header.append(comment_block)
    return header
def _write_frame_data(
    fp: IO[bytes],
    im_frame: Image.Image,
    offset: tuple[int, int],
    params: dict[str, Any],
) -> None:
    """Write one frame (local header + GIF-encoded raster) to *fp*.

    ``params`` is installed as the frame's encoderinfo for the duration
    of the write and always removed afterwards.
    """
    try:
        im_frame.encoderinfo = params

        # local image header
        _write_local_header(fp, im_frame, offset, 0)

        ImageFile._save(
            im_frame,
            fp,
            [ImageFile._Tile("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])],
        )

        fp.write(b"\0")  # end of image data
    finally:
        del im_frame.encoderinfo
# --------------------------------------------------------------------
# Legacy GIF utilities


def getheader(
    im: Image.Image, palette: _Palette | None = None, info: dict[str, Any] | None = None
) -> tuple[list[bytes], list[int] | None]:
    """
    Legacy Method to get Gif data from image.

    Warning:: May modify image data.

    :param im: Image object
    :param palette: bytes object containing the source palette, or ....
    :param info: encoderinfo
    :returns: tuple of(list of header items, optimized palette)
    """
    if info is None:
        info = {}

    used_palette_colors = _get_optimize(im, info)
    if "background" not in info and "background" in im.info:
        info["background"] = im.info["background"]

    # NOTE: mutates *im* in place — its palette and raster are replaced
    # by the normalized versions used to build the header.
    im_mod = _normalize_palette(im, palette, info)
    im.palette = im_mod.palette
    im.im = im_mod.im
    header = _get_global_header(im, info)

    return header, used_palette_colors
def getdata(
    im: Image.Image, offset: tuple[int, int] = (0, 0), **params: Any
) -> list[bytes]:
    """
    Legacy Method

    Return a list of strings representing this image.
    The first string is a local image header, the rest contains
    encoded image data.

    To specify duration, add the time in milliseconds,
    e.g. ``getdata(im_frame, duration=1000)``

    :param im: Image object
    :param offset: Tuple of (x, y) pixels. Defaults to (0, 0)
    :param \\**params: e.g. duration or other encoder info parameters
    :returns: List of bytes containing GIF encoded frame data
    """
    from io import BytesIO

    class Collector(BytesIO):
        # Records each write() chunk as a separate list item instead of
        # concatenating, so the caller receives header and data sub-blocks
        # individually.  The class is created afresh on every call, so the
        # class-level list is not shared between calls.
        data = []

        def write(self, data: Buffer) -> int:
            self.data.append(data)
            return len(data)

    im.load()  # make sure raster data is available

    fp = Collector()

    _write_frame_data(fp, im, offset, params)

    return fp.data
# --------------------------------------------------------------------
# Registry

# Register the GIF plugin's open/save hooks, extension and MIME type.
Image.register_open(GifImageFile.format, GifImageFile, _accept)
Image.register_save(GifImageFile.format, _save)
Image.register_save_all(GifImageFile.format, _save_all)
Image.register_extension(GifImageFile.format, ".gif")
Image.register_mime(GifImageFile.format, "image/gif")

#
# Uncomment the following line if you wish to use NETPBM/PBMPLUS
# instead of the built-in "uncompressed" GIF encoder

# Image.register_save(GifImageFile.format, _save_netpbm)
venv\Lib\site-packages\PIL\GimpGradientFile.py
#
# Python Imaging Library
# $Id$
#
# stuff to read (and render) GIMP gradient files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
"""
Stuff to translate curve segments to palette values (derived from
the corresponding code in GIMP, written by Federico Mena Quintero.
See the GIMP distribution for more information.)
"""
from __future__ import annotations
from math import log, pi, sin, sqrt
from typing import IO, Callable
from ._binary import o8
EPSILON = 1e-10
""""""  # Enable auto-doc for data member


def linear(middle: float, pos: float) -> float:
    """Piecewise-linear ramp mapping *pos* through the midpoint *middle*."""
    if pos <= middle:
        # left half: 0 .. 0.5
        return 0.0 if middle < EPSILON else 0.5 * pos / middle
    # right half: 0.5 .. 1
    remaining = 1.0 - middle
    if remaining < EPSILON:
        return 1.0
    return 0.5 + 0.5 * (pos - middle) / remaining


def curved(middle: float, pos: float) -> float:
    """Power-curve segment; the midpoint maps to 0.5."""
    return pos ** (log(0.5) / log(max(middle, EPSILON)))


def sine(middle: float, pos: float) -> float:
    """Sinusoidal easing of the linear ramp."""
    return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0


def sphere_increasing(middle: float, pos: float) -> float:
    """Quarter-circle segment with increasing curvature."""
    return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2)


def sphere_decreasing(middle: float, pos: float) -> float:
    """Quarter-circle segment with decreasing curvature."""
    return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2)


# Segment-type indexes read from the gradient file map into this table.
SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing]
""""""  # Enable auto-doc for data member
class GradientFile:
    # Parsed gradient: list of (x0, x1, xm, rgb0, rgb1, segment_function)
    # tuples covering [0, 1]; None until a subclass parser fills it in.
    gradient: (
        list[
            tuple[
                float,
                float,
                float,
                list[float],
                list[float],
                Callable[[float, float], float],
            ]
        ]
        | None
    ) = None

    def getpalette(self, entries: int = 256) -> tuple[bytes, str]:
        """Render the gradient into an RGBA palette of *entries* colors."""
        assert self.gradient is not None
        palette = []

        ix = 0
        x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

        for i in range(entries):
            x = i / (entries - 1)

            # advance to the gradient segment containing x
            while x1 < x:
                ix += 1
                x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

            w = x1 - x0

            if w < EPSILON:
                # degenerate (zero-width) segment: sample at its midpoint
                scale = segment(0.5, 0.5)
            else:
                scale = segment((xm - x0) / w, (x - x0) / w)

            # expand to RGBA
            r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
            g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
            b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
            a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))

            # add to palette
            palette.append(r + g + b + a)

        return b"".join(palette), "RGBA"
class GimpGradientFile(GradientFile):
    """File handler for GIMP's gradient format."""

    def __init__(self, fp: IO[bytes]) -> None:
        if not fp.readline().startswith(b"GIMP Gradient"):
            msg = "not a GIMP gradient file"
            raise SyntaxError(msg)

        line = fp.readline()

        # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do
        if line.startswith(b"Name: "):
            line = fp.readline().strip()

        count = int(line)

        self.gradient = []

        for i in range(count):
            # each line: x0 xm x1  r0 g0 b0 a0  r1 g1 b1 a1  seg_type cspace
            s = fp.readline().split()
            w = [float(x) for x in s[:11]]

            x0, x1 = w[0], w[2]
            xm = w[1]
            rgb0 = w[3:7]
            rgb1 = w[7:11]

            segment = SEGMENTS[int(s[11])]
            cspace = int(s[12])

            if cspace != 0:
                # only RGB blending is implemented here
                msg = "cannot handle HSV colour space"
                raise OSError(msg)

            self.gradient.append((x0, x1, xm, rgb0, rgb1, segment))
venv\Lib\site-packages\PIL\GimpPaletteFile.py
#
# Python Imaging Library
# $Id$
#
# stuff to read GIMP palette files
#
# History:
# 1997-08-23 fl Created
# 2004-09-07 fl Support GIMP 2.0 palette files.
#
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
# Copyright (c) Fredrik Lundh 1997-2004.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import re
from io import BytesIO
from typing import IO
class GimpPaletteFile:
    """File handler for GIMP's palette format."""

    rawmode = "RGB"

    def _read(self, fp: IO[bytes], limit: bool = True) -> None:
        """Parse palette entries from *fp* into ``self.palette``.

        With *limit* set, at most 256 + 3 lines are read, over-long lines
        are rejected, and parsing stops once 256 colors are collected.
        """
        if not fp.readline().startswith(b"GIMP Palette"):
            msg = "not a GIMP palette file"
            raise SyntaxError(msg)

        samples: list[int] = []
        lines_read = 0
        while not (limit and lines_read == 256 + 3):
            lines_read += 1
            line = fp.readline()
            if not line:
                break

            # skip fields and comment lines
            if re.match(rb"\w+:|#", line):
                continue
            if limit and len(line) > 100:
                msg = "bad palette file"
                raise SyntaxError(msg)

            fields = line.split(maxsplit=3)
            if len(fields) < 3:
                msg = "bad palette entry"
                raise ValueError(msg)

            samples.extend(int(fields[channel]) for channel in range(3))
            if limit and len(samples) == 768:
                break

        self.palette = bytes(samples)

    def __init__(self, fp: IO[bytes]) -> None:
        self._read(fp)

    @classmethod
    def frombytes(cls, data: bytes) -> GimpPaletteFile:
        """Parse palette data from a bytes object, without line limits."""
        instance = cls.__new__(cls)
        instance._read(BytesIO(data), False)
        return instance

    def getpalette(self) -> tuple[bytes, str]:
        return self.palette, self.rawmode
venv\Lib\site-packages\PIL\GribStubImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# GRIB stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
# Application-installed GRIB handler; None until register_handler() is called.
_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific GRIB image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"GRIB") and prefix[7] == 1
class GribStubImageFile(ImageFile.StubImageFile):
    format = "GRIB"
    format_description = "GRIB"

    def _open(self) -> None:
        # verify the magic, then rewind so a real handler sees the whole file
        if not _accept(self.fp.read(8)):
            msg = "Not a GRIB file"
            raise SyntaxError(msg)

        self.fp.seek(-8, os.SEEK_CUR)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        # the stub delegates all real decoding to the registered handler
        return _handler
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Delegate saving to the registered GRIB handler, if one exists."""
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        msg = "GRIB save handler not installed"
        raise OSError(msg)
    handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry

# Register the GRIB stub with the Image plugin machinery.
Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
Image.register_save(GribStubImageFile.format, _save)
Image.register_extension(GribStubImageFile.format, ".grib")
venv\Lib\site-packages\PIL\Hdf5StubImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# HDF5 stub adapter
#
# Copyright (c) 2000-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
# Application-installed HDF5 handler; None until register_handler() is called.
_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific HDF5 image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\x89HDF\r\n\x1a\n")
class HDF5StubImageFile(ImageFile.StubImageFile):
    format = "HDF5"
    format_description = "HDF5"

    def _open(self) -> None:
        # verify the signature, then rewind so a real handler sees the file
        if not _accept(self.fp.read(8)):
            msg = "Not an HDF file"
            raise SyntaxError(msg)

        self.fp.seek(-8, os.SEEK_CUR)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        # the stub delegates all real decoding to the registered handler
        return _handler
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Delegate saving to the registered HDF5 handler, if one exists."""
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        msg = "HDF5 save handler not installed"
        raise OSError(msg)
    handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry

# Register the HDF5 stub with the Image plugin machinery.
Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
Image.register_save(HDF5StubImageFile.format, _save)
Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"])
venv\Lib\site-packages\PIL\IcnsImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# macOS icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
# 2020-04-04 Allow saving on all operating systems.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
# Copyright (c) 2020 by Pan Jing.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import os
import struct
import sys
from typing import IO
from . import Image, ImageFile, PngImagePlugin, features
from ._deprecate import deprecate
# JPEG 2000 support is optional; some icon variants are stored as JP2/j2k.
enable_jpeg2k = features.check_codec("jpg_2000")
if enable_jpeg2k:
    from . import Jpeg2KImagePlugin

MAGIC = b"icns"  # icns container file signature
HEADERSIZE = 8  # every block: 4-byte type tag + 4-byte big-endian length
def nextheader(fobj: IO[bytes]) -> tuple[bytes, int]:
    """Read the next (signature, byte-length) block header from *fobj*."""
    raw = fobj.read(HEADERSIZE)
    return struct.unpack(">4sI", raw)
def read_32t(
    fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int]
) -> dict[str, Image.Image]:
    """Read an ``it32`` icon: read_32 data preceded by a 4-byte zero header."""
    # The 128x128 icon seems to have an extra header for some reason.
    start, length = start_length
    fobj.seek(start)
    if fobj.read(4) != b"\x00\x00\x00\x00":
        msg = "Unknown signature, expecting 0x00000000"
        raise SyntaxError(msg)
    return read_32(fobj, (start + 4, length - 4), size)
def read_32(
    fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int]
) -> dict[str, Image.Image]:
    """
    Read a 32bit RGB icon resource. Seems to be either uncompressed or
    an RLE packbits-like scheme.
    """
    (start, length) = start_length
    fobj.seek(start)
    # physical pixel size = logical size * scale factor
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image
        im = Image.new("RGB", pixel_size, None)
        # the three channels are stored separately, each RLE-compressed
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    break
                byte_int = byte[0]
                if byte_int & 0x80:
                    # run: high bit set -> repeat the next byte (count - 125) times
                    blocksize = byte_int - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # literal: copy the next (count + 1) bytes verbatim
                    blocksize = byte_int + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                msg = f"Error reading channel [{repr(bytesleft)} left]"
                raise SyntaxError(msg)
            band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
            im.im.putband(band.im, band_ix)
    return {"RGB": im}
def read_mk(
    fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int]
) -> dict[str, Image.Image]:
    """Read an uncompressed 8-bit alpha mask resource."""
    # Alpha masks seem to be uncompressed
    fobj.seek(start_length[0])
    width = size[0] * size[2]
    height = size[1] * size[2]
    raw = fobj.read(width * height)
    mask = Image.frombuffer("L", (width, height), raw, "raw", "L", 0, 1)
    return {"A": mask}
def read_png_or_jpeg2000(
    fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int]
) -> dict[str, Image.Image]:
    """Read an icon stored as PNG or JPEG 2000 and return it as RGBA.

    :raises ValueError: for unrecognised payloads, or JPEG 2000 data when
        the JPEG 2000 codec is unavailable.
    """
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(12)

    im: Image.Image
    if sig.startswith(b"\x89PNG\x0d\x0a\x1a\x0a"):
        fobj.seek(start)
        im = PngImagePlugin.PngImageFile(fobj)
        Image._decompression_bomb_check(im.size)
        return {"RGBA": im}
    elif (
        sig.startswith((b"\xff\x4f\xff\x51", b"\x0d\x0a\x87\x0a"))
        # JP2 signature box is 12 bytes: length 0x0000000C, type "jP  "
        # (two spaces), then 0x0D0A870A.  The previous 11-byte literal
        # could never equal the 12-byte sig, so JP2 files were rejected.
        or sig == b"\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a"
    ):
        if not enable_jpeg2k:
            msg = (
                "Unsupported icon subimage format (rebuild PIL "
                "with JPEG 2000 support to fix this)"
            )
            raise ValueError(msg)
        # j2k, jpc or j2c
        fobj.seek(start)
        jp2kstream = fobj.read(length)
        f = io.BytesIO(jp2kstream)
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
        Image._decompression_bomb_check(im.size)
        if im.mode != "RGBA":
            im = im.convert("RGBA")
        return {"RGBA": im}
    else:
        msg = "Unsupported icon subimage format"
        raise ValueError(msg)
class IcnsFile:
    # Candidate (block signature, reader) pairs per (width, height, scale).
    SIZES = {
        (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)],
        (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)],
        (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)],
        (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)],
        (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)],
        (128, 128, 1): [
            (b"ic07", read_png_or_jpeg2000),
            (b"it32", read_32t),
            (b"t8mk", read_mk),
        ],
        (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)],
        (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)],
        (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)],
        (32, 32, 1): [
            (b"icp5", read_png_or_jpeg2000),
            (b"il32", read_32),
            (b"l8mk", read_mk),
        ],
        (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)],
        (16, 16, 1): [
            (b"icp4", read_png_or_jpeg2000),
            (b"is32", read_32),
            (b"s8mk", read_mk),
        ],
    }

    def __init__(self, fobj: IO[bytes]) -> None:
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if not _accept(sig):
            msg = "not an icns file"
            raise SyntaxError(msg)
        i = HEADERSIZE
        # walk the chain of (type, length) blocks, recording each payload span
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            if blocksize <= 0:
                msg = "invalid block header"
                raise SyntaxError(msg)
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            self.dct[sig] = (i, blocksize)
            fobj.seek(blocksize, io.SEEK_CUR)
            i += blocksize

    def itersizes(self) -> list[tuple[int, int, int]]:
        # sizes for which at least one stored variant exists in this file
        sizes = []
        for size, fmts in self.SIZES.items():
            for fmt, reader in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self) -> tuple[int, int, int]:
        # largest available (width, height, scale)
        sizes = self.itersizes()
        if not sizes:
            msg = "No 32bit icon resources found"
            raise SyntaxError(msg)
        return max(sizes)

    def dataforsize(self, size: tuple[int, int, int]) -> dict[str, Image.Image]:
        """
        Get an icon resource as {channel: array}. Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(
        self, size: tuple[int, int] | tuple[int, int, int] | None = None
    ) -> Image.Image:
        # Prefer a ready-made RGBA variant; otherwise combine RGB + alpha mask.
        if size is None:
            size = self.bestsize()
        elif len(size) == 2:
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)
        im = channels.get("RGBA")
        if im:
            return im
        im = channels["RGB"].copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            pass
        return im
##
# Image plugin for Mac OS icons.
class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL image support for Mac OS .icns files.
    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self) -> None:
        self.icns = IcnsFile(self.fp)
        self._mode = "RGBA"
        self.info["sizes"] = self.icns.itersizes()
        self.best_size = self.icns.bestsize()
        # reported size is the pixel size: logical size * scale
        self.size = (
            self.best_size[0] * self.best_size[2],
            self.best_size[1] * self.best_size[2],
        )

    @property  # type: ignore[override]
    def size(self) -> tuple[int, int] | tuple[int, int, int]:
        return self._size

    @size.setter
    def size(self, value: tuple[int, int] | tuple[int, int, int]) -> None:
        # setting (width, height, scale) directly is deprecated: use load(scale)
        if len(value) == 3:
            deprecate("Setting size to (width, height, scale)", 12, "load(scale)")
            if value in self.info["sizes"]:
                self._size = value  # type: ignore[assignment]
                return
        else:
            # Check that a matching size exists,
            # or that there is a scale that would create a size that matches
            for size in self.info["sizes"]:
                simple_size = size[0] * size[2], size[1] * size[2]
                scale = simple_size[0] // value[0]
                if simple_size[1] / value[1] == scale:
                    self._size = value
                    return
        msg = "This is not one of the allowed sizes of this image"
        raise ValueError(msg)

    def load(self, scale: int | None = None) -> Image.core.PixelAccess | None:
        # honour an explicit scale, or one previously set via a 3-tuple size
        if scale is not None or len(self.size) == 3:
            if scale is None and len(self.size) == 3:
                scale = self.size[2]
            assert scale is not None
            width, height = self.size[:2]
            self.size = width * scale, height * scale
            self.best_size = width, height, scale

        px = Image.Image.load(self)
        if self._im is not None and self.im.size == self.size:
            # Already loaded
            return px
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
        im = self.icns.getimage(self.best_size)

        # If this is a PNG or JPEG 2000, it won't be loaded yet
        px = im.load()

        self.im = im.im
        self._mode = im.mode
        self.size = im.size

        return px
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """
    Saves the image as a series of PNG files,
    that are then combined into a .icns file.
    """
    if hasattr(fp, "flush"):
        fp.flush()

    # icon block type -> pixel size
    sizes = {
        b"ic07": 128,
        b"ic08": 256,
        b"ic09": 512,
        b"ic10": 1024,
        b"ic11": 32,
        b"ic12": 64,
        b"ic13": 256,
        b"ic14": 512,
    }
    # prefer a caller-supplied image of matching width; otherwise resize
    provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])}
    size_streams = {}
    for size in set(sizes.values()):
        image = (
            provided_images[size]
            if size in provided_images
            else im.resize((size, size))
        )

        temp = io.BytesIO()
        image.save(temp, "png")
        size_streams[size] = temp.getvalue()

    entries = []
    for type, size in sizes.items():
        stream = size_streams[size]
        entries.append((type, HEADERSIZE + len(stream), stream))

    # Header
    fp.write(MAGIC)
    file_length = HEADERSIZE  # Header
    file_length += HEADERSIZE + 8 * len(entries)  # TOC
    file_length += sum(entry[1] for entry in entries)
    fp.write(struct.pack(">i", file_length))

    # TOC
    fp.write(b"TOC ")
    fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE))
    for entry in entries:
        fp.write(entry[0])
        fp.write(struct.pack(">i", entry[1]))

    # Data
    for entry in entries:
        fp.write(entry[0])
        fp.write(struct.pack(">i", entry[1]))
        fp.write(entry[2])

    if hasattr(fp, "flush"):
        fp.flush()
def _accept(prefix: bytes) -> bool:
    """Sniff test: the 4-byte "icns" container signature."""
    return prefix[: len(MAGIC)] == MAGIC
# Register the ICNS plugin with the Image machinery.
Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept)
Image.register_extension(IcnsImageFile.format, ".icns")
Image.register_save(IcnsImageFile.format, _save)
Image.register_mime(IcnsImageFile.format, "image/icns")
if __name__ == "__main__":
    # Smoke-test CLI: dump every stored size of an .icns file to PNG files.
    if len(sys.argv) < 2:
        print("Syntax: python3 IcnsImagePlugin.py [file]")
        sys.exit()

    with open(sys.argv[1], "rb") as fp:
        imf = IcnsImageFile(fp)
        for size in imf.info["sizes"]:
            width, height, scale = imf.size = size
            imf.save(f"out-{width}-{height}-{scale}.png")
        with Image.open(sys.argv[1]) as im:
            im.save("out.png")
        # sys.platform is "win32" on Windows; the previous comparison against
        # "windows" never matched, so the preview never opened.
        if sys.platform == "win32":
            os.startfile("out.png")
venv\Lib\site-packages\PIL\IcoImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# Windows Icon support for PIL
#
# History:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
# .
# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
#
# Icon format references:
# * https://en.wikipedia.org/wiki/ICO_(file_format)
# * https://msdn.microsoft.com/en-us/library/ms997538.aspx
from __future__ import annotations
import warnings
from io import BytesIO
from math import ceil, log
from typing import IO, NamedTuple
from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8
from ._binary import o16le as o16
from ._binary import o32le as o32
#
# --------------------------------------------------------------------

# ICONDIR header prefix: reserved word 0, resource type 1 (icon).
_MAGIC = b"\0\0\1\0"
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* (plus any ``append_images``) as a multi-size .ico file."""
    fp.write(_MAGIC)  # (2+2)
    bmp = im.encoderinfo.get("bitmap_format") == "bmp"
    sizes = im.encoderinfo.get(
        "sizes",
        [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
    )
    frames = []
    provided_ims = [im] + im.encoderinfo.get("append_images", [])
    width, height = im.size
    for size in sorted(set(sizes)):
        # ICO entries cannot exceed 256px or the source image dimensions
        if size[0] > width or size[1] > height or size[0] > 256 or size[1] > 256:
            continue

        for provided_im in provided_ims:
            if provided_im.size != size:
                continue
            frames.append(provided_im)
            if bmp:
                bits = BmpImagePlugin.SAVE[provided_im.mode][1]
                bits_used = [bits]
                for other_im in provided_ims:
                    if other_im.size != size:
                        continue
                    bits = BmpImagePlugin.SAVE[other_im.mode][1]
                    if bits not in bits_used:
                        # Another image has been supplied for this size
                        # with a different bit depth
                        frames.append(other_im)
                        bits_used.append(bits)
            break
        else:
            # TODO: invent a more convenient method for proportional scalings
            frame = provided_im.copy()
            frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None)
            frames.append(frame)
    fp.write(o16(len(frames)))  # idCount(2)
    # image payloads start right after the 16-byte directory entries
    offset = fp.tell() + len(frames) * 16
    for frame in frames:
        width, height = frame.size
        # 0 means 256
        fp.write(o8(width if width < 256 else 0))  # bWidth(1)
        fp.write(o8(height if height < 256 else 0))  # bHeight(1)

        bits, colors = BmpImagePlugin.SAVE[frame.mode][1:] if bmp else (32, 0)
        fp.write(o8(colors))  # bColorCount(1)
        fp.write(b"\0")  # bReserved(1)
        fp.write(b"\0\0")  # wPlanes(2)
        fp.write(o16(bits))  # wBitCount(2)

        image_io = BytesIO()
        if bmp:
            frame.save(image_io, "dib")

            if bits != 32:
                # sub-32bpp BMP icons require an explicit 1bpp AND mask
                and_mask = Image.new("1", size)
                ImageFile._save(
                    and_mask,
                    image_io,
                    [ImageFile._Tile("raw", (0, 0) + size, 0, ("1", 0, -1))],
                )
        else:
            frame.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        if bmp:
            # patch the BMP header height to cover the XOR + AND images
            image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:]
        bytes_len = len(image_bytes)
        fp.write(o32(bytes_len))  # dwBytesInRes(4)
        fp.write(o32(offset))  # dwImageOffset(4)
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)
def _accept(prefix: bytes) -> bool:
    """Sniff test: the 4-byte ICONDIR header (reserved=0, type=1)."""
    return prefix[: len(_MAGIC)] == _MAGIC
class IconHeader(NamedTuple):
    """One parsed ICONDIRENTRY, plus derived convenience fields."""

    width: int
    height: int
    nb_color: int  # number of palette colors, 0 when >= 8bpp
    reserved: int
    planes: int
    bpp: int
    size: int  # length of the image data in bytes
    offset: int  # file offset of the image data
    dim: tuple[int, int]  # (width, height)
    square: int  # width * height, used to sort by area
    color_depth: int  # effective bit depth, derived from bpp/nb_color
class IcoFile:
    def __init__(self, buf: IO[bytes]) -> None:
        """
        Parse image from file-like object containing ico file data
        """

        # check magic
        s = buf.read(6)
        if not _accept(s):
            msg = "not an ICO file"
            raise SyntaxError(msg)

        self.buf = buf
        self.entry = []

        # Number of items in file
        self.nb_items = i16(s, 4)

        # Get headers for each item
        for i in range(self.nb_items):
            s = buf.read(16)

            # See Wikipedia
            width = s[0] or 256
            height = s[1] or 256

            # No. of colors in image (0 if >=8bpp)
            nb_color = s[2]
            bpp = i16(s, 6)
            icon_header = IconHeader(
                width=width,
                height=height,
                nb_color=nb_color,
                reserved=s[3],
                planes=i16(s, 4),
                bpp=i16(s, 6),
                size=i32(s, 8),
                offset=i32(s, 12),
                dim=(width, height),
                square=width * height,
                # See Wikipedia notes about color depth.
                # We need this just to differ images with equal sizes
                color_depth=bpp or (nb_color != 0 and ceil(log(nb_color, 2))) or 256,
            )

            self.entry.append(icon_header)

        # sort by color depth, then (stably) by area, largest area first
        self.entry = sorted(self.entry, key=lambda x: x.color_depth)
        # ICO images are usually squares
        self.entry = sorted(self.entry, key=lambda x: x.square, reverse=True)

    def sizes(self) -> set[tuple[int, int]]:
        """
        Get a set of all available icon sizes and color depths.
        """
        return {(h.width, h.height) for h in self.entry}

    def getentryindex(self, size: tuple[int, int], bpp: int | bool = False) -> int:
        # index of the first entry matching size (and bpp, if given); else 0
        for i, h in enumerate(self.entry):
            if size == h.dim and (bpp is False or bpp == h.color_depth):
                return i
        return 0

    def getimage(self, size: tuple[int, int], bpp: int | bool = False) -> Image.Image:
        """
        Get an image from the icon
        """
        return self.frame(self.getentryindex(size, bpp))

    def frame(self, idx: int) -> Image.Image:
        """
        Get an image from frame idx
        """

        header = self.entry[idx]

        self.buf.seek(header.offset)
        data = self.buf.read(8)
        self.buf.seek(header.offset)

        im: Image.Image
        if data[:8] == PngImagePlugin._MAGIC:
            # png frame
            im = PngImagePlugin.PngImageFile(self.buf)
            Image._decompression_bomb_check(im.size)
        else:
            # XOR + AND mask bmp frame
            im = BmpImagePlugin.DibImageFile(self.buf)
            Image._decompression_bomb_check(im.size)

            # change tile dimension to only encompass XOR image
            im._size = (im.size[0], int(im.size[1] / 2))
            d, e, o, a = im.tile[0]
            im.tile[0] = ImageFile._Tile(d, (0, 0) + im.size, o, a)

            # figure out where AND mask image starts
            if header.bpp == 32:
                # 32-bit color depth icon image allows semitransparent areas
                # PIL's DIB format ignores transparency bits, recover them.
                # The DIB is packed in BGRX byte order where X is the alpha
                # channel.

                # Back up to start of bmp data
                self.buf.seek(o)
                # extract every 4th byte (eg. 3,7,11,15,...)
                alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]

                # convert to an 8bpp grayscale image
                try:
                    mask = Image.frombuffer(
                        "L",  # 8bpp
                        im.size,  # (w, h)
                        alpha_bytes,  # source chars
                        "raw",  # raw decoder
                        ("L", 0, -1),  # 8bpp inverted, unpadded, reversed
                    )
                except ValueError:
                    if ImageFile.LOAD_TRUNCATED_IMAGES:
                        mask = None
                    else:
                        raise
            else:
                # get AND image from end of bitmap
                w = im.size[0]
                if (w % 32) > 0:
                    # bitmap row data is aligned to word boundaries
                    w += 32 - (im.size[0] % 32)

                # the total mask data is
                # padded row size * height / bits per char
                total_bytes = int((w * im.size[1]) / 8)
                and_mask_offset = header.offset + header.size - total_bytes

                self.buf.seek(and_mask_offset)
                mask_data = self.buf.read(total_bytes)

                # convert raw data to image
                try:
                    mask = Image.frombuffer(
                        "1",  # 1 bpp
                        im.size,  # (w, h)
                        mask_data,  # source chars
                        "raw",  # raw decoder
                        ("1;I", int(w / 8), -1),  # 1bpp inverted, padded, reversed
                    )
                except ValueError:
                    if ImageFile.LOAD_TRUNCATED_IMAGES:
                        mask = None
                    else:
                        raise

            # now we have two images, im is XOR image and mask is AND image

            # apply mask image as alpha channel
            if mask:
                im = im.convert("RGBA")
                im.putalpha(mask)

        return im
##
# Image plugin for Windows Icon files.
class IcoImageFile(ImageFile.ImageFile):
    """
    PIL read-only image support for Microsoft Windows .ico files.

    By default the largest resolution image in the file will be loaded. This
    can be changed by altering the 'size' attribute before calling 'load'.

    The info dictionary has a key 'sizes' that is a list of the sizes available
    in the icon file.

    Handles classic, XP and Vista icon formats.

    When saving, PNG compression is used. Support for this was only added in
    Windows Vista. If you are unable to view the icon in Windows, convert the
    image to "RGBA" mode before saving.

    This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
    .
    https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
    """

    format = "ICO"
    format_description = "Windows Icon"

    def _open(self) -> None:
        self.ico = IcoFile(self.fp)
        self.info["sizes"] = self.ico.sizes()
        self.size = self.ico.entry[0].dim
        self.load()

    @property
    def size(self) -> tuple[int, int]:
        return self._size

    @size.setter
    def size(self, value: tuple[int, int]) -> None:
        # only sizes actually present in the file may be selected
        if value not in self.info["sizes"]:
            msg = "This is not one of the allowed sizes of this image"
            raise ValueError(msg)
        self._size = value

    def load(self) -> Image.core.PixelAccess | None:
        if self._im is not None and self.im.size == self.size:
            # Already loaded
            return Image.Image.load(self)
        im = self.ico.getimage(self.size)
        # if tile is PNG, it won't really be loaded yet
        im.load()
        self.im = im.im
        self._mode = im.mode
        if im.palette:
            self.palette = im.palette
        if im.size != self.size:
            warnings.warn("Image was not the expected size")

            # reconcile the advertised sizes with what was actually decoded
            index = self.ico.getentryindex(self.size)
            sizes = list(self.info["sizes"])
            sizes[index] = im.size
            self.info["sizes"] = set(sizes)

            self.size = im.size
        return Image.Image.load(self)

    def load_seek(self, pos: int) -> None:
        # Flag the ImageFile.Parser so that it
        # just does all the decode at the end.
        pass
#
# --------------------------------------------------------------------

# Register the ICO plugin with the Image machinery (_accept and _save
# are defined earlier in this plugin file).
Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")
Image.register_mime(IcoImageFile.format, "image/x-icon")
---
### `venv\Lib\site-packages\PIL\Image.py`
#
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import abc
import atexit
import builtins
import io
import logging
import math
import os
import re
import struct
import sys
import tempfile
import warnings
from collections.abc import Callable, Iterator, MutableMapping, Sequence
from enum import IntEnum
from types import ModuleType
from typing import IO, Any, Literal, Protocol, cast
# VERSION was removed in Pillow 6.0.0.
# PILLOW_VERSION was removed in Pillow 9.0.0.
# Use __version__ instead.
from . import (
ExifTags,
ImageMode,
TiffTags,
UnidentifiedImageError,
__version__,
_plugins,
)
from ._binary import i32le, o32be, o32le
from ._deprecate import deprecate
from ._util import DeferredError, is_path
# Optional XML support: prefer the hardened defusedxml parser; when it is
# not installed, ElementTree is None and XML features are unavailable.
# NOTE(review): the consumer of this module object is outside this chunk.
ElementTree: ModuleType | None
try:
    from defusedxml import ElementTree
except ImportError:
    ElementTree = None

# Module-level logger, configured by the embedding application.
logger = logging.getLogger(__name__)
class DecompressionBombWarning(RuntimeWarning):
    """Warning for potential decompression bombs (see MAX_IMAGE_PIXELS)."""

    pass


class DecompressionBombError(Exception):
    """Error for decompression bombs (see MAX_IMAGE_PIXELS)."""

    pass
# Whether to warn about image formats that only loosely matched during
# detection. NOTE(review): the code reading this flag is outside this chunk.
WARN_POSSIBLE_FORMATS: bool = False

# Limit to around a quarter gigabyte for a 24-bit (3 bpp) image
MAX_IMAGE_PIXELS: int | None = int(1024 * 1024 * 1024 // 4 // 3)
try:
    # If the _imaging C module is not present, Pillow will not load.
    # Note that other modules should not refer to _imaging directly;
    # import Image and use the Image.core variable instead.
    # Also note that Image.core is not a publicly documented interface,
    # and should be considered private and subject to change.
    from . import _imaging as core

    # Refuse to run against a C extension built for a different Pillow
    # release; a version mismatch would fail in far less obvious ways later.
    if __version__ != getattr(core, "PILLOW_VERSION", None):
        msg = (
            "The _imaging extension was built for another version of Pillow or PIL:\n"
            f"Core version: {getattr(core, 'PILLOW_VERSION', None)}\n"
            f"Pillow version: {__version__}"
        )
        raise ImportError(msg)

except ImportError as v:
    # Replace `core` with a stub whose every use re-raises a helpful error.
    core = DeferredError.new(ImportError("The _imaging C module is not installed."))
    # Explanations for ways that we know we might have an import error
    if str(v).startswith("Module use of python"):
        # The _imaging C module is present, but not compiled for
        # the right version (windows only). Print a warning, if
        # possible.
        warnings.warn(
            "The _imaging extension was built for another version of Python.",
            RuntimeWarning,
        )
    elif str(v).startswith("The _imaging extension"):
        # The version-mismatch ImportError raised above: also surface it
        # as a warning before failing.
        warnings.warn(str(v), RuntimeWarning)
    # Fail here anyway. Don't let people run with a mostly broken Pillow.
    # see docs/porting.rst
    raise
def isImageType(t: Any) -> TypeGuard[Image]:
    """
    Check whether an object is an image object.

    .. warning::
        This function is for internal use only.

    :param t: object to check if it's an image
    :returns: True if the object is an image
    """
    # Deprecated duck-type check; callers should use isinstance() instead.
    deprecate("Image.isImageType(im)", 12, "isinstance(im, Image.Image)")
    return hasattr(t, "im")
#
# Constants
# transpose
# transpose
class Transpose(IntEnum):
    """Flip/rotate operation codes."""

    FLIP_LEFT_RIGHT = 0
    FLIP_TOP_BOTTOM = 1
    ROTATE_90 = 2
    ROTATE_180 = 3
    ROTATE_270 = 4
    TRANSPOSE = 5
    TRANSVERSE = 6
# transforms (also defined in Imaging.h)
class Transform(IntEnum):
    """Geometric transform codes (also defined in Imaging.h)."""

    AFFINE = 0
    EXTENT = 1
    PERSPECTIVE = 2
    QUAD = 3
    MESH = 4
# resampling filters (also defined in Imaging.h)
class Resampling(IntEnum):
    """Resampling filter codes (also defined in Imaging.h)."""

    NEAREST = 0
    BOX = 4
    BILINEAR = 2
    HAMMING = 5
    BICUBIC = 3
    LANCZOS = 1


# Support (radius) of each interpolation filter, in source pixels.
# NOTE(review): the consumer of this table is outside this chunk.
_filters_support = {
    Resampling.BOX: 0.5,
    Resampling.BILINEAR: 1.0,
    Resampling.HAMMING: 1.0,
    Resampling.BICUBIC: 2.0,
    Resampling.LANCZOS: 3.0,
}
# dithers
# dithers
class Dither(IntEnum):
    """Dithering methods, used by convert() and quantize()."""

    NONE = 0
    ORDERED = 1  # Not yet implemented
    RASTERIZE = 2  # Not yet implemented
    FLOYDSTEINBERG = 3  # default
# palettes/quantizers
# palettes/quantizers
class Palette(IntEnum):
    """Palette choices for convert() to mode "P"."""

    WEB = 0
    ADAPTIVE = 1


class Quantize(IntEnum):
    """Quantization algorithms accepted by quantize()."""

    MEDIANCUT = 0
    MAXCOVERAGE = 1
    FASTOCTREE = 2
    LIBIMAGEQUANT = 3
# Re-export every enum member at module level (e.g. Image.FLIP_LEFT_RIGHT)
# for backwards compatibility with the pre-enum integer constants.
module = sys.modules[__name__]
for enum in (Transpose, Transform, Resampling, Dither, Palette, Quantize):
    for item in enum:
        setattr(module, item.name, item.value)


# Compression strategy constants re-exported from the C core, when the
# core was built with them.
if hasattr(core, "DEFAULT_STRATEGY"):
    DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
    FILTERED = core.FILTERED
    HUFFMAN_ONLY = core.HUFFMAN_ONLY
    RLE = core.RLE
    FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
# Always False at runtime, so the imports below are seen only by static
# type checkers.
TYPE_CHECKING = False
if TYPE_CHECKING:
    import mmap
    from xml.etree.ElementTree import Element

    from IPython.lib.pretty import PrettyPrinter

    from . import ImageFile, ImageFilter, ImagePalette, ImageQt, TiffImagePlugin
    from ._typing import CapsuleType, NumpyArray, StrOrBytesPath, TypeGuard

# Plugin registries, filled in by the register_* functions as format
# plugins are imported.
ID: list[str] = []  # format identifiers, in registration order
OPEN: dict[
    str,
    tuple[
        Callable[[IO[bytes], str | bytes], ImageFile.ImageFile],
        Callable[[bytes], bool | str] | None,
    ],
] = {}  # format -> (open factory, accept predicate)
MIME: dict[str, str] = {}  # format -> MIME type
SAVE: dict[str, Callable[[Image, IO[bytes], str | bytes], None]] = {}
SAVE_ALL: dict[str, Callable[[Image, IO[bytes], str | bytes], None]] = {}
EXTENSION: dict[str, str] = {}  # file extension -> format
DECODERS: dict[str, type[ImageFile.PyDecoder]] = {}
ENCODERS: dict[str, type[ImageFile.PyEncoder]] = {}
# --------------------------------------------------------------------
# Modes
# struct/numpy byte-order prefix for the host machine ("<" little, ">" big).
_ENDIAN = "<" if sys.byteorder == "little" else ">"
def _conv_type_shape(im: Image) -> tuple[tuple[int, ...], str]:
    """Return the numpy-style ``(shape, typestr)`` pair describing *im*."""
    descriptor = ImageMode.getmode(im.mode)
    nbands = len(descriptor.bands)
    shape: tuple[int, ...] = (im.height, im.width)
    if nbands != 1:
        # Multi-band images get a trailing channel axis.
        shape += (nbands,)
    return shape, descriptor.typestr
# Canonical, publicly supported image modes (see :ref:`concept-modes`).
MODES = [
    "1",
    "CMYK",
    "F",
    "HSV",
    "I",
    "I;16",
    "I;16B",
    "I;16L",
    "I;16N",
    "L",
    "LA",
    "La",
    "LAB",
    "P",
    "PA",
    "RGB",
    "RGBA",
    "RGBa",
    "RGBX",
    "YCbCr",
]

# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode: str) -> str:
    """
    Gets the "base" mode for given mode. This function returns "L" for
    images that contain grayscale data, and "RGB" for images that
    contain color data.

    :param mode: Input mode.
    :returns: "L" or "RGB".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return descriptor.basemode
def getmodetype(mode: str) -> str:
    """
    Gets the storage type mode. Given a mode, this function returns a
    single-layer mode suitable for storing individual bands.

    :param mode: Input mode.
    :returns: "L", "I", or "F".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return descriptor.basetype
def getmodebandnames(mode: str) -> tuple[str, ...]:
    """
    Gets a list of individual band names. Given a mode, this function returns
    a tuple containing the names of individual bands (use
    :py:meth:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band).

    :param mode: Input mode.
    :returns: A tuple containing band names. The length of the tuple
        gives the number of bands in an image of the given mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return descriptor.bands
def getmodebands(mode: str) -> int:
    """
    Gets the number of individual bands for this mode.

    :param mode: Input mode.
    :returns: The number of bands in this mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return len(descriptor.bands)
# --------------------------------------------------------------------
# Helpers

# Plugin initialisation state: 0 = nothing, 1 = preinit() done, 2 = init() done.
_initialized = 0
def preinit() -> None:
    """
    Explicitly loads BMP, GIF, JPEG, PPM and PNG file format drivers.

    It is called when opening or saving images.
    """

    global _initialized
    if _initialized >= 1:
        return

    # Each plugin registers itself with the format registries on import;
    # the bare asserts only silence "unused import" warnings. Plugins
    # that fail to import are simply skipped.
    try:
        from . import BmpImagePlugin

        assert BmpImagePlugin
    except ImportError:
        pass
    try:
        from . import GifImagePlugin

        assert GifImagePlugin
    except ImportError:
        pass
    try:
        from . import JpegImagePlugin

        assert JpegImagePlugin
    except ImportError:
        pass
    try:
        from . import PpmImagePlugin

        assert PpmImagePlugin
    except ImportError:
        pass
    try:
        from . import PngImagePlugin

        assert PngImagePlugin
    except ImportError:
        pass

    _initialized = 1
def init() -> bool:
    """
    Explicitly initializes the Python Imaging Library. This function
    loads all available file format drivers.

    It is called when opening or saving images if :py:meth:`~preinit()` is
    insufficient, and by :py:meth:`~PIL.features.pilinfo`.
    """

    global _initialized
    if _initialized >= 2:
        return False

    # Import every known plugin module; each one registers itself.
    parent_name = __name__.rpartition(".")[0]
    for plugin_name in _plugins:
        try:
            logger.debug("Importing %s", plugin_name)
            __import__(f"{parent_name}.{plugin_name}", globals(), locals(), [])
        except ImportError as exc:
            logger.debug("Image: failed to import %s: %s", plugin_name, exc)

    # Only consider initialisation successful once at least one plugin
    # actually registered an open or save handler.
    if not (OPEN or SAVE):
        return False
    _initialized = 2
    return True
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(
    mode: str, decoder_name: str, args: Any, extra: tuple[Any, ...] = ()
) -> core.ImagingDecoder | ImageFile.PyDecoder:
    """Instantiate the decoder named *decoder_name*.

    Python decoders registered in DECODERS take precedence over the C
    implementations exposed by the _imaging core.

    :param mode: Image mode the decoder operates on.
    :param args: Decoder arguments; a bare value is wrapped into a tuple.
    :param extra: Additional trailing decoder arguments.
    :raises OSError: If no decoder with that name is available.
    """
    # Normalize args to a tuple.
    if args is None:
        args = ()
    elif not isinstance(args, tuple):
        args = (args,)

    py_decoder = DECODERS.get(decoder_name)
    if py_decoder is not None:
        return py_decoder(mode, *args + extra)

    try:
        # Fall back to the C decoder from the _imaging core.
        c_decoder = getattr(core, f"{decoder_name}_decoder")
    except AttributeError as e:
        msg = f"decoder {decoder_name} not available"
        raise OSError(msg) from e
    return c_decoder(mode, *args + extra)
def _getencoder(
    mode: str, encoder_name: str, args: Any, extra: tuple[Any, ...] = ()
) -> core.ImagingEncoder | ImageFile.PyEncoder:
    """Instantiate the encoder named *encoder_name*.

    Python encoders registered in ENCODERS take precedence over the C
    implementations exposed by the _imaging core.

    :param mode: Image mode the encoder operates on.
    :param args: Encoder arguments; a bare value is wrapped into a tuple.
    :param extra: Additional trailing encoder arguments.
    :raises OSError: If no encoder with that name is available.
    """
    # Normalize args to a tuple.
    if args is None:
        args = ()
    elif not isinstance(args, tuple):
        args = (args,)

    py_encoder = ENCODERS.get(encoder_name)
    if py_encoder is not None:
        return py_encoder(mode, *args + extra)

    try:
        # Fall back to the C encoder from the _imaging core.
        c_encoder = getattr(core, f"{encoder_name}_encoder")
    except AttributeError as e:
        msg = f"encoder {encoder_name} not available"
        raise OSError(msg) from e
    return c_encoder(mode, *args + extra)
# --------------------------------------------------------------------
# Simple expression analyzer
class ImagePointTransform:
    """
    Used with :py:meth:`~PIL.Image.Image.point` for single band images with
    more than 8 bits. Represents the affine map ``value * scale + offset``.

    Arithmetic with plain numbers (and addition/subtraction of other
    transforms) composes a new transform; multiplying or dividing two
    transforms is not defined.
    """

    def __init__(self, scale: float, offset: float) -> None:
        self.scale = scale
        self.offset = offset

    def __neg__(self) -> ImagePointTransform:
        return ImagePointTransform(-self.scale, -self.offset)

    def __add__(self, other: ImagePointTransform | float) -> ImagePointTransform:
        if not isinstance(other, ImagePointTransform):
            # Adding a constant shifts only the offset.
            return ImagePointTransform(self.scale, self.offset + other)
        return ImagePointTransform(
            self.scale + other.scale, self.offset + other.offset
        )

    __radd__ = __add__

    def __sub__(self, other: ImagePointTransform | float) -> ImagePointTransform:
        return self + -other

    def __rsub__(self, other: ImagePointTransform | float) -> ImagePointTransform:
        return -self + other

    def __mul__(self, other: ImagePointTransform | float) -> ImagePointTransform:
        if isinstance(other, ImagePointTransform):
            return NotImplemented
        return ImagePointTransform(self.scale * other, self.offset * other)

    __rmul__ = __mul__

    def __truediv__(self, other: ImagePointTransform | float) -> ImagePointTransform:
        if isinstance(other, ImagePointTransform):
            return NotImplemented
        return ImagePointTransform(self.scale / other, self.offset / other)
def _getscaleoffset(
    expr: Callable[[ImagePointTransform], ImagePointTransform | float],
) -> tuple[float, float]:
    """Evaluate *expr* symbolically and return its ``(scale, offset)`` pair.

    A constant result means the expression ignored its input (scale 0).
    """
    result = expr(ImagePointTransform(1, 0))
    if isinstance(result, ImagePointTransform):
        return result.scale, result.offset
    return 0, result
# --------------------------------------------------------------------
# Implementation wrapper
class SupportsGetData(Protocol):
    """Structural type: any object whose getdata() yields a
    (Transform, argument-sequence) pair."""

    def getdata(
        self,
    ) -> tuple[Transform, Sequence[int]]: ...
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
# Format identifier (e.g. "ICO"), set by file-format plugin subclasses.
format: str | None = None
# Human-readable format name, set by plugin subclasses.
format_description: str | None = None
# Whether load() may close an exclusively-owned file pointer afterwards.
_close_exclusive_fp_after_loading = True
def __init__(self) -> None:
    # FIXME: take "new" parameters / other image?
    self._im: core.ImagingCore | DeferredError | None = None  # C-level image, once allocated
    self._mode = ""  # mode string, e.g. "RGB" (see MODES)
    self._size = (0, 0)  # (width, height) in pixels
    self.palette: ImagePalette.ImagePalette | None = None  # for "P"/"PA" images
    self.info: dict[str | tuple[int, int], Any] = {}  # per-image metadata
    self.readonly = 0  # non-zero -> copy-on-write via _ensure_mutable()
    self._exif: Exif | None = None  # lazily-populated EXIF wrapper
@property
def im(self) -> core.ImagingCore:
    # Access the C-level image; a closed/broken image stores a
    # DeferredError here, which re-raises its original exception.
    if isinstance(self._im, DeferredError):
        raise self._im.ex
    assert self._im is not None
    return self._im

@im.setter
def im(self, im: core.ImagingCore) -> None:
    self._im = im

@property
def width(self) -> int:
    # Width in pixels.
    return self.size[0]

@property
def height(self) -> int:
    # Height in pixels.
    return self.size[1]

@property
def size(self) -> tuple[int, int]:
    # (width, height) in pixels.
    return self._size

@property
def mode(self) -> str:
    # Pixel format, e.g. "L", "RGB" (see MODES).
    return self._mode

@property
def readonly(self) -> int:
    # Read-only when either the core image or this wrapper is flagged;
    # the `self._im and ...` form also tolerates _im being None.
    return (self._im and self._im.readonly) or self._readonly

@readonly.setter
def readonly(self, readonly: int) -> None:
    self._readonly = readonly
def _new(self, im: core.ImagingCore) -> Image:
    """Wrap the core image *im* in a fresh Image, carrying over this
    image's palette (for palette modes) and a copy of its info dict."""
    wrapped = Image()
    wrapped.im = im
    wrapped._mode = im.mode
    wrapped._size = im.size
    if im.mode in ("P", "PA"):
        if not self.palette:
            from . import ImagePalette

            wrapped.palette = ImagePalette.ImagePalette()
        else:
            wrapped.palette = self.palette.copy()
    wrapped.info = self.info.copy()
    return wrapped
# Context manager support
def __enter__(self):
    return self

def __exit__(self, *args):
    # Only file-backed images hold a file pointer; release it, and close
    # it only when this image opened the file exclusively.
    from . import ImageFile

    if isinstance(self, ImageFile.ImageFile):
        if getattr(self, "_exclusive_fp", False):
            self._close_fp()
        self.fp = None
def close(self) -> None:
    """
    This operation will destroy the image core and release its memory.
    The image data will be unusable afterward.

    This function is required to close images that have multiple frames or
    have not had their file read and closed by the
    :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
    more information.
    """
    if getattr(self, "map", None):
        # PyPy on Windows: close the memory map explicitly.
        if sys.platform == "win32" and hasattr(sys, "pypy_version_info"):
            self.map.close()
        self.map: mmap.mmap | None = None

    # Instead of simply setting to None, we're setting up a
    # deferred error that will better explain that the core image
    # object is gone.
    self._im = DeferredError(ValueError("Operation on closed image"))
def _copy(self) -> None:
    # Replace the core image with a private copy and clear the
    # read-only flag.
    self.load()
    self.im = self.im.copy()
    self.readonly = 0

def _ensure_mutable(self) -> None:
    # Copy-on-write: take a private copy if the pixel data is read-only,
    # otherwise just make sure it is loaded.
    if self.readonly:
        self._copy()
    else:
        self.load()
def _dump(
    self, file: str | None = None, format: str | None = None, **options: Any
) -> str:
    """Save the image to *file* (or a fresh temp file) and return the
    filename used.

    With no format, or format "PPM", the raw core ``save_ppm`` path is
    used; otherwise the normal :py:meth:`save` machinery.
    """
    suffix = f".{format}" if format else ""

    if file:
        # Ensure the chosen filename carries the format suffix.
        filename = file if file.endswith(suffix) else file + suffix
    else:
        fd, filename = tempfile.mkstemp(suffix)
        os.close(fd)

    self.load()
    if format and format != "PPM":
        self.save(filename, format, **options)
    else:
        self.im.save_ppm(filename)
    return filename
def __eq__(self, other: object) -> bool:
    """Images are equal when class, mode, size, info, palette and raw
    pixel data all match."""
    if other.__class__ is not self.__class__:
        return False
    assert isinstance(other, Image)
    if self.mode != other.mode or self.size != other.size:
        return False
    if self.info != other.info or self.getpalette() != other.getpalette():
        return False
    # Pixel comparison last: it is the expensive part.
    return self.tobytes() == other.tobytes()
def __repr__(self) -> str:
    """Debug representation including mode, size and object identity."""
    cls = self.__class__
    return (
        f"<{cls.__module__}.{cls.__name__} "
        f"image mode={self.mode} size={self.size[0]}x{self.size[1]} "
        f"at 0x{id(self):X}>"
    )
def _repr_pretty_(self, p: PrettyPrinter, cycle: bool) -> None:
    """IPython plain text display support"""
    # Same as __repr__ but without unpredictable id(self),
    # to keep Jupyter notebook `text/plain` output stable.
    cls = self.__class__
    p.text(
        f"<{cls.__module__}.{cls.__name__} "
        f"image mode={self.mode} size={self.size[0]}x{self.size[1]}>"
    )
def _repr_image(self, image_format: str, **kwargs: Any) -> bytes | None:
    """Helper function for the iPython display hooks.

    :param image_format: Image format.
    :returns: image as bytes, saved into the given format, or None when
        the image cannot be saved in that format.
    """
    buffer = io.BytesIO()
    try:
        self.save(buffer, image_format, **kwargs)
    except Exception:
        # Any save failure means "no representation available".
        return None
    return buffer.getvalue()
def _repr_png_(self) -> bytes | None:
    """iPython display hook support for PNG format.

    :returns: PNG version of the image as bytes
    """
    # compress_level=1: favor speed over size for interactive display.
    return self._repr_image("PNG", compress_level=1)

def _repr_jpeg_(self) -> bytes | None:
    """iPython display hook support for JPEG format.

    :returns: JPEG version of the image as bytes
    """
    return self._repr_image("JPEG")
@property
def __array_interface__(self) -> dict[str, str | bytes | int | tuple[int, ...]]:
    # numpy array interface support (protocol version 3)
    new: dict[str, str | bytes | int | tuple[int, ...]] = {"version": 3}
    if self.mode == "1":
        # Binary images need to be extended from bits to bytes
        # See: https://github.com/python-pillow/Pillow/issues/350
        new["data"] = self.tobytes("raw", "L")
    else:
        new["data"] = self.tobytes()
    new["shape"], new["typestr"] = _conv_type_shape(self)
    return new
def __arrow_c_schema__(self) -> object:
    # Arrow PyCapsule interface: schema describing the pixel buffer.
    self.load()
    return self.im.__arrow_c_schema__()

def __arrow_c_array__(
    self, requested_schema: object | None = None
) -> tuple[object, object]:
    # Arrow PyCapsule interface: (schema, array) pair. requested_schema
    # is accepted for protocol compatibility but not acted upon here.
    self.load()
    return (self.im.__arrow_c_schema__(), self.im.__arrow_c_array__())
def __getstate__(self) -> list[Any]:
    # Pickle support: capture info, mode, size, palette and raw pixels.
    im_data = self.tobytes()  # load image first
    return [self.info, self.mode, self.size, self.getpalette(), im_data]

def __setstate__(self, state: list[Any]) -> None:
    # Pickle support: rebuild the core image from the captured state.
    Image.__init__(self)
    info, mode, size, palette, data = state[:5]
    self.info = info
    self._mode = mode
    self._size = size
    self.im = core.new(mode, size)
    if mode in ("L", "LA", "P", "PA") and palette:
        self.putpalette(palette)
    self.frombytes(data)
def tobytes(self, encoder_name: str = "raw", *args: Any) -> bytes:
    """
    Return image as a bytes object.

    .. warning::

        This method returns raw image data derived from Pillow's internal
        storage. For compressed image data (e.g. PNG, JPEG) use
        :meth:`~.save`, with a BytesIO parameter for in-memory data.

    :param encoder_name: What encoder to use.
        The default is to use the standard "raw" encoder.
        To see how this packs pixel data into the returned
        bytes, see :file:`libImaging/Pack.c`.

        A list of C encoders can be seen under codecs
        section of the function array in
        :file:`_imaging.c`. Python encoders are registered
        within the relevant plugins.
    :param args: Extra arguments to the encoder.
    :returns: A :py:class:`bytes` object.
    """
    encoder_args: Any = args
    if len(encoder_args) == 1 and isinstance(encoder_args[0], tuple):
        # may pass tuple instead of argument list
        encoder_args = encoder_args[0]

    # Default raw encoder argument: pack in the image's own mode.
    if encoder_name == "raw" and encoder_args == ():
        encoder_args = self.mode

    self.load()

    if self.width == 0 or self.height == 0:
        return b""

    # unpack data
    e = _getencoder(self.mode, encoder_name, encoder_args)
    e.setimage(self.im)

    from . import ImageFile

    bufsize = max(ImageFile.MAXBLOCK, self.size[0] * 4)  # see RawEncode.c

    # Pump the encoder until it reports completion (positive errcode)
    # or failure (negative errcode).
    output = []
    while True:
        bytes_consumed, errcode, data = e.encode(bufsize)
        output.append(data)
        if errcode:
            break
    if errcode < 0:
        msg = f"encoder error {errcode} in tobytes"
        raise RuntimeError(msg)

    return b"".join(output)
def tobitmap(self, name: str = "image") -> bytes:
    """
    Returns the image converted to an X11 bitmap.

    .. note:: This method only works for mode "1" images.

    :param name: The name prefix to use for the bitmap variables.
    :returns: A string containing an X11 bitmap.
    :raises ValueError: If the mode is not "1"
    """
    self.load()
    if self.mode != "1":
        msg = "not a bitmap"
        raise ValueError(msg)
    data = self.tobytes("xbm")
    # X11 bitmaps are C source: two #define lines plus the packed bits.
    header = (
        f"#define {name}_width {self.size[0]}\n"
        f"#define {name}_height {self.size[1]}\n"
        f"static char {name}_bits[] = {{\n"
    ).encode("ascii")
    return header + data + b"};"
def frombytes(
    self,
    data: bytes | bytearray | SupportsArrayInterface,
    decoder_name: str = "raw",
    *args: Any,
) -> None:
    """
    Loads this image with pixel data from a bytes object.

    This method is similar to the :py:func:`~PIL.Image.frombytes` function,
    but loads data into this image instead of creating a new image object.
    """
    if self.width == 0 or self.height == 0:
        return

    decoder_args: Any = args
    if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple):
        # may pass tuple instead of argument list
        decoder_args = decoder_args[0]

    # default format
    if decoder_name == "raw" and decoder_args == ():
        decoder_args = self.mode

    # unpack data
    d = _getdecoder(self.mode, decoder_name, decoder_args)
    d.setimage(self.im)
    s = d.decode(data)

    # s is (bytes consumed, error code); a non-negative first element
    # appears to mean the decoder stopped before the image was complete.
    if s[0] >= 0:
        msg = "not enough image data"
        raise ValueError(msg)
    if s[1] != 0:
        msg = "cannot decode image data"
        raise ValueError(msg)
def load(self) -> core.PixelAccess | None:
    """
    Allocates storage for the image and loads the pixel data. In
    normal cases, you don't need to call this method, since the
    Image class automatically loads an opened image when it is
    accessed for the first time.

    If the file associated with the image was opened by Pillow, then this
    method will close it. The exception to this is if the image has
    multiple frames, in which case the file will be left open for seek
    operations. See :ref:`file-handling` for more information.

    :returns: An image access object.
    :rtype: :py:class:`.PixelAccess`
    """
    if self._im is not None and self.palette and self.palette.dirty:
        # realize palette: push pending palette edits down into the core.
        mode, arr = self.palette.getdata()
        self.im.putpalette(self.palette.mode, mode, arr)
        self.palette.dirty = 0
        self.palette.rawmode = None
        if "transparency" in self.info and mode in ("LA", "PA"):
            # Fold the transparency info into the palette's alpha band.
            if isinstance(self.info["transparency"], int):
                self.im.putpalettealpha(self.info["transparency"], 0)
            else:
                self.im.putpalettealphas(self.info["transparency"])
            self.palette.mode = "RGBA"
        else:
            # Sync the palette object with what the core actually stored.
            self.palette.palette = self.im.getpalette(
                self.palette.mode, self.palette.mode
            )

    if self._im is not None:
        return self.im.pixel_access(self.readonly)
    return None
def verify(self) -> None:
    """
    Verifies the contents of a file. For data read from a file, this
    method attempts to determine if the file is broken, without
    actually decoding the image data. If this method finds any
    problems, it raises suitable exceptions. If you need to load
    the image after using this method, you must reopen the image
    file.
    """
    # Base implementation is a no-op; subclasses may override it.
    pass
def convert(
    self,
    mode: str | None = None,
    matrix: tuple[float, ...] | None = None,
    dither: Dither | None = None,
    palette: Palette = Palette.WEB,
    colors: int = 256,
) -> Image:
    """
    Returns a converted copy of this image. For the "P" mode, this
    method translates pixels through the palette. If mode is
    omitted, a mode is chosen so that all information in the image
    and the palette can be represented without a palette.

    This supports all possible conversions between "L", "RGB" and "CMYK". The
    ``matrix`` argument only supports "L" and "RGB".

    When translating a color image to grayscale (mode "L"),
    the library uses the ITU-R 601-2 luma transform::

        L = R * 299/1000 + G * 587/1000 + B * 114/1000

    The default method of converting a grayscale ("L") or "RGB"
    image into a bilevel (mode "1") image uses Floyd-Steinberg
    dither to approximate the original image luminosity levels. If
    dither is ``None``, all values larger than 127 are set to 255 (white),
    all other values to 0 (black). To use other thresholds, use the
    :py:meth:`~PIL.Image.Image.point` method.

    When converting from "RGBA" to "P" without a ``matrix`` argument,
    this passes the operation to :py:meth:`~PIL.Image.Image.quantize`,
    and ``dither`` and ``palette`` are ignored.

    When converting from "PA", if an "RGBA" palette is present, the alpha
    channel from the image will be used instead of the values from the palette.

    :param mode: The requested mode. See: :ref:`concept-modes`.
    :param matrix: An optional conversion matrix. If given, this
        should be 4- or 12-tuple containing floating point values.
    :param dither: Dithering method, used when converting from
        mode "RGB" to "P" or from "RGB" or "L" to "1".
        Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
        (default). Note that this is not used when ``matrix`` is supplied.
    :param palette: Palette to use when converting from mode "RGB"
        to "P". Available palettes are :data:`Palette.WEB` or
        :data:`Palette.ADAPTIVE`.
    :param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
        palette. Defaults to 256.
    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if mode in ("BGR;15", "BGR;16", "BGR;24"):
        deprecate(mode, 12)

    self.load()

    has_transparency = "transparency" in self.info
    if not mode and self.mode == "P":
        # determine default mode
        if self.palette:
            mode = self.palette.mode
        else:
            mode = "RGB"
        if mode == "RGB" and has_transparency:
            mode = "RGBA"
    if not mode or (mode == self.mode and not matrix):
        # Nothing to convert: hand back a plain copy.
        return self.copy()

    if matrix:
        # matrix conversion
        if mode not in ("L", "RGB"):
            msg = "illegal conversion"
            raise ValueError(msg)
        im = self.im.convert_matrix(mode, matrix)
        new_im = self._new(im)
        if has_transparency and self.im.bands == 3:
            # Push the transparency color through the same matrix.
            transparency = new_im.info["transparency"]

            def convert_transparency(
                m: tuple[float, ...], v: tuple[int, int, int]
            ) -> int:
                value = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
                return max(0, min(255, int(value)))

            if mode == "L":
                transparency = convert_transparency(matrix, transparency)
            elif len(mode) == 3:
                transparency = tuple(
                    convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
                    for i in range(len(transparency))
                )
            new_im.info["transparency"] = transparency
        return new_im

    if mode == "P" and self.mode == "RGBA":
        # RGBA -> P is delegated entirely to quantize().
        return self.quantize(colors)

    trns = None
    delete_trns = False
    # transparency handling
    if has_transparency:
        if (self.mode in ("1", "L", "I", "I;16") and mode in ("LA", "RGBA")) or (
            self.mode == "RGB" and mode in ("La", "LA", "RGBa", "RGBA")
        ):
            # Use transparent conversion to promote from transparent
            # color to an alpha channel.
            new_im = self._new(
                self.im.convert_transparent(mode, self.info["transparency"])
            )
            del new_im.info["transparency"]
            return new_im
        elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
            t = self.info["transparency"]
            if isinstance(t, bytes):
                # Dragons. This can't be represented by a single color
                warnings.warn(
                    "Palette images with Transparency expressed in bytes should be "
                    "converted to RGBA images"
                )
                delete_trns = True
            else:
                # get the new transparency color.
                # use existing conversions
                trns_im = new(self.mode, (1, 1))
                if self.mode == "P":
                    assert self.palette is not None
                    trns_im.putpalette(self.palette, self.palette.mode)
                    if isinstance(t, tuple):
                        err = "Couldn't allocate a palette color for transparency"
                        assert trns_im.palette is not None
                        try:
                            t = trns_im.palette.getcolor(t, self)
                        except ValueError as e:
                            if str(e) == "cannot allocate more than 256 colors":
                                # If all 256 colors are in use,
                                # then there is no need for transparency
                                t = None
                            else:
                                raise ValueError(err) from e
                if t is None:
                    trns = None
                else:
                    # Convert the one-pixel stand-in to find the new
                    # transparency value in the target mode.
                    trns_im.putpixel((0, 0), t)

                    if mode in ("L", "RGB"):
                        trns_im = trns_im.convert(mode)
                    else:
                        # can't just retrieve the palette number, got to do it
                        # after quantization.
                        trns_im = trns_im.convert("RGB")
                    trns = trns_im.getpixel((0, 0))

        elif self.mode == "P" and mode in ("LA", "PA", "RGBA"):
            t = self.info["transparency"]
            delete_trns = True

            if isinstance(t, bytes):
                self.im.putpalettealphas(t)
            elif isinstance(t, int):
                self.im.putpalettealpha(t, 0)
            else:
                msg = "Transparency for P mode should be bytes or int"
                raise ValueError(msg)

    if mode == "P" and palette == Palette.ADAPTIVE:
        # Adaptive palette: quantize in the core, then attach the palette.
        im = self.im.quantize(colors)
        new_im = self._new(im)
        from . import ImagePalette

        new_im.palette = ImagePalette.ImagePalette(
            "RGB", new_im.im.getpalette("RGB")
        )
        if delete_trns:
            # This could possibly happen if we requantize to fewer colors.
            # The transparency would be totally off in that case.
            del new_im.info["transparency"]
        if trns is not None:
            try:
                new_im.info["transparency"] = new_im.palette.getcolor(
                    cast(tuple[int, ...], trns),  # trns was converted to RGB
                    new_im,
                )
            except Exception:
                # if we can't make a transparent color, don't leave the old
                # transparency hanging around to mess us up.
                del new_im.info["transparency"]
                warnings.warn("Couldn't allocate palette entry for transparency")
        return new_im

    if "LAB" in (self.mode, mode):
        # LAB conversions go through ICC color management (ImageCms).
        im = self
        if mode == "LAB":
            if im.mode not in ("RGB", "RGBA", "RGBX"):
                im = im.convert("RGBA")
            other_mode = im.mode
        else:
            other_mode = mode
        if other_mode in ("RGB", "RGBA", "RGBX"):
            from . import ImageCms

            srgb = ImageCms.createProfile("sRGB")
            lab = ImageCms.createProfile("LAB")
            profiles = [lab, srgb] if im.mode == "LAB" else [srgb, lab]
            transform = ImageCms.buildTransform(
                profiles[0], profiles[1], im.mode, mode
            )
            return transform.apply(im)

    # colorspace conversion
    if dither is None:
        dither = Dither.FLOYDSTEINBERG

    try:
        im = self.im.convert(mode, dither)
    except ValueError:
        try:
            # normalize source image and try again
            modebase = getmodebase(self.mode)
            if modebase == self.mode:
                raise
            im = self.im.convert(modebase)
            im = im.convert(mode, dither)
        except KeyError as e:
            msg = "illegal conversion"
            raise ValueError(msg) from e

    new_im = self._new(im)
    if mode == "P" and palette != Palette.ADAPTIVE:
        from . import ImagePalette

        new_im.palette = ImagePalette.ImagePalette("RGB", im.getpalette("RGB"))
    if delete_trns:
        # crash fail if we leave a bytes transparency in an rgb/l mode.
        del new_im.info["transparency"]
    if trns is not None:
        if new_im.mode == "P" and new_im.palette:
            try:
                new_im.info["transparency"] = new_im.palette.getcolor(
                    cast(tuple[int, ...], trns), new_im  # trns was converted to RGB
                )
            except ValueError as e:
                del new_im.info["transparency"]
                if str(e) != "cannot allocate more than 256 colors":
                    # If all 256 colors are in use,
                    # then there is no need for transparency
                    warnings.warn(
                        "Couldn't allocate palette entry for transparency"
                    )
        else:
            new_im.info["transparency"] = trns
    return new_im
def quantize(
    self,
    colors: int = 256,
    method: int | None = None,
    kmeans: int = 0,
    palette: Image | None = None,
    dither: Dither = Dither.FLOYDSTEINBERG,
) -> Image:
    """
    Convert the image to 'P' mode with the specified number
    of colors.

    :param colors: The desired number of colors, <= 256
    :param method: :data:`Quantize.MEDIANCUT` (median cut),
        :data:`Quantize.MAXCOVERAGE` (maximum coverage),
        :data:`Quantize.FASTOCTREE` (fast octree),
        :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
        using :py:func:`PIL.features.check_feature` with
        ``feature="libimagequant"``).

        By default, :data:`Quantize.MEDIANCUT` will be used.

        The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
        and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
        :data:`Quantize.FASTOCTREE` is used by default instead.
    :param kmeans: Integer greater than or equal to zero.
    :param palette: Quantize to the palette of given
        :py:class:`PIL.Image.Image`.
    :param dither: Dithering method, used when converting from
        mode "RGB" to "P" or from "RGB" or "L" to "1".
        Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
        (default).
    :returns: A new image
    """
    self.load()

    if method is None:
        # defaults:
        method = Quantize.MEDIANCUT
        if self.mode == "RGBA":
            method = Quantize.FASTOCTREE

    if self.mode == "RGBA" and method not in (
        Quantize.FASTOCTREE,
        Quantize.LIBIMAGEQUANT,
    ):
        # Caller specified an invalid mode.
        msg = (
            "Fast Octree (method == 2) and libimagequant (method == 3) "
            "are the only valid methods for quantizing RGBA images"
        )
        raise ValueError(msg)

    if palette:
        # use palette from reference image
        palette.load()
        if palette.mode != "P":
            msg = "bad mode for palette image"
            raise ValueError(msg)
        if self.mode not in {"RGB", "L"}:
            msg = "only RGB or L mode images can be quantized to a palette"
            raise ValueError(msg)
        im = self.im.convert("P", dither, palette.im)
        new_im = self._new(im)
        assert palette.palette is not None
        new_im.palette = palette.palette.copy()
        return new_im

    if kmeans < 0:
        msg = "kmeans must not be negative"
        raise ValueError(msg)

    im = self._new(self.im.quantize(colors, method, kmeans))

    from . import ImagePalette

    mode = im.im.getpalettemode()
    # Truncate the palette to the number of colors actually requested.
    palette_data = im.im.getpalette(mode, mode)[: colors * len(mode)]
    im.palette = ImagePalette.ImagePalette(mode, palette_data)

    return im
def copy(self) -> Image:
    """
    Return a duplicate of this image.

    Use this when you want to modify (e.g. paste into) an image while
    keeping the original intact.

    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    self.load()
    duplicate_core = self.im.copy()
    return self._new(duplicate_core)

# Support the copy.copy() protocol.
__copy__ = copy
def crop(self, box: tuple[float, float, float, float] | None = None) -> Image:
    """
    Return a rectangular region of this image as a new image.

    The box is a 4-tuple giving the left, upper, right, and lower pixel
    coordinate. See :ref:`coordinate-system`.

    Note: Prior to Pillow 3.4.0, this was a lazy operation.

    :param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    :raises ValueError: If the box is inverted on either axis.
    """
    # No box means "everything": just duplicate the image.
    if box is None:
        return self.copy()

    # Guard clauses: reject inverted rectangles up front.
    if box[2] < box[0]:
        raise ValueError("Coordinate 'right' is less than 'left'")
    if box[3] < box[1]:
        raise ValueError("Coordinate 'lower' is less than 'upper'")

    self.load()
    return self._new(self._crop(self.im, box))
def _crop(
    self, im: core.ImagingCore, box: tuple[float, float, float, float]
) -> core.ImagingCore:
    """
    Crop the core image object ``im`` to ``box``.

    Equivalent to ``im.crop((x0, y0, x1, y1))`` but with the coordinates
    rounded to integers and a decompression-bomb sanity check first.

    :param im: a core image object
    :param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
    :returns: A core image object.
    """
    x0, y0, x1, y1 = (int(round(coord)) for coord in box)
    # Guard against pathologically large crop regions.
    _decompression_bomb_check((abs(x1 - x0), abs(y1 - y0)))
    return im.crop((x0, y0, x1, y1))
def draft(
    self, mode: str | None, size: tuple[int, int] | None
) -> tuple[str, tuple[int, int, float, float]] | None:
    """
    Configures the image file loader so it returns a version of the
    image that as closely as possible matches the given mode and
    size. For example, you can use this method to convert a color
    JPEG to grayscale while loading it.

    If any changes are made, returns a tuple with the chosen ``mode`` and
    ``box`` with coordinates of the original image within the altered one.

    Note that this method modifies the :py:class:`~PIL.Image.Image` object
    in place. If the image has already been loaded, this method has no
    effect.

    Note: This method is not implemented for most images. It is
    currently implemented only for JPEG and MPO images.

    :param mode: The requested mode.
    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    """
    # Deliberate no-op in the base class; format plugins that support
    # drafting override this method. Returning None signals "no change".
    pass
def _expand(self, xmargin: int, ymargin: int | None = None) -> Image:
    """Return a copy of the image padded by the given margins.

    When ``ymargin`` is omitted the horizontal margin is reused, giving a
    uniform border.
    """
    ymargin = xmargin if ymargin is None else ymargin
    self.load()
    expanded = self.im.expand(xmargin, ymargin)
    return self._new(expanded)
def filter(self, filter: ImageFilter.Filter | type[ImageFilter.Filter]) -> Image:
    """
    Apply the given filter to this image and return the result. For the
    list of available filters, see the :py:mod:`~PIL.ImageFilter` module.

    :param filter: Filter kernel.
    :returns: An :py:class:`~PIL.Image.Image` object.
    :raises TypeError: If the argument is not a filter instance or class.
    """
    from . import ImageFilter

    self.load()

    # Accept either a filter class or an instance.
    if callable(filter):
        filter = filter()
    if not hasattr(filter, "filter"):
        raise TypeError(
            "filter argument should be ImageFilter.Filter instance or class"
        )

    # Multiband filters (and single-band images) are handled in one pass.
    if isinstance(filter, ImageFilter.MultibandFilter) or self.im.bands == 1:
        return self._new(filter.filter(self.im))

    # Otherwise filter each band separately and merge the results.
    filtered_bands = [
        self._new(filter.filter(self.im.getband(band)))
        for band in range(self.im.bands)
    ]
    return merge(self.mode, filtered_bands)
def getbands(self) -> tuple[str, ...]:
    """
    Return the names of the bands in this image as a tuple.

    For example, ``getbands`` on an RGB image returns ("R", "G", "B").

    :returns: A tuple containing band names.
    :rtype: tuple
    """
    mode_descriptor = ImageMode.getmode(self.mode)
    return mode_descriptor.bands
def getbbox(self, *, alpha_only: bool = True) -> tuple[int, int, int, int] | None:
    """
    Compute the bounding box of the non-zero regions in the image.

    :param alpha_only: Optional flag, defaulting to ``True``.
       If ``True`` and the image has an alpha channel, trim transparent pixels.
       Otherwise, trim pixels when all channels are zero.
       Keyword-only argument.
    :returns: A 4-tuple of the left, upper, right, and lower pixel
       coordinate (see :ref:`coordinate-system`), or ``None`` if the
       image is completely empty.
    """
    self.load()
    bbox = self.im.getbbox(alpha_only)
    return bbox
def getcolors(
    self, maxcolors: int = 256
) -> list[tuple[int, tuple[int, ...]]] | list[tuple[int, float]] | None:
    """
    Return a list of the colors used in this image.

    The colors will be in the image's mode. For example, an RGB image will
    return a tuple of (red, green, blue) color values, and a P image will
    return the index of the color in the palette.

    :param maxcolors: Maximum number of colors. If this number is
       exceeded, this method returns None. The default limit is
       256 colors.
    :returns: An unsorted list of (count, pixel) values.
    """
    self.load()

    if self.mode not in ("1", "L", "P"):
        # Multi-band modes are counted by the core directly.
        return self.im.getcolors(maxcolors)

    # Single-band paletted/grayscale modes: derive counts from the
    # 256-bin histogram, keeping only populated bins.
    counts = self.im.histogram()
    out: list[tuple[int, float]] = [
        (count, index) for index, count in enumerate(counts[:256]) if count
    ]
    if len(out) > maxcolors:
        return None
    return out
def getdata(self, band: int | None = None) -> core.ImagingCore:
    """
    Return the contents of this image as a flattened sequence object of
    pixel values: values for line one follow directly after the values
    of line zero, and so on.

    Note that the sequence object returned by this method is an
    internal PIL data type, which only supports certain sequence
    operations. To convert it to an ordinary sequence (e.g. for
    printing), use ``list(im.getdata())``.

    :param band: What band to return. The default is to return
       all bands. To return a single band, pass in the index
       value (e.g. 0 to get the "R" band from an "RGB" image).
    :returns: A sequence-like object.
    """
    self.load()
    if band is None:
        return self.im  # could be abused
    return self.im.getband(band)
def getextrema(self) -> tuple[float, float] | tuple[tuple[int, int], ...]:
    """
    Return the minimum and maximum pixel values for each band in
    the image.

    :returns: For a single-band image, a 2-tuple containing the
       minimum and maximum pixel value. For a multi-band image,
       a tuple containing one 2-tuple for each band.
    """
    self.load()
    band_count = self.im.bands
    if band_count <= 1:
        return self.im.getextrema()
    # One (min, max) pair per band.
    return tuple(self.im.getband(i).getextrema() for i in range(band_count))
def getxmp(self) -> dict[str, Any]:
    """
    Returns a dictionary containing the XMP tags.
    Requires defusedxml to be installed.

    :returns: XMP tags in a dictionary, or ``{}`` if defusedxml is
        missing or the image carries no XMP payload.
    """

    def get_name(tag: str) -> str:
        # Strip the leading "{namespace}" qualifier from an XML tag.
        return re.sub("^{[^}]+}", "", tag)

    def get_value(element: Element) -> str | dict[str, Any] | None:
        # Fold an element into either a plain string (leaf with no
        # attributes) or a dict of attributes plus child values.
        value: dict[str, Any] = {get_name(k): v for k, v in element.attrib.items()}
        children = list(element)
        if children:
            for child in children:
                name = get_name(child.tag)
                child_value = get_value(child)
                if name in value:
                    # Repeated tag: promote the entry to a list and append.
                    if not isinstance(value[name], list):
                        value[name] = [value[name]]
                    value[name].append(child_value)
                else:
                    value[name] = child_value
        elif value:
            # Attributes but no children: keep text under a "text" key.
            if element.text:
                value["text"] = element.text
        else:
            # No attributes, no children: collapse to the bare text.
            return element.text
        return value

    # ElementTree is the defusedxml import at module level; None when
    # defusedxml is not installed.
    if ElementTree is None:
        warnings.warn("XMP data cannot be read without defusedxml dependency")
        return {}
    if "xmp" not in self.info:
        return {}
    # Trailing NULs/spaces are padding from some writers; strip before parsing.
    root = ElementTree.fromstring(self.info["xmp"].rstrip(b"\x00 "))
    return {get_name(root.tag): get_value(root)}
def getexif(self) -> Exif:
    """
    Gets EXIF data from the image.

    :returns: an :py:class:`~PIL.Image.Exif` object.
    """
    # Return the cached Exif object when it has already been populated;
    # otherwise mark it loaded now so re-entry short-circuits.
    if self._exif is None:
        self._exif = Exif()
    elif self._exif._loaded:
        return self._exif
    self._exif._loaded = True

    exif_info = self.info.get("exif")
    if exif_info is None:
        if "Raw profile type exif" in self.info:
            # PNG text-chunk form: a hex dump preceded by three header lines.
            exif_info = bytes.fromhex(
                "".join(self.info["Raw profile type exif"].split("\n")[3:])
            )
        elif hasattr(self, "tag_v2"):
            # TIFF-based images: read EXIF straight from the file pointer
            # using the tag directory's offset and byte order.
            self._exif.bigtiff = self.tag_v2._bigtiff
            self._exif.endian = self.tag_v2._endian
            self._exif.load_from_fp(self.fp, self.tag_v2._offset)
    if exif_info is not None:
        self._exif.load(exif_info)

    # XMP tags
    # Fall back to XMP for orientation when EXIF lacks it; the pattern is
    # str for the parsed "XML:com.adobe.xmp" form, bytes for raw "xmp".
    if ExifTags.Base.Orientation not in self._exif:
        xmp_tags = self.info.get("XML:com.adobe.xmp")
        pattern: str | bytes = r'tiff:Orientation(="|>)([0-9])'
        if not xmp_tags and (xmp_tags := self.info.get("xmp")):
            pattern = rb'tiff:Orientation(="|>)([0-9])'
        if xmp_tags:
            match = re.search(pattern, xmp_tags)
            if match:
                self._exif[ExifTags.Base.Orientation] = int(match[2])

    return self._exif
def _reload_exif(self) -> None:
    """Force a re-parse of EXIF data if it was already loaded."""
    exif = self._exif
    # Nothing to refresh unless EXIF has been parsed at least once.
    if exif is None or not exif._loaded:
        return
    exif._loaded = False
    self.getexif()
def get_child_images(self) -> list[ImageFile.ImageFile]:
    # Deprecated shim (removal scheduled for Pillow 13): delegates to
    # ImageFile.ImageFile.get_child_images after emitting the warning.
    from . import ImageFile

    deprecate("Image.Image.get_child_images", 13)
    return ImageFile.ImageFile.get_child_images(self)  # type: ignore[arg-type]
def getim(self) -> CapsuleType:
    """
    Return a capsule pointing at the internal image memory.

    :returns: A capsule object.
    """
    self.load()
    capsule = self.im.ptr
    return capsule
def getpalette(self, rawmode: str | None = "RGB") -> list[int] | None:
    """
    Return the image palette as a flat list of channel values.

    :param rawmode: The mode in which to return the palette. ``None`` will
       return the palette in its current mode.

       .. versionadded:: 9.1.0

    :returns: A list of color values [r, g, b, ...], or None if the
       image has no palette.
    """
    self.load()
    try:
        current_mode = self.im.getpalettemode()
    except ValueError:
        return None  # no palette
    # None means "whatever mode the palette is already in".
    target_mode = current_mode if rawmode is None else rawmode
    return list(self.im.getpalette(current_mode, target_mode))
@property
def has_transparency_data(self) -> bool:
    """
    Whether the image carries any transparency data: an alpha channel,
    a palette with an alpha channel, or a "transparency" key in the
    info dictionary.

    Note the image might still appear solid, if all of the values shown
    within are opaque.

    :returns: A boolean.
    """
    if "transparency" in self.info:
        return True
    if self.mode in ("LA", "La", "PA", "RGBA", "RGBa"):
        return True
    if self.mode == "P":
        # Paletted image: transparency lives in the palette's mode.
        assert self.palette is not None
        return self.palette.mode.endswith("A")
    return False
def apply_transparency(self) -> None:
    """
    If a P mode image has a "transparency" key in the info dictionary,
    remove the key and instead apply the transparency to the palette.
    Otherwise, the image is unchanged.
    """
    # Only paletted images with an info-level transparency entry qualify.
    if self.mode != "P" or "transparency" not in self.info:
        return

    from . import ImagePalette

    rgba = self.getpalette("RGBA")
    assert rgba is not None
    transparency = self.info["transparency"]
    if isinstance(transparency, bytes):
        # Per-index alpha table: copy each alpha into the palette's
        # alpha slot (every 4th value).
        for index, alpha in enumerate(transparency):
            rgba[index * 4 + 3] = alpha
    else:
        # Single transparent palette index: make it fully transparent.
        rgba[transparency * 4 + 3] = 0
    self.palette = ImagePalette.ImagePalette("RGBA", bytes(rgba))
    self.palette.dirty = 1

    del self.info["transparency"]
def getpixel(
    self, xy: tuple[int, int] | list[int]
) -> float | tuple[int, ...] | None:
    """
    Return the pixel value at the given position.

    :param xy: The coordinate, given as (x, y). See
       :ref:`coordinate-system`.
    :returns: The pixel value. If the image is a multi-layer image,
       this method returns a tuple.
    """
    self.load()
    coordinate = tuple(xy)
    return self.im.getpixel(coordinate)
def getprojection(self) -> tuple[list[int], list[int]]:
    """
    Get projection to x and y axes

    :returns: Two sequences, indicating where there are non-zero
       pixels along the X-axis and the Y-axis, respectively.
    """
    self.load()
    x_profile, y_profile = self.im.getprojection()
    return list(x_profile), list(y_profile)
def histogram(
    self, mask: Image | None = None, extrema: tuple[float, float] | None = None
) -> list[int]:
    """
    Return a histogram for the image as a list of pixel counts, one for
    each pixel value in the source image. Counts are grouped into 256
    bins for each band, even if the image has more than 8 bits per band.
    If the image has more than one band, the histograms for all bands
    are concatenated (for example, the histogram for an "RGB" image
    contains 768 values).

    A bilevel image (mode "1") is treated as a grayscale ("L") image
    by this method.

    If a mask is provided, the method returns a histogram for those
    parts of the image where the mask image is non-zero. The mask
    image must have the same size as the image, and be either a
    bi-level image (mode "1") or a grayscale image ("L").

    :param mask: An optional mask.
    :param extrema: An optional tuple of manually-specified extrema.
    :returns: A list containing pixel counts.
    """
    self.load()
    if mask:
        # Count only pixels where the mask is non-zero.
        mask.load()
        return self.im.histogram((0, 0), mask.im)
    if self.mode in ("I", "F"):
        # Wide modes need extrema to bin values into 256 buckets.
        if extrema is None:
            extrema = self.getextrema()
        return self.im.histogram(extrema)
    return self.im.histogram()
def entropy(
    self, mask: Image | None = None, extrema: tuple[float, float] | None = None
) -> float:
    """
    Calculate and return the entropy for the image.

    A bilevel image (mode "1") is treated as a grayscale ("L")
    image by this method.

    If a mask is provided, the method employs the histogram for
    those parts of the image where the mask image is non-zero.
    The mask image must have the same size as the image, and be
    either a bi-level image (mode "1") or a grayscale image ("L").

    :param mask: An optional mask.
    :param extrema: An optional tuple of manually-specified extrema.
    :returns: A float value representing the image entropy
    """
    self.load()
    if mask:
        # Restrict the computation to pixels where the mask is non-zero.
        mask.load()
        return self.im.entropy((0, 0), mask.im)
    if self.mode in ("I", "F"):
        # Wide modes need extrema so values can be binned.
        if extrema is None:
            extrema = self.getextrema()
        return self.im.entropy(extrema)
    return self.im.entropy()
def paste(
    self,
    im: Image | str | float | tuple[float, ...],
    box: Image | tuple[int, int, int, int] | tuple[int, int] | None = None,
    mask: Image | None = None,
) -> None:
    """
    Pastes another image into this image. The box argument is either
    a 2-tuple giving the upper left corner, a 4-tuple defining the
    left, upper, right, and lower pixel coordinate, or None (same as
    (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
    of the pasted image must match the size of the region.

    If the modes don't match, the pasted image is converted to the mode of
    this image (see the :py:meth:`~PIL.Image.Image.convert` method for
    details).

    Instead of an image, the source can be a integer or tuple
    containing pixel values. The method then fills the region
    with the given color. When creating RGB images, you can
    also use color strings as supported by the ImageColor module.

    If a mask is given, this method updates only the regions
    indicated by the mask. You can use either "1", "L", "LA", "RGBA"
    or "RGBa" images (if present, the alpha band is used as mask).
    Where the mask is 255, the given image is copied as is. Where
    the mask is 0, the current value is preserved. Intermediate
    values will mix the two images together, including their alpha
    channels if they have them.

    See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
    combine images with respect to their alpha channels.

    :param im: Source image or pixel value (integer, float or tuple).
    :param box: An optional 4-tuple giving the region to paste into.
       If a 2-tuple is used instead, it's treated as the upper left
       corner. If omitted or None, the source is pasted into the
       upper left corner.

       If an image is given as the second argument and there is no
       third, the box defaults to (0, 0), and the second argument
       is interpreted as a mask image.
    :param mask: An optional mask image.
    """
    if isinstance(box, Image):
        if mask is not None:
            msg = "If using second argument as mask, third argument must be None"
            raise ValueError(msg)
        # abbreviated paste(im, mask) syntax
        mask = box
        box = None

    if box is None:
        box = (0, 0)

    if len(box) == 2:
        # upper left corner given; get size from image or mask
        if isinstance(im, Image):
            size = im.size
        elif isinstance(mask, Image):
            size = mask.size
        else:
            # FIXME: use self.size here?
            msg = "cannot determine region size; use 4-item box"
            raise ValueError(msg)
        # Grow the 2-tuple corner into a full 4-tuple region.
        box += (box[0] + size[0], box[1] + size[1])

    source: core.ImagingCore | str | float | tuple[float, ...]
    if isinstance(im, str):
        # Color string: resolve it against this image's mode.
        from . import ImageColor

        source = ImageColor.getcolor(im, self.mode)
    elif isinstance(im, Image):
        im.load()
        if self.mode != im.mode:
            # Pasting LA/RGBA/RGBa onto RGB is handled natively by the
            # core; everything else is converted to our mode first.
            if self.mode != "RGB" or im.mode not in ("LA", "RGBA", "RGBa"):
                # should use an adapter for this!
                im = im.convert(self.mode)
        source = im.im
    else:
        # Plain pixel value (int/float/tuple) used as a fill color.
        source = im

    self._ensure_mutable()

    if mask:
        mask.load()
        self.im.paste(source, box, mask.im)
    else:
        self.im.paste(source, box)
def alpha_composite(
    self, im: Image, dest: Sequence[int] = (0, 0), source: Sequence[int] = (0, 0)
) -> None:
    """'In-place' analog of Image.alpha_composite. Composites an image
    onto this image.

    :param im: image to composite over this one
    :param dest: Optional 2 tuple (left, top) specifying the upper
      left corner in this (destination) image.
    :param source: Optional 2 (left, top) tuple for the upper left
      corner in the overlay source image, or 4 tuple (left, top, right,
      bottom) for the bounds of the source rectangle

    Performance Note: Not currently implemented in-place in the core layer.
    """
    if not isinstance(source, (list, tuple)):
        msg = "Source must be a list or tuple"
        raise ValueError(msg)
    if not isinstance(dest, (list, tuple)):
        msg = "Destination must be a list or tuple"
        raise ValueError(msg)

    # A 2-tuple source means "from this corner to the overlay's edge".
    if len(source) == 4:
        overlay_crop_box = tuple(source)
    elif len(source) == 2:
        overlay_crop_box = tuple(source) + im.size
    else:
        msg = "Source must be a sequence of length 2 or 4"
        raise ValueError(msg)

    if not len(dest) == 2:
        msg = "Destination must be a sequence of length 2"
        raise ValueError(msg)
    if min(source) < 0:
        msg = "Source must be non-negative"
        raise ValueError(msg)

    # over image, crop if it's not the whole image.
    if overlay_crop_box == (0, 0) + im.size:
        overlay = im
    else:
        overlay = im.crop(overlay_crop_box)

    # target for the paste
    box = tuple(dest) + (dest[0] + overlay.width, dest[1] + overlay.height)

    # destination image. don't copy if we're using the whole image.
    if box == (0, 0) + self.size:
        background = self
    else:
        background = self.crop(box)

    # Composite out-of-place, then paste the result back into self.
    result = alpha_composite(background, overlay)
    self.paste(result, box)
def point(
    self,
    lut: (
        Sequence[float]
        | NumpyArray
        | Callable[[int], float]
        | Callable[[ImagePointTransform], ImagePointTransform | float]
        | ImagePointHandler
    ),
    mode: str | None = None,
) -> Image:
    """
    Maps this image through a lookup table or function.

    :param lut: A lookup table, containing 256 (or 65536 if
       self.mode=="I" and mode == "L") values per band in the
       image. A function can be used instead, it should take a
       single argument. The function is called once for each
       possible pixel value, and the resulting table is applied to
       all bands of the image.

       It may also be an :py:class:`~PIL.Image.ImagePointHandler`
       object::

           class Example(Image.ImagePointHandler):
               def point(self, im: Image) -> Image:
                   # Return result
    :param mode: Output mode (default is same as input). This can only be used if
       the source image has mode "L" or "P", and the output has mode "1" or the
       source image mode is "I" and the output mode is "L".
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    self.load()

    if isinstance(lut, ImagePointHandler):
        # Handler objects take over the whole operation.
        return lut.point(self)

    if callable(lut):
        # if it isn't a list, it should be a function
        if self.mode in ("I", "I;16", "F"):
            # check if the function can be used with point_transform
            # UNDONE wiredfool -- I think this prevents us from ever doing
            # a gamma function point transform on > 8bit images.
            scale, offset = _getscaleoffset(lut)  # type: ignore[arg-type]
            return self._new(self.im.point_transform(scale, offset))
        # for other modes, convert the function to a table
        flatLut = [lut(i) for i in range(256)] * self.im.bands  # type: ignore[arg-type]
    else:
        flatLut = lut

    if self.mode == "F":
        # FIXME: _imaging returns a confusing error message for this case
        msg = "point operation not supported for this mode"
        raise ValueError(msg)

    if mode != "F":
        # Integer output modes need integral table entries.
        flatLut = [round(i) for i in flatLut]
    return self._new(self.im.point(flatLut, mode))
def putalpha(self, alpha: Image | int) -> None:
    """
    Adds or replaces the alpha layer in this image. If the image
    does not have an alpha layer, it's converted to "LA" or "RGBA".
    The new layer must be either "L" or "1".

    :param alpha: The new alpha layer. This can either be an "L" or "1"
       image having the same size as this image, or an integer.
    """
    self._ensure_mutable()

    if self.mode not in ("LA", "PA", "RGBA"):
        # attempt to promote self to a matching alpha mode
        try:
            mode = getmodebase(self.mode) + "A"
            try:
                # Fast path: flip the mode flag in place if the core allows.
                self.im.setmode(mode)
            except (AttributeError, ValueError) as e:
                # do things the hard way
                im = self.im.convert(mode)
                if im.mode not in ("LA", "PA", "RGBA"):
                    msg = "alpha channel could not be added"
                    raise ValueError(msg) from e  # sanity check
                self.im = im
            self._mode = self.im.mode
        except KeyError as e:
            msg = "illegal image mode"
            raise ValueError(msg) from e

    # Index of the alpha band in the (now alpha-capable) mode.
    if self.mode in ("LA", "PA"):
        band = 1
    else:
        band = 3

    if isinstance(alpha, Image):
        # alpha layer
        if alpha.mode not in ("1", "L"):
            msg = "illegal image mode"
            raise ValueError(msg)
        alpha.load()
        if alpha.mode == "1":
            alpha = alpha.convert("L")
    else:
        # constant alpha
        try:
            # Fast path: fill the band directly with the constant.
            self.im.fillband(band, alpha)
        except (AttributeError, ValueError):
            # do things the hard way
            alpha = new("L", self.size, alpha)
        else:
            return

    self.im.putband(alpha.im, band)
def putdata(
    self,
    data: Sequence[float] | Sequence[Sequence[int]] | core.ImagingCore | NumpyArray,
    scale: float = 1.0,
    offset: float = 0.0,
) -> None:
    """
    Copies pixel data from a flattened sequence object into the image. The
    values should start at the upper left corner (0, 0), continue to the
    end of the line, followed directly by the first value of the second
    line, and so on. Data will be read until either the image or the
    sequence ends. The scale and offset values are used to adjust the
    sequence values: **pixel = value*scale + offset**.

    :param data: A flattened sequence object.
    :param scale: An optional scale value. The default is 1.0.
    :param offset: An optional offset value. The default is 0.0.
    """
    # Copy-on-write safety before mutating, then delegate to the C core.
    self._ensure_mutable()

    self.im.putdata(data, scale, offset)
def putpalette(
    self,
    data: ImagePalette.ImagePalette | bytes | Sequence[int],
    rawmode: str = "RGB",
) -> None:
    """
    Attaches a palette to this image. The image must be a "P", "PA", "L"
    or "LA" image.

    The palette sequence must contain at most 256 colors, made up of one
    integer value for each channel in the raw mode.
    For example, if the raw mode is "RGB", then it can contain at most 768
    values, made up of red, green and blue values for the corresponding pixel
    index in the 256 colors.
    If the raw mode is "RGBA", then it can contain at most 1024 values,
    containing red, green, blue and alpha values.

    Alternatively, an 8-bit string may be used instead of an integer sequence.

    :param data: A palette sequence (either a list or a string).
    :param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
       that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
    """
    from . import ImagePalette

    if self.mode not in ("L", "LA", "P", "PA"):
        msg = "illegal image mode"
        raise ValueError(msg)
    if isinstance(data, ImagePalette.ImagePalette):
        # Re-wrap an existing palette object, preserving its raw mode.
        if data.rawmode is not None:
            palette = ImagePalette.raw(data.rawmode, data.palette)
        else:
            palette = ImagePalette.ImagePalette(palette=data.palette)
            # Mark dirty so load() pushes it into the core image.
            palette.dirty = 1
    else:
        if not isinstance(data, bytes):
            data = bytes(data)
        palette = ImagePalette.raw(rawmode, data)
    # L/LA become paletted; keep the alpha variant where present.
    self._mode = "PA" if "A" in self.mode else "P"
    self.palette = palette
    self.palette.mode = "RGBA" if "A" in rawmode else "RGB"
    self.load()  # install new palette
def putpixel(
    self, xy: tuple[int, int], value: float | tuple[int, ...] | list[int]
) -> None:
    """
    Modifies the pixel at the given position. The color is given as
    a single numerical value for single-band images, and a tuple for
    multi-band images. In addition to this, RGB and RGBA tuples are
    accepted for P and PA images.

    Note that this method is relatively slow. For more extensive changes,
    use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
    module instead.

    See:

    * :py:meth:`~PIL.Image.Image.paste`
    * :py:meth:`~PIL.Image.Image.putdata`
    * :py:mod:`~PIL.ImageDraw`

    :param xy: The pixel coordinate, given as (x, y). See
       :ref:`coordinate-system`.
    :param value: The pixel value.
    """
    # Copy-on-write: duplicate shared pixel data before mutating.
    if self.readonly:
        self._copy()
    self.load()

    if (
        self.mode in ("P", "PA")
        and isinstance(value, (list, tuple))
        and len(value) in [3, 4]
    ):
        # RGB or RGBA value for a P or PA image
        if self.mode == "PA":
            # Split off the alpha component; default to opaque.
            alpha = value[3] if len(value) == 4 else 255
            value = value[:3]
        assert self.palette is not None
        # Resolve the color to a palette index (allocating if needed).
        palette_index = self.palette.getcolor(tuple(value), self)
        value = (palette_index, alpha) if self.mode == "PA" else palette_index
    return self.im.putpixel(xy, value)
def remap_palette(
    self, dest_map: list[int], source_palette: bytes | bytearray | None = None
) -> Image:
    """
    Rewrites the image to reorder the palette.

    :param dest_map: A list of indexes into the original palette.
       e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
       is the identity transform.
    :param source_palette: Bytes or None.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    from . import ImagePalette

    if self.mode not in ("L", "P"):
        msg = "illegal image mode"
        raise ValueError(msg)

    bands = 3
    palette_mode = "RGB"
    if source_palette is None:
        if self.mode == "P":
            self.load()
            palette_mode = self.im.getpalettemode()
            if palette_mode == "RGBA":
                bands = 4
            source_palette = self.im.getpalette(palette_mode, palette_mode)
        else:  # L-mode
            # Synthesize a 256-entry grayscale ramp as the source palette.
            source_palette = bytearray(i // 3 for i in range(768))
    elif len(source_palette) > 768:
        # More than 256 * 3 bytes implies 4 bytes per entry (RGBA).
        bands = 4
        palette_mode = "RGBA"

    palette_bytes = b""
    new_positions = [0] * 256

    # pick only the used colors from the palette
    for i, oldPosition in enumerate(dest_map):
        palette_bytes += source_palette[
            oldPosition * bands : oldPosition * bands + bands
        ]
        new_positions[oldPosition] = i

    # replace the palette color id of all pixel with the new id

    # Palette images are [0..255], mapped through a 1 or 3
    # byte/color map.  We need to remap the whole image
    # from palette 1 to palette 2. New_positions is
    # an array of indexes into palette 1.  Palette 2 is
    # palette 1 with any holes removed.

    # We're going to leverage the convert mechanism to use the
    # C code to remap the image from palette 1 to palette 2,
    # by forcing the source image into 'L' mode and adding a
    # mapping 'L' mode palette, then converting back to 'L'
    # sans palette thus converting the image bytes, then
    # assigning the optimized RGB palette.

    # perf reference, 9500x4000 gif, w/~135 colors
    # 14 sec prepatch, 1 sec postpatch with optimization forced.

    mapping_palette = bytearray(new_positions)

    m_im = self.copy()
    m_im._mode = "P"

    m_im.palette = ImagePalette.ImagePalette(
        palette_mode, palette=mapping_palette * bands
    )
    # possibly set palette dirty, then
    # m_im.putpalette(mapping_palette, 'L')  # converts to 'P'
    # or just force it.
    # UNDONE -- this is part of the general issue with palettes
    m_im.im.putpalette(palette_mode, palette_mode + ";L", m_im.palette.tobytes())

    m_im = m_im.convert("L")

    m_im.putpalette(palette_bytes, palette_mode)
    m_im.palette = ImagePalette.ImagePalette(palette_mode, palette=palette_bytes)

    if "transparency" in self.info:
        try:
            # Transparency index moves with the remap.
            m_im.info["transparency"] = dest_map.index(self.info["transparency"])
        except ValueError:
            # Transparent index was dropped by the remap; discard it.
            if "transparency" in m_im.info:
                del m_im.info["transparency"]

    return m_im
def _get_safe_box(
    self,
    size: tuple[int, int],
    resample: Resampling,
    box: tuple[float, float, float, float],
) -> tuple[int, int, int, int]:
    """Expands the box so it includes adjacent pixels
    that may be used by resampling with the given resampling filter.
    """
    # Filter footprint in source pixels, scaled by the resize ratio.
    support = _filters_support[resample] - 0.5
    support_x = support * ((box[2] - box[0]) / size[0])
    support_y = support * ((box[3] - box[1]) / size[1])

    # Widen the box by the support, clamped to the image bounds.
    left = max(0, int(box[0] - support_x))
    upper = max(0, int(box[1] - support_y))
    right = min(self.size[0], math.ceil(box[2] + support_x))
    lower = min(self.size[1], math.ceil(box[3] + support_y))
    return (left, upper, right, lower)
def resize(
    self,
    size: tuple[int, int] | list[int] | NumpyArray,
    resample: int | None = None,
    box: tuple[float, float, float, float] | None = None,
    reducing_gap: float | None = None,
) -> Image:
    """
    Returns a resized copy of this image.

    :param size: The requested size in pixels, as a tuple or array:
       (width, height).
    :param resample: An optional resampling filter. This can be
       one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
       :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
       :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
       If the image has mode "1" or "P", it is always set to
       :py:data:`Resampling.NEAREST`. If the image mode is "BGR;15",
       "BGR;16" or "BGR;24", then the default filter is
       :py:data:`Resampling.NEAREST`. Otherwise, the default filter is
       :py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
    :param box: An optional 4-tuple of floats providing
       the source image region to be scaled.
       The values must be within (0, 0, width, height) rectangle.
       If omitted or None, the entire source is used.
    :param reducing_gap: Apply optimization by resizing the image
       in two steps. First, reducing the image by integer times
       using :py:meth:`~PIL.Image.Image.reduce`.
       Second, resizing using regular resampling. The last step
       changes size no less than by ``reducing_gap`` times.
       ``reducing_gap`` may be None (no first step is performed)
       or should be greater than 1.0. The bigger ``reducing_gap``,
       the closer the result to the fair resampling.
       The smaller ``reducing_gap``, the faster resizing.
       With ``reducing_gap`` greater or equal to 3.0, the result is
       indistinguishable from fair resampling in most cases.
       The default value is None (no optimization).
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if resample is None:
        # "BGR;*" modes only get nearest-neighbour by default.
        bgr = self.mode.startswith("BGR;")
        resample = Resampling.NEAREST if bgr else Resampling.BICUBIC
    elif resample not in (
        Resampling.NEAREST,
        Resampling.BILINEAR,
        Resampling.BICUBIC,
        Resampling.LANCZOS,
        Resampling.BOX,
        Resampling.HAMMING,
    ):
        # Build a helpful error listing every valid filter.
        msg = f"Unknown resampling filter ({resample})."

        filters = [
            f"{filter[1]} ({filter[0]})"
            for filter in (
                (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                (Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
                (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
                (Resampling.BOX, "Image.Resampling.BOX"),
                (Resampling.HAMMING, "Image.Resampling.HAMMING"),
            )
        ]
        msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}"
        raise ValueError(msg)

    if reducing_gap is not None and reducing_gap < 1.0:
        msg = "reducing_gap must be 1.0 or greater"
        raise ValueError(msg)

    if box is None:
        box = (0, 0) + self.size

    size = tuple(size)
    if self.size == size and box == (0, 0) + self.size:
        # Nothing to do: same size, whole image.
        return self.copy()

    if self.mode in ("1", "P"):
        # Bilevel/palette pixels cannot be interpolated.
        resample = Resampling.NEAREST

    if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
        # Resample in premultiplied alpha ("La"/"RGBa") to avoid edge
        # artifacts, then convert back to straight alpha.
        im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
        im = im.resize(size, resample, box)
        return im.convert(self.mode)

    self.load()

    if reducing_gap is not None and resample != Resampling.NEAREST:
        # Optional two-step optimization: integer-reduce first, then
        # do the final resampling from the smaller image.
        factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
        factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
        if factor_x > 1 or factor_y > 1:
            reduce_box = self._get_safe_box(size, cast(Resampling, resample), box)
            factor = (factor_x, factor_y)
            # self.reduce may be shadowed by a non-callable plugin
            # attribute, hence the callable() check with a class-level
            # fallback.
            self = (
                self.reduce(factor, box=reduce_box)
                if callable(self.reduce)
                else Image.reduce(self, factor, box=reduce_box)
            )
            # Translate the crop box into the reduced image's coordinates.
            box = (
                (box[0] - reduce_box[0]) / factor_x,
                (box[1] - reduce_box[1]) / factor_y,
                (box[2] - reduce_box[0]) / factor_x,
                (box[3] - reduce_box[1]) / factor_y,
            )

    return self._new(self.im.resize(size, resample, box))
def reduce(
    self,
    factor: int | tuple[int, int],
    box: tuple[int, int, int, int] | None = None,
) -> Image:
    """
    Return a copy of the image shrunk ``factor`` times in each direction.

    If the image size is not evenly divisible by ``factor``, the
    resulting size is rounded up.

    :param factor: A greater than 0 integer, or a tuple of two such
        integers for width and height separately.
    :param box: An optional 4-tuple of ints giving the source region to
        reduce. The values must lie within the ``(0, 0, width, height)``
        rectangle. If omitted or ``None``, the whole image is used.
    """
    if not isinstance(factor, (list, tuple)):
        factor = (factor, factor)

    full_box = (0, 0) + self.size
    if box is None:
        box = full_box

    # Reducing by 1x1 over the whole image is a plain copy.
    if factor == (1, 1) and box == full_box:
        return self.copy()

    if self.mode in ("LA", "RGBA"):
        # Work in the premultiplied-alpha variant so color channels are
        # averaged correctly, then convert back.
        premultiplied = {"LA": "La", "RGBA": "RGBa"}[self.mode]
        reduced = self.convert(premultiplied).reduce(factor, box)
        return reduced.convert(self.mode)

    self.load()
    return self._new(self.im.reduce(factor, box))
def rotate(
    self,
    angle: float,
    resample: Resampling = Resampling.NEAREST,
    expand: int | bool = False,
    center: tuple[float, float] | None = None,
    translate: tuple[int, int] | None = None,
    fillcolor: float | tuple[float, ...] | str | None = None,
) -> Image:
    """
    Returns a rotated copy of this image. This method returns a
    copy of this image, rotated the given number of degrees counter
    clockwise around its centre.

    :param angle: In degrees counter clockwise.
    :param resample: An optional resampling filter. This can be
       one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image has
       mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See :ref:`concept-filters`.
    :param expand: Optional expansion flag. If true, expands the output
       image to make it large enough to hold the entire rotated image.
       If false or omitted, make the output image the same size as the
       input image. Note that the expand flag assumes rotation around
       the center and no translation.
    :param center: Optional center of rotation (a 2-tuple). Origin is
       the upper left corner. Default is the center of the image.
    :param translate: An optional post-rotate translation (a 2-tuple).
    :param fillcolor: An optional color for area outside the rotated image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Normalise so the fast-path comparisons below see a value in [0, 360).
    angle = angle % 360.0

    # Fast paths regardless of filter, as long as we're not
    # translating or changing the center.
    if not (center or translate):
        if angle == 0:
            return self.copy()
        if angle == 180:
            return self.transpose(Transpose.ROTATE_180)
        if angle in (90, 270) and (expand or self.width == self.height):
            return self.transpose(
                Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
            )

    # Calculate the affine matrix. Note that this is the reverse
    # transformation (from destination image to source) because we
    # want to interpolate the (discrete) destination pixel from
    # the local area around the (floating) source pixel.

    # The matrix we actually want (note that it operates from the right):
    # (1, 0, tx)   (1, 0, cx)   ( cos a, sin a, 0)   (1, 0, -cx)
    # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
    # (0, 0,  1)   (0, 0,  1)   (     0,     0, 1)   (0, 0,   1)

    # The reverse matrix is thus:
    # (1, 0, cx)   ( cos -a, sin -a, 0)   (1, 0, -cx)   (1, 0, -tx)
    # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
    # (0, 0,  1)   (      0,      0, 1)   (0, 0,   1)   (0, 0,   1)

    # In any case, the final translation may be updated at the end to
    # compensate for the expand flag.

    w, h = self.size

    if translate is None:
        post_trans = (0, 0)
    else:
        post_trans = translate
    if center is None:
        center = (w / 2, h / 2)

    # Negated: we build the destination -> source (inverse) rotation.
    angle = -math.radians(angle)
    matrix = [
        # Round to 15 decimal places so exact multiples of 90 degrees
        # yield clean 0/1 entries instead of tiny floating-point noise.
        round(math.cos(angle), 15),
        round(math.sin(angle), 15),
        0.0,
        round(-math.sin(angle), 15),
        round(math.cos(angle), 15),
        0.0,
    ]

    def transform(x: float, y: float, matrix: list[float]) -> tuple[float, float]:
        # Apply the six-element affine matrix to the point (x, y).
        (a, b, c, d, e, f) = matrix
        return a * x + b * y + c, d * x + e * y + f

    # Fold the rotation centre and the post-rotate translation into the
    # translation column of the matrix.
    matrix[2], matrix[5] = transform(
        -center[0] - post_trans[0], -center[1] - post_trans[1], matrix
    )
    matrix[2] += center[0]
    matrix[5] += center[1]

    if expand:
        # calculate output size: map the four corners through the matrix
        # and take the bounding box of the results
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            transformed_x, transformed_y = transform(x, y, matrix)
            xx.append(transformed_x)
            yy.append(transformed_y)
        nw = math.ceil(max(xx)) - math.floor(min(xx))
        nh = math.ceil(max(yy)) - math.floor(min(yy))

        # We multiply a translation matrix from the right. Because of its
        # special form, this is the same as taking the image of the
        # translation vector as new translation vector.
        matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
        w, h = nw, nh

    return self.transform(
        (w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
    )
def save(
    self, fp: StrOrBytesPath | IO[bytes], format: str | None = None, **params: Any
) -> None:
    """
    Saves this image under the given filename. If no format is
    specified, the format to use is determined from the filename
    extension, if possible.

    Keyword options can be used to provide additional instructions
    to the writer. If a writer doesn't recognise an option, it is
    silently ignored. The available options are described in the
    :doc:`image format documentation
    <../handbook/image-file-formats>` for each writer.

    You can use a file object instead of a filename. In this case,
    you must always specify the format. The file object must
    implement the ``seek``, ``tell``, and ``write``
    methods, and be opened in binary mode.

    :param fp: A filename (string), os.PathLike object or file object.
    :param format: Optional format override. If omitted, the
       format to use is determined from the filename extension.
       If a file object was used instead of a filename, this
       parameter should always be used.
    :param params: Extra parameters to the image writer. These can also be
       set on the image itself through ``encoderinfo``. This is useful when
       saving multiple images::

           # Saving XMP data to a single image
           from PIL import Image
           red = Image.new("RGB", (1, 1), "#f00")
           red.save("out.mpo", xmp=b"test")

           # Saving XMP data to the second frame of an image
           from PIL import Image
           black = Image.new("RGB", (1, 1))
           red = Image.new("RGB", (1, 1), "#f00")
           red.encoderinfo = {"xmp": b"test"}
           black.save("out.mpo", save_all=True, append_images=[red])

    :returns: None
    :exception ValueError: If the output format could not be determined
       from the file name. Use the format option to solve this.
    :exception OSError: If the file could not be written. The file
       may have been created, and may contain partial data.
    """
    filename: str | bytes = ""
    open_fp = False  # True when this method opened the file and must close it
    if is_path(fp):
        filename = os.fspath(fp)
        open_fp = True
    elif fp == sys.stdout:
        # Writing to stdout requires the underlying binary buffer.
        try:
            fp = sys.stdout.buffer
        except AttributeError:
            pass
    if not filename and hasattr(fp, "name") and is_path(fp.name):
        # only set the name for metadata purposes
        filename = os.fspath(fp.name)

    preinit()

    # Determine the save format from the file extension when not given.
    filename_ext = os.path.splitext(filename)[1].lower()
    ext = filename_ext.decode() if isinstance(filename_ext, bytes) else filename_ext

    if not format:
        if ext not in EXTENSION:
            init()
        try:
            format = EXTENSION[ext]
        except KeyError as e:
            msg = f"unknown file extension: {ext}"
            raise ValueError(msg) from e

    from . import ImageFile

    # may mutate self!
    if isinstance(self, ImageFile.ImageFile) and os.path.abspath(
        filename
    ) == os.path.abspath(self.filename):
        # Saving over the file this image was loaded from: make sure all
        # pixel data is read before the file is truncated.
        self._ensure_mutable()
    else:
        self.load()

    save_all = params.pop("save_all", None)
    self._default_encoderinfo = params
    # Remember any caller-set encoderinfo so it can be restored afterwards.
    encoderinfo = getattr(self, "encoderinfo", {})
    self._attach_default_encoderinfo(self)
    self.encoderconfig: tuple[Any, ...] = ()

    if format.upper() not in SAVE:
        init()
    # Prefer the multi-frame writer when save_all was requested, or when
    # append_images is given and the format supports multiple frames.
    if save_all or (
        save_all is None
        and params.get("append_images")
        and format.upper() in SAVE_ALL
    ):
        save_handler = SAVE_ALL[format.upper()]
    else:
        save_handler = SAVE[format.upper()]

    created = False  # whether this call created the file (for error cleanup)
    if open_fp:
        created = not os.path.exists(filename)
        if params.get("append", False):
            # Open also for reading ("+"), because TIFF save_all
            # writer needs to go back and edit the written data.
            fp = builtins.open(filename, "r+b")
        else:
            fp = builtins.open(filename, "w+b")
    else:
        fp = cast(IO[bytes], fp)

    try:
        save_handler(self, fp, filename)
    except Exception:
        if open_fp:
            fp.close()
        if created:
            # Don't leave a partial file behind if we created it.
            try:
                os.remove(filename)
            except PermissionError:
                pass
        raise
    finally:
        # Restore the caller's encoderinfo regardless of outcome.
        self.encoderinfo = encoderinfo
    if open_fp:
        fp.close()
def _attach_default_encoderinfo(self, im: Image) -> dict[str, Any]:
encoderinfo = getattr(self, "encoderinfo", {})
self.encoderinfo = {**im._default_encoderinfo, **encoderinfo}
return encoderinfo
def seek(self, frame: int) -> None:
    """
    Seek to the given frame in this sequence file.

    Seeking past the end of the sequence raises ``EOFError``. When a
    sequence file is opened, the library automatically seeks to frame 0.

    See :py:meth:`~PIL.Image.Image.tell`.

    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.

    :param frame: Frame number, starting at 0.
    :exception EOFError: If the call attempts to seek beyond the end
       of the sequence.
    """
    # Base implementation: a single-frame image only exposes frame 0.
    # File plugins with multiple frames override this method.
    if frame == 0:
        return
    msg = "no more images in file"
    raise EOFError(msg)
def show(self, title: str | None = None) -> None:
    """
    Display this image, primarily as a debugging aid.

    Delegates to :py:func:`PIL.ImageShow.show`; register a custom
    viewer with :py:func:`PIL.ImageShow.register` to override the
    default behaviour. The image is first written to a temporary
    file, by default in PNG format.

    On Unix the file is opened with **xdg-open**, **display**, **gm**,
    **eog** or **xv**, whichever is available. On macOS it opens in
    the native Preview application, and on Windows in the standard
    PNG display utility.

    :param title: Optional window title, where the viewer supports it.
    """
    _show(self, title=title)
def split(self) -> tuple[Image, ...]:
    """
    Split this image into its individual bands.

    For example, splitting an "RGB" image yields three new images,
    each holding a copy of one original band (red, green, blue).

    If only a single band is needed,
    :py:meth:`~PIL.Image.Image.getchannel` is more convenient and
    faster.

    :returns: A tuple containing bands.
    """
    self.load()
    if self.im.bands == 1:
        # A single-band image's only "band" is a copy of itself.
        return (self.copy(),)
    return tuple(self._new(band) for band in self.im.split())
def getchannel(self, channel: int | str) -> Image:
    """
    Return a single channel of the source image as a new image.

    :param channel: Which channel to return; either an index
        (0 for the "R" channel of "RGB") or a channel name
        ("A" for the alpha channel of "RGBA").
    :returns: An image in "L" mode.

    .. versionadded:: 4.3.0
    """
    self.load()
    if isinstance(channel, str):
        # Translate a band name into its index.
        try:
            channel = self.getbands().index(channel)
        except ValueError as exc:
            msg = f'The image has no channel "{channel}"'
            raise ValueError(msg) from exc
    return self._new(self.im.getband(channel))
def tell(self) -> int:
    """
    Return the current frame number. See :py:meth:`~PIL.Image.Image.seek`.

    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.

    :returns: Frame number, starting with 0.
    """
    # Base implementation: single-frame images are always at frame 0.
    return 0
def thumbnail(
    self,
    size: tuple[float, float],
    resample: Resampling = Resampling.BICUBIC,
    reducing_gap: float | None = 2.0,
) -> None:
    """
    Make this image into a thumbnail. This method modifies the
    image to contain a thumbnail version of itself, no larger than
    the given size. This method calculates an appropriate thumbnail
    size to preserve the aspect of the image, calls the
    :py:meth:`~PIL.Image.Image.draft` method to configure the file reader
    (where applicable), and finally resizes the image.

    Note that this function modifies the :py:class:`~PIL.Image.Image`
    object in place. If you need to use the full resolution image as well,
    apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
    image.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param resample: Optional resampling filter. This can be one
       of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
       :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
       :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
       If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
       (was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
       See: :ref:`concept-filters`.
    :param reducing_gap: Apply optimization by resizing the image
       in two steps. First, reducing the image by integer times
       using :py:meth:`~PIL.Image.Image.reduce` or
       :py:meth:`~PIL.Image.Image.draft` for JPEG images.
       Second, resizing using regular resampling. The last step
       changes size no less than by ``reducing_gap`` times.
       ``reducing_gap`` may be None (no first step is performed)
       or should be greater than 1.0. The bigger ``reducing_gap``,
       the closer the result to the fair resampling.
       The smaller ``reducing_gap``, the faster resizing.
       With ``reducing_gap`` greater or equal to 3.0, the result is
       indistinguishable from fair resampling in most cases.
       The default value is 2.0 (very close to fair resampling
       while still being faster in many cases).
    :returns: None
    """
    provided_size = tuple(map(math.floor, size))

    def preserve_aspect_ratio() -> tuple[int, int] | None:
        # Compute the target size that fits within provided_size while
        # keeping the image's aspect ratio, or None if no shrink is needed.
        def round_aspect(number: float, key: Callable[[int], float]) -> int:
            # Pick floor or ceil, whichever gives an aspect ratio closer to
            # the original (per `key`); never return less than 1 pixel.
            return max(min(math.floor(number), math.ceil(number), key=key), 1)

        x, y = provided_size
        if x >= self.width and y >= self.height:
            # Already small enough: thumbnail never enlarges.
            return None

        aspect = self.width / self.height
        if x / y >= aspect:
            # Height is the limiting dimension.
            x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
        else:
            # Width is the limiting dimension.
            y = round_aspect(
                x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
            )
        return x, y

    preserved_size = preserve_aspect_ratio()
    if preserved_size is None:
        return
    final_size = preserved_size

    box = None
    if reducing_gap is not None:
        # Let the file reader (e.g. JPEG) pre-scale during load; draft may
        # return an adjusted source box to resize from.
        res = self.draft(
            None, (int(size[0] * reducing_gap), int(size[1] * reducing_gap))
        )
        if res is not None:
            box = res[1]

    if self.size != final_size:
        im = self.resize(final_size, resample, box=box, reducing_gap=reducing_gap)

        # Replace this image's pixel storage in place.
        self.im = im.im
        self._size = final_size
        self._mode = self.im.mode

    self.readonly = 0
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(
    self,
    size: tuple[int, int],
    method: Transform | ImageTransformHandler | SupportsGetData,
    data: Sequence[Any] | None = None,
    resample: int = Resampling.NEAREST,
    fill: int = 1,
    fillcolor: float | tuple[float, ...] | str | None = None,
) -> Image:
    """
    Transforms this image. This method creates a new image with the
    given size, and the same mode as the original, and copies data
    to the new image using the given transform.

    :param size: The output size in pixels, as a 2-tuple:
       (width, height).
    :param method: The transformation method. This is one of
      :py:data:`Transform.EXTENT` (cut out a rectangular subregion),
      :py:data:`Transform.AFFINE` (affine transform),
      :py:data:`Transform.PERSPECTIVE` (perspective transform),
      :py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
      :py:data:`Transform.MESH` (map a number of source quadrilaterals
      in one operation).

      It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
      object::

        class Example(Image.ImageTransformHandler):
            def transform(self, size, data, resample, fill=1):
                # Return result

      Implementations of :py:class:`~PIL.Image.ImageTransformHandler`
      for some of the :py:class:`Transform` methods are provided
      in :py:mod:`~PIL.ImageTransform`.

      It may also be an object with a ``method.getdata`` method
      that returns a tuple supplying new ``method`` and ``data`` values::

        class Example:
            def getdata(self):
                method = Image.Transform.EXTENT
                data = (0, 0, 100, 100)
                return method, data
    :param data: Extra data to the transformation method.
    :param resample: Optional resampling filter. It can be one of
       :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image
       has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See: :ref:`concept-filters`.
    :param fill: If ``method`` is an
       :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
       the arguments passed to it. Otherwise, it is unused.
    :param fillcolor: Optional fill color for the area outside the
       transform in the output image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
        # Transform in the premultiplied-alpha mode, then convert back.
        return (
            self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
            .transform(size, method, data, resample, fill, fillcolor)
            .convert(self.mode)
        )

    if isinstance(method, ImageTransformHandler):
        # Delegate entirely to the handler object.
        return method.transform(size, self, resample=resample, fill=fill)

    if hasattr(method, "getdata"):
        # compatibility w. old-style transform objects
        method, data = method.getdata()

    if data is None:
        msg = "missing method data"
        raise ValueError(msg)

    im = new(self.mode, size, fillcolor)
    if self.mode == "P" and self.palette:
        im.palette = self.palette.copy()
    im.info = self.info.copy()
    if method == Transform.MESH:
        # list of quads: each entry maps a source quadrilateral onto a
        # destination box.
        for box, quad in data:
            im.__transformer(
                box, self, Transform.QUAD, quad, resample, fillcolor is None
            )
    else:
        im.__transformer(
            (0, 0) + size, self, method, data, resample, fillcolor is None
        )

    return im
def __transformer(
    self,
    box: tuple[int, int, int, int],
    image: Image,
    method: Transform,
    data: Sequence[float],
    resample: int = Resampling.NEAREST,
    fill: bool = True,
) -> None:
    """
    Transform pixels from ``image`` into the ``box`` region of this
    image in place, normalising ``method``/``data`` into an affine or
    perspective coefficient list for the C layer.
    """
    w = box[2] - box[0]
    h = box[3] - box[1]

    if method == Transform.AFFINE:
        # Only the first six coefficients are used.
        data = data[:6]

    elif method == Transform.EXTENT:
        # convert extent to an affine transform
        x0, y0, x1, y1 = data
        xs = (x1 - x0) / w
        ys = (y1 - y0) / h
        method = Transform.AFFINE
        data = (xs, 0, x0, 0, ys, y0)

    elif method == Transform.PERSPECTIVE:
        # Only the first eight coefficients are used.
        data = data[:8]

    elif method == Transform.QUAD:
        # quadrilateral warp. data specifies the four corners
        # given as NW, SW, SE, and NE.
        nw = data[:2]
        sw = data[2:4]
        se = data[4:6]
        ne = data[6:8]
        x0, y0 = nw
        # Bilinear interpolation coefficients across the output box.
        As = 1.0 / w
        At = 1.0 / h
        data = (
            x0,
            (ne[0] - x0) * As,
            (sw[0] - x0) * At,
            (se[0] - sw[0] - ne[0] + x0) * As * At,
            y0,
            (ne[1] - y0) * As,
            (sw[1] - y0) * At,
            (se[1] - sw[1] - ne[1] + y0) * As * At,
        )

    else:
        msg = "unknown transformation method"
        raise ValueError(msg)

    # Only NEAREST/BILINEAR/BICUBIC are supported for transforms; build a
    # helpful error for anything else.
    if resample not in (
        Resampling.NEAREST,
        Resampling.BILINEAR,
        Resampling.BICUBIC,
    ):
        if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
            unusable: dict[int, str] = {
                Resampling.BOX: "Image.Resampling.BOX",
                Resampling.HAMMING: "Image.Resampling.HAMMING",
                Resampling.LANCZOS: "Image.Resampling.LANCZOS",
            }
            msg = unusable[resample] + f" ({resample}) cannot be used."
        else:
            msg = f"Unknown resampling filter ({resample})."

        filters = [
            f"{filter[1]} ({filter[0]})"
            for filter in (
                (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
            )
        ]
        msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}"
        raise ValueError(msg)

    image.load()

    self.load()

    if image.mode in ("1", "P"):
        # Palette/bilevel sources can only be resampled with NEAREST.
        resample = Resampling.NEAREST

    self.im.transform(box, image.im, method, data, resample, fill)
def transpose(self, method: Transpose) -> Image:
    """
    Flip or rotate the image in 90 degree steps.

    :param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
      :py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
      :py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
      :py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
    :returns: A flipped or rotated copy of this image.
    """
    self.load()
    transposed = self.im.transpose(method)
    return self._new(transposed)
def effect_spread(self, distance: int) -> Image:
    """
    Randomly displace pixels within the image.

    :param distance: Maximum distance, in pixels, by which a pixel may
        be spread.
    """
    self.load()
    spread = self.im.effect_spread(distance)
    return self._new(spread)
def toqimage(self) -> ImageQt.ImageQt:
    """Return a QImage copy of this image (requires Qt bindings)."""
    from . import ImageQt

    if ImageQt.qt_is_installed:
        return ImageQt.toqimage(self)
    msg = "Qt bindings are not installed"
    raise ImportError(msg)
def toqpixmap(self) -> ImageQt.QPixmap:
    """Return a QPixmap copy of this image (requires Qt bindings)."""
    from . import ImageQt

    if ImageQt.qt_is_installed:
        return ImageQt.toqpixmap(self)
    msg = "Qt bindings are not installed"
    raise ImportError(msg)
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler(abc.ABC):
    """
    Abstract mixin for point transforms, consumed by
    :py:meth:`~PIL.Image.Image.point`.
    """

    @abc.abstractmethod
    def point(self, im: Image) -> Image:
        """Apply the point transform to ``im`` and return the result."""
class ImageTransformHandler(abc.ABC):
    """
    Abstract mixin for geometry transforms, consumed by
    :py:meth:`~PIL.Image.Image.transform`.
    """

    @abc.abstractmethod
    def transform(
        self,
        size: tuple[int, int],
        image: Image,
        **options: Any,
    ) -> Image:
        """Transform ``image`` into a new image of the given ``size``."""
# --------------------------------------------------------------------
# Factories
def _check_size(size: Any) -> None:
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a 2 tuple of (width, height)
:returns: None, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
msg = "Size must be a list or tuple"
raise ValueError(msg)
if len(size) != 2:
msg = "Size must be a sequence of length 2"
raise ValueError(msg)
if size[0] < 0 or size[1] < 0:
msg = "Width and height must be >= 0"
raise ValueError(msg)
def new(
    mode: str,
    size: tuple[int, int] | list[int],
    color: float | tuple[float, ...] | str | None = 0,
) -> Image:
    """
    Create a new image with the given mode and size.

    :param mode: The mode to use for the new image. See:
       :ref:`concept-modes`.
    :param size: A 2-tuple, containing (width, height) in pixels.
    :param color: What color to use for the image. Default is black.
       Single-band modes take a single integer or float; multi-band
       modes take a tuple with one value per band. For RGB and HSV
       images a color string, as supported by the ImageColor module,
       may also be given. If the color is None, the image is not
       initialised.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if mode in ("BGR;15", "BGR;16", "BGR;24"):
        deprecate(mode, 12)

    _check_size(size)

    if color is None:
        # Leave the pixel memory uninitialised.
        return Image()._new(core.new(mode, size))

    if isinstance(color, str):
        # Resolve a CSS3-style color specifier for this mode.
        from . import ImageColor

        color = ImageColor.getcolor(color, mode)

    im = Image()
    if mode == "P" and (
        isinstance(color, (list, tuple))
        and all(isinstance(band, int) for band in color)
    ):
        color_ints: tuple[int, ...] = cast(tuple[int, ...], tuple(color))
        if len(color_ints) in (3, 4):
            # An RGB or RGBA value for a palette image: allocate a
            # palette entry and fill with its index.
            from . import ImagePalette

            im.palette = ImagePalette.ImagePalette()
            color = im.palette.getcolor(color_ints)
    return im._new(core.fill(mode, size, color))
def frombytes(
    mode: str,
    size: tuple[int, int],
    data: bytes | bytearray | SupportsArrayInterface,
    decoder_name: str = "raw",
    *args: Any,
) -> Image:
    """
    Create a copy of an image memory from pixel data in a buffer.

    In its simplest form this takes three arguments: mode, size, and
    unpacked pixel data. Any pixel decoder supported by PIL may be
    used; see :ref:`Writing Your Own File Codec ` for the available
    decoders.

    Note that this function decodes pixel data only, not entire images.
    To load a complete image held in a string, wrap it in a
    :py:class:`~io.BytesIO` object and use :py:func:`~PIL.Image.open`.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A byte buffer containing raw data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    _check_size(size)

    im = new(mode, size)
    if im.width == 0 or im.height == 0:
        # Nothing to decode for an empty image.
        return im

    decoder_args: Any = args
    if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple):
        # may pass tuple instead of argument list
        decoder_args = decoder_args[0]

    if decoder_name == "raw" and decoder_args == ():
        # The raw decoder's default arguments are the mode itself.
        decoder_args = mode

    im.frombytes(data, decoder_name, decoder_args)
    return im
def frombuffer(
    mode: str,
    size: tuple[int, int],
    data: bytes | SupportsArrayInterface,
    decoder_name: str = "raw",
    *args: Any,
) -> Image:
    """
    Create an image memory referencing pixel data in a byte buffer.

    Similar to :py:func:`~PIL.Image.frombytes`, but uses the data in
    the byte buffer where possible, so changes to the original buffer
    are reflected in the image. Not all modes can share memory;
    supported modes include "L", "RGBX", "RGBA", and "CMYK".

    Note that this function decodes pixel data only, not entire images.
    To load a complete image file held in a string, wrap it in a
    :py:class:`~io.BytesIO` object and use :py:func:`~PIL.Image.open`.

    The default parameters used for the "raw" decoder differ from those
    used for :py:func:`~PIL.Image.frombytes`. This is a bug, and will
    probably be fixed in a future release. The current release issues a
    warning if you do this; to disable the warning, you should provide
    the full set of parameters. See below for details.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A bytes or other buffer object containing raw
        data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder. For the
        default encoder ("raw"), it's recommended that you provide the
        full set of parameters::

            frombuffer(mode, size, data, "raw", mode, 0, 1)

    :returns: An :py:class:`~PIL.Image.Image` object.

    .. versionadded:: 1.1.4
    """
    _check_size(size)

    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    if decoder_name == "raw":
        if args == ():
            args = mode, 0, 1
        if args[0] in _MAPMODES:
            # Zero-copy path: map the caller's buffer directly.
            im = new(mode, (0, 0))._new(
                core.map_buffer(data, size, decoder_name, 0, args)
            )
            if mode == "P":
                from . import ImagePalette

                im.palette = ImagePalette.ImagePalette("RGB", im.im.getpalette("RGB"))
            # The buffer is shared, so the image must not be written to.
            im.readonly = 1
            return im

    return frombytes(mode, size, data, decoder_name, args)
class SupportsArrayInterface(Protocol):
    """
    Structural type for objects exposing a NumPy-style
    ``__array_interface__`` dictionary.
    """

    @property
    def __array_interface__(self) -> dict[str, Any]:
        raise NotImplementedError()
class SupportsArrowArrayInterface(Protocol):
    """
    Structural type for objects implementing ``__arrow_c_array__`` from
    the Arrow C data interface.
    """

    def __arrow_c_array__(
        self, requested_schema: "PyCapsule" = None  # type: ignore[name-defined] # noqa: F821, UP037
    ) -> tuple["PyCapsule", "PyCapsule"]:  # type: ignore[name-defined] # noqa: F821, UP037
        raise NotImplementedError()
def fromarray(obj: SupportsArrayInterface, mode: str | None = None) -> Image:
    """
    Create an image memory from an object exporting the array interface
    (using the buffer protocol)::

        from PIL import Image
        import numpy as np
        a = np.zeros((5, 5))
        im = Image.fromarray(a)

    If ``obj`` is not contiguous, its ``tobytes`` method is called and
    :py:func:`~PIL.Image.frombuffer` is used instead.

    Be aware that Pillow modes do not always correspond to NumPy
    dtypes: Pillow offers only 1-bit, 8-bit, 32-bit signed integer and
    32-bit floating point pixels. Pillow images can also be converted
    back to arrays::

        from PIL import Image
        import numpy as np
        im = Image.open("hopper.jpg")
        a = np.asarray(im)

    Only pixel values are transferred in either direction, so P and PA
    mode images lose their palette.

    :param obj: Object with array interface.
    :param mode: Optional mode to use when reading ``obj``. Determined
        from the dtype if ``None``. Deprecated. This affects how the
        data is read, not how it is converted afterwards::

            from PIL import Image
            import numpy as np
            a = np.full((1, 1), 300)
            im = Image.fromarray(a, mode="L")
            im.getpixel((0, 0))  # 44
            im = Image.fromarray(a, mode="RGB")
            im.getpixel((0, 0))  # (44, 1, 0)

        See: :ref:`concept-modes` for general information about modes.
    :returns: An image object.

    .. versionadded:: 1.1.6
    """
    arr = obj.__array_interface__
    shape = arr["shape"]
    ndim = len(shape)
    strides = arr.get("strides", None)

    if mode is not None:
        deprecate("'mode' parameter", 13)
        rawmode = mode
    else:
        # Derive mode/rawmode from the dtype; the first two shape members
        # are normalised to 1 to match the typemap keys.
        try:
            typekey = (1, 1) + shape[2:], arr["typestr"]
        except KeyError as exc:
            msg = "Cannot handle this data type"
            raise TypeError(msg) from exc
        try:
            mode, rawmode = _fromarray_typemap[typekey]
        except KeyError as exc:
            typekey_shape, typestr = typekey
            msg = f"Cannot handle this data type: {typekey_shape}, {typestr}"
            raise TypeError(msg) from exc

    # Each mode family permits a maximum array rank.
    if mode in ("1", "L", "I", "P", "F"):
        ndmax = 2
    elif mode == "RGB":
        ndmax = 3
    else:
        ndmax = 4
    if ndim > ndmax:
        msg = f"Too many dimensions: {ndim} > {ndmax}."
        raise ValueError(msg)

    size = (1 if ndim == 1 else shape[1], shape[0])

    if strides is not None:
        # Non-contiguous data: serialise to a contiguous byte string first.
        if hasattr(obj, "tobytes"):
            obj = obj.tobytes()
        elif hasattr(obj, "tostring"):
            obj = obj.tostring()
        else:
            msg = "'strides' requires either tobytes() or tostring()"
            raise ValueError(msg)

    return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
def fromarrow(
    obj: SupportsArrowArrayInterface, mode: str, size: tuple[int, int]
) -> Image:
    """
    Create an image with zero-copy shared memory from an object that
    exports the arrow_c_array interface protocol::

        from PIL import Image
        import pyarrow as pa
        arr = pa.array([0]*(5*5*4), type=pa.uint8())
        im = Image.fromarrow(arr, 'RGBA', (5, 5))

    A ValueError is raised if the data representation of ``obj`` is not
    compatible with Pillow's internal storage.

    Pillow images can also be converted to Arrow objects::

        from PIL import Image
        import pyarrow as pa
        im = Image.open('hopper.jpg')
        arr = pa.array(im)

    As with array support, only pixel values are transferred, so P and
    PA mode images lose their palette.

    :param obj: Object with an arrow_c_array interface.
    :param mode: Image mode.
    :param size: Image size. This must match the storage of the arrow object.
    :returns: An Image object.

    Note that according to the Arrow spec, both the producer and the
    consumer should consider the exported array to be immutable, as
    unsynchronized updates will potentially cause inconsistent data.

    See: :ref:`arrow-support` for more detailed information.

    .. versionadded:: 11.2.1
    """
    if not hasattr(obj, "__arrow_c_array__"):
        msg = "arrow_c_array interface not found"
        raise ValueError(msg)

    schema_capsule, array_capsule = obj.__arrow_c_array__()
    _im = core.new_arrow(mode, size, schema_capsule, array_capsule)
    if not _im:
        msg = "new_arrow returned None without an exception"
        raise ValueError(msg)
    return Image()._new(_im)
def fromqimage(im: ImageQt.QImage) -> ImageFile.ImageFile:
    """Create an image instance from a QImage (requires Qt bindings)."""
    from . import ImageQt

    if ImageQt.qt_is_installed:
        return ImageQt.fromqimage(im)
    msg = "Qt bindings are not installed"
    raise ImportError(msg)
def fromqpixmap(im: ImageQt.QPixmap) -> ImageFile.ImageFile:
    """Create an image instance from a QPixmap (requires Qt bindings)."""
    from . import ImageQt

    if ImageQt.qt_is_installed:
        return ImageQt.fromqpixmap(im)
    msg = "Qt bindings are not installed"
    raise ImportError(msg)
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
((1, 1), "|b1"): ("1", "1;8"),
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "u2"): ("I", "I;16B"),
((1, 1), "i2"): ("I", "I;16BS"),
((1, 1), "u4"): ("I", "I;32B"),
((1, 1), "i4"): ("I", "I;32BS"),
((1, 1), "f4"): ("F", "F;32BF"),
((1, 1), "f8"): ("F", "F;64BF"),
((1, 1, 2), "|u1"): ("LA", "LA"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
# shortcuts:
((1, 1), f"{_ENDIAN}i4"): ("I", "I"),
((1, 1), f"{_ENDIAN}f4"): ("F", "F"),
}
def _decompression_bomb_check(size: tuple[int, int]) -> None:
    """Guard against decompression-bomb images.

    Raises :py:exc:`DecompressionBombError` when the pixel count exceeds
    twice ``MAX_IMAGE_PIXELS`` and emits a ``DecompressionBombWarning``
    when it exceeds the limit itself.  A ``MAX_IMAGE_PIXELS`` of ``None``
    disables the check entirely.
    """
    if MAX_IMAGE_PIXELS is None:
        return

    width, height = size
    # Treat degenerate (zero) dimensions as 1 so the product stays meaningful.
    pixels = max(1, width) * max(1, height)

    if pixels > 2 * MAX_IMAGE_PIXELS:
        msg = (
            f"Image size ({pixels} pixels) exceeds limit of {2 * MAX_IMAGE_PIXELS} "
            "pixels, could be decompression bomb DOS attack."
        )
        raise DecompressionBombError(msg)

    if pixels > MAX_IMAGE_PIXELS:
        warnings.warn(
            f"Image size ({pixels} pixels) exceeds limit of {MAX_IMAGE_PIXELS} pixels, "
            "could be decompression bomb DOS attack.",
            DecompressionBombWarning,
        )
def open(
    fp: StrOrBytesPath | IO[bytes],
    mode: Literal["r"] = "r",
    formats: list[str] | tuple[str, ...] | None = None,
) -> ImageFile.ImageFile:
    """
    Opens and identifies the given image file.

    This is a lazy operation; this function identifies the file, but
    the file remains open and the actual image data is not read from
    the file until you try to process the data (or call the
    :py:meth:`~PIL.Image.Image.load` method). See
    :py:func:`~PIL.Image.new`. See :ref:`file-handling`.

    :param fp: A filename (string), os.PathLike object or a file object.
       The file object must implement ``file.read``,
       ``file.seek``, and ``file.tell`` methods,
       and be opened in binary mode. The file object will also seek to zero
       before reading.
    :param mode: The mode. If given, this argument must be "r".
    :param formats: A list or tuple of formats to attempt to load the file in.
       This can be used to restrict the set of formats checked.
       Pass ``None`` to try all supported formats. You can print the set of
       available formats by running ``python3 -m PIL`` or using
       the :py:func:`PIL.features.pilinfo` function.
    :returns: An :py:class:`~PIL.Image.Image` object.
    :exception FileNotFoundError: If the file cannot be found.
    :exception PIL.UnidentifiedImageError: If the image cannot be opened and
       identified.
    :exception ValueError: If the ``mode`` is not "r", or if a ``StringIO``
       instance is used for ``fp``.
    :exception TypeError: If ``formats`` is not ``None``, a list or a tuple.
    """
    # Validate arguments.  mode exists only for backward compatibility and
    # must always be "r"; StringIO is rejected because image data is binary.
    if mode != "r":
        msg = f"bad mode {repr(mode)}"  # type: ignore[unreachable]
        raise ValueError(msg)
    elif isinstance(fp, io.StringIO):
        msg = (  # type: ignore[unreachable]
            "StringIO cannot be used to open an image. "
            "Binary data must be used instead."
        )
        raise ValueError(msg)

    if formats is None:
        # Default to the registry of all known format identifiers.
        formats = ID
    elif not isinstance(formats, (list, tuple)):
        msg = "formats must be a list or tuple"  # type: ignore[unreachable]
        raise TypeError(msg)

    # exclusive_fp: True when this function owns fp and must close it on
    # failure (we opened the file ourselves, or buffered a non-seekable one).
    exclusive_fp = False
    filename: str | bytes = ""
    if is_path(fp):
        filename = os.fspath(fp)
        fp = builtins.open(filename, "rb")
        exclusive_fp = True
    else:
        fp = cast(IO[bytes], fp)

    try:
        fp.seek(0)
    except (AttributeError, io.UnsupportedOperation):
        # Non-seekable stream: buffer it fully so plugins can rewind.
        fp = io.BytesIO(fp.read())
        exclusive_fp = True

    # Magic-number prefix handed to each plugin's accept() check.
    prefix = fp.read(16)

    preinit()

    warning_messages: list[str] = []

    def _open_core(
        fp: IO[bytes],
        filename: str | bytes,
        prefix: bytes,
        formats: list[str] | tuple[str, ...],
    ) -> ImageFile.ImageFile | None:
        # Try each requested format in order; return the first image whose
        # plugin accepts the prefix and parses the stream successfully.
        for i in formats:
            i = i.upper()
            if i not in OPEN:
                # Unknown identifier: load all plugins and retry the lookup.
                init()
            try:
                factory, accept = OPEN[i]
                # accept() may return a str: a warning, treated as rejection.
                result = not accept or accept(prefix)
                if isinstance(result, str):
                    warning_messages.append(result)
                elif result:
                    fp.seek(0)
                    im = factory(fp, filename)
                    _decompression_bomb_check(im.size)
                    return im
            except (SyntaxError, IndexError, TypeError, struct.error) as e:
                # Plugin failed to parse; optionally record why and move on.
                if WARN_POSSIBLE_FORMATS:
                    warning_messages.append(i + " opening failed. " + str(e))
            except BaseException:
                # Unexpected error: don't leak a file handle we own.
                if exclusive_fp:
                    fp.close()
                raise
        return None

    im = _open_core(fp, filename, prefix, formats)

    if im is None and formats is ID:
        # Nothing matched among the already-registered formats; if init()
        # registered new plugins, retry with just the previously unchecked ones.
        checked_formats = ID.copy()
        if init():
            im = _open_core(
                fp,
                filename,
                prefix,
                tuple(format for format in formats if format not in checked_formats),
            )

    if im:
        # The image takes over responsibility for closing fp when we own it.
        im._exclusive_fp = exclusive_fp
        return im

    if exclusive_fp:
        fp.close()
    for message in warning_messages:
        warnings.warn(message)
    msg = "cannot identify image file %r" % (filename if filename else fp)
    raise UnidentifiedImageError(msg)
#
# Image processing.
def alpha_composite(im1: Image, im2: Image) -> Image:
    """
    Alpha composite im2 over im1.

    :param im1: The first image. Must have mode RGBA.
    :param im2: The second image. Must have mode RGBA, and the same size as
       the first image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    for im in (im1, im2):
        im.load()
    composited = core.alpha_composite(im1.im, im2.im)
    return im1._new(composited)
def blend(im1: Image, im2: Image, alpha: float) -> Image:
    """
    Interpolate between two input images using a constant alpha::

        out = image1 * (1.0 - alpha) + image2 * alpha

    :param im1: The first image.
    :param im2: The second image. Must have the same mode and size as
       the first image.
    :param alpha: The interpolation alpha factor. 0.0 returns a copy of
       the first image, 1.0 a copy of the second. Values outside that
       range are allowed; if necessary the result is clipped to fit the
       allowed output range.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    for im in (im1, im2):
        im.load()
    blended = core.blend(im1.im, im2.im, alpha)
    return im1._new(blended)
def composite(image1: Image, image2: Image, mask: Image) -> Image:
    """
    Create composite image by blending images using a transparency mask.

    :param image1: The first image.
    :param image2: The second image. Must have the same mode and
       size as the first image.
    :param mask: A mask image. This image can have mode
       "1", "L", or "RGBA", and must have the same size as the
       other two images.
    """
    # Paste image1 over a copy of image2, letting the mask pick the blend.
    result = image2.copy()
    result.paste(image1, None, mask)
    return result
def eval(image: Image, *args: Callable[[int], float]) -> Image:
    """
    Apply a one-argument function to each pixel in the given image.  If
    the image has more than one band, the same function is applied to
    each band.  Note that the function is evaluated once for each
    possible pixel value, so you cannot use random components or other
    generators.

    :param image: The input image.
    :param function: A function object, taking one integer argument.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Only the first positional argument is used (historical *args signature).
    fn = args[0]
    return image.point(fn)
def merge(mode: str, bands: Sequence[Image]) -> Image:
    """
    Merge a set of single band images into a new multiband image.

    :param mode: The mode to use for the output image. See:
       :ref:`concept-modes`.
    :param bands: A sequence containing one single-band image for
       each band in the output image. All bands must have the
       same size.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if getmodebands(mode) != len(bands) or "*" in mode:
        msg = "wrong number of bands"
        raise ValueError(msg)

    reference = bands[0]
    expected_mode = getmodetype(mode)
    for band in bands[1:]:
        if band.mode != expected_mode:
            msg = "mode mismatch"
            raise ValueError(msg)
        if band.size != reference.size:
            msg = "size mismatch"
            raise ValueError(msg)

    for band in bands:
        band.load()
    return reference._new(core.merge(mode, *(b.im for b in bands)))
# --------------------------------------------------------------------
# Plugin registry
def register_open(
    id: str,
    factory: (
        Callable[[IO[bytes], str | bytes], ImageFile.ImageFile]
        | type[ImageFile.ImageFile]
    ),
    accept: Callable[[bytes], bool | str] | None = None,
) -> None:
    """
    Register an image file plugin. This function should not be used
    in application code.

    :param id: An image format identifier.
    :param factory: An image file factory method.
    :param accept: An optional function that can be used to quickly
       reject images having another format.
    """
    # Identifiers are stored upper-cased throughout the registries.
    fmt = id.upper()
    if fmt not in ID:
        ID.append(fmt)
    OPEN[fmt] = (factory, accept)
def register_mime(id: str, mimetype: str) -> None:
    """
    Registers an image MIME type by populating ``Image.MIME``. This function
    should not be used in application code.

    ``Image.MIME`` provides a mapping from image format identifiers to mime
    formats, but :py:meth:`~PIL.ImageFile.ImageFile.get_format_mimetype` can
    provide a different result for specific images.

    :param id: An image format identifier.
    :param mimetype: The image MIME type for this format.
    """
    # Identifiers are stored upper-cased, matching the other registries.
    MIME[id.upper()] = mimetype
def register_save(
    id: str, driver: Callable[[Image, IO[bytes], str | bytes], None]
) -> None:
    """
    Registers an image save function. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param driver: A function to save images in this format.
    """
    SAVE[id.upper()] = driver
def register_save_all(
    id: str, driver: Callable[[Image, IO[bytes], str | bytes], None]
) -> None:
    """
    Registers an image function to save all the frames
    of a multiframe format. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param driver: A function to save images in this format.
    """
    SAVE_ALL[id.upper()] = driver
def register_extension(id: str, extension: str) -> None:
    """
    Registers an image extension. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param extension: An extension used for this format.
    """
    # Extensions are stored lower-cased, identifiers upper-cased.
    EXTENSION[extension.lower()] = id.upper()
def register_extensions(id: str, extensions: list[str]) -> None:
    """
    Registers several image extensions at once. This function should not
    be used in application code.

    :param id: An image format identifier.
    :param extensions: A list of extensions used for this format.
    """
    # Delegate to register_extension for each entry.
    for ext in extensions:
        register_extension(id, ext)
def registered_extensions() -> dict[str, str]:
    """
    Returns a dictionary containing all file extensions belonging
    to registered plugins
    """
    # Load all plugins first so every extension is registered.
    init()
    # NOTE: returns the live registry dict, not a copy.
    return EXTENSION
def register_decoder(name: str, decoder: type[ImageFile.PyDecoder]) -> None:
    """
    Registers an image decoder. This function should not be
    used in application code.

    :param name: The name of the decoder
    :param decoder: An ImageFile.PyDecoder object

    .. versionadded:: 4.1.0
    """
    # Decoder names are case-sensitive, unlike format identifiers.
    DECODERS[name] = decoder
def register_encoder(name: str, encoder: type[ImageFile.PyEncoder]) -> None:
    """
    Registers an image encoder. This function should not be
    used in application code.

    :param name: The name of the encoder
    :param encoder: An ImageFile.PyEncoder object

    .. versionadded:: 4.1.0
    """
    # Encoder names are case-sensitive, unlike format identifiers.
    ENCODERS[name] = encoder
# --------------------------------------------------------------------
# Simple display support.
def _show(image: Image, **options: Any) -> None:
    # Display helper: defer the ImageShow import until actually needed.
    from . import ImageShow

    ImageShow.show(image, **options)
# --------------------------------------------------------------------
# Effects
def effect_mandelbrot(
    size: tuple[int, int], extent: tuple[float, float, float, float], quality: int
) -> Image:
    """
    Generate a Mandelbrot set covering the given extent.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param extent: The extent to cover, as a 4-tuple:
       (x0, y0, x1, y1).
    :param quality: Quality.
    """
    rendered = core.effect_mandelbrot(size, extent, quality)
    return Image()._new(rendered)
def effect_noise(size: tuple[int, int], sigma: float) -> Image:
    """
    Generate Gaussian noise centered around 128.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param sigma: Standard deviation of noise.
    """
    noise = core.effect_noise(size, sigma)
    return Image()._new(noise)
def linear_gradient(mode: str) -> Image:
    """
    Generate a 256x256 linear gradient from black to white, top to bottom.

    :param mode: Input mode.
    """
    gradient = core.linear_gradient(mode)
    return Image()._new(gradient)
def radial_gradient(mode: str) -> Image:
    """
    Generate a 256x256 radial gradient from black to white, centre to edge.

    :param mode: Input mode.
    """
    gradient = core.radial_gradient(mode)
    return Image()._new(gradient)
# --------------------------------------------------------------------
# Resources
def _apply_env_variables(env: dict[str, str] | None = None) -> None:
    """Apply ``PILLOW_*`` environment variables to the core allocator.

    Values may carry a ``k`` or ``m`` suffix (1024 / 1024**2 multiplier).
    Unparseable or rejected values produce a warning and are skipped.

    :param env: Mapping to read from; defaults to ``os.environ``.
    """
    env_dict = os.environ if env is None else env

    settings = (
        ("PILLOW_ALIGNMENT", core.set_alignment),
        ("PILLOW_BLOCK_SIZE", core.set_block_size),
        ("PILLOW_BLOCKS_MAX", core.set_blocks_max),
    )
    for var_name, setter in settings:
        if var_name not in env_dict:
            continue
        raw = env_dict[var_name].lower()

        # Strip an optional size suffix and remember its multiplier.
        units = 1
        for postfix, mul in (("k", 1024), ("m", 1024 * 1024)):
            if raw.endswith(postfix):
                units = mul
                raw = raw[: -len(postfix)]

        try:
            value = int(raw) * units
        except ValueError:
            warnings.warn(f"{var_name} is not int")
            continue

        try:
            setter(value)
        except ValueError as e:
            # The core setter validates the value; report and move on.
            warnings.warn(f"{var_name}: {e}")
# Apply PILLOW_* environment overrides at import time, and flush the core
# block cache when the interpreter exits.
_apply_env_variables()
atexit.register(core.clear_cache)


if TYPE_CHECKING:
    # For type checkers, Exif behaves as a MutableMapping[int, Any];
    # at runtime the unsubscripted ABC is used.
    _ExifBase = MutableMapping[int, Any]
else:
    _ExifBase = MutableMapping
class Exif(_ExifBase):
    """
    This class provides read and write access to EXIF image data::

        from PIL import Image
        im = Image.open("exif.png")
        exif = im.getexif()  # Returns an instance of this class

    Information can be read and written, iterated over or deleted::

        print(exif[274])  # 1
        exif[274] = 2
        for k, v in exif.items():
            print("Tag", k, "Value", v)  # Tag 274 Value 2
        del exif[274]

    To access information beyond IFD0, :py:meth:`~PIL.Image.Exif.get_ifd`
    returns a dictionary::

        from PIL import ExifTags
        im = Image.open("exif_gps.jpg")
        exif = im.getexif()
        gps_ifd = exif.get_ifd(ExifTags.IFD.GPSInfo)
        print(gps_ifd)

    Other IFDs include ``ExifTags.IFD.Exif``, ``ExifTags.IFD.MakerNote``,
    ``ExifTags.IFD.Interop`` and ``ExifTags.IFD.IFD1``.

    :py:mod:`~PIL.ExifTags` also has enum classes to provide names for data::

        print(exif[ExifTags.Base.Software])  # PIL
        print(gps_ifd[ExifTags.GPS.GPSDateStamp])  # 1999:99:99 99:99:99
    """

    # "<" (little) or ">" (big) endian marker of the loaded TIFF data;
    # None until data has been loaded.
    endian: str | None = None
    # When True, _get_head() emits a BigTIFF (version 0x2b) header.
    bigtiff = False
    _loaded = False

    def __init__(self) -> None:
        # Tag values materialized from (or written over) the lazy directory.
        self._data: dict[int, Any] = {}
        # Offset values stashed by hide_offsets().
        self._hidden_data: dict[int, Any] = {}
        # Cache of nested IFD dictionaries, keyed by the pointer tag.
        self._ifds: dict[int, dict[int, Any]] = {}
        # Lazily-consumed tag directory; entries migrate into _data on access.
        self._info: TiffImagePlugin.ImageFileDirectory_v2 | None = None
        # Raw bytes last passed to load(), used to skip redundant reloads.
        self._loaded_exif: bytes | None = None

    def _fixup(self, value: Any) -> Any:
        # Unwrap 1-tuples to their single element; any value that doesn't
        # support len() (or isn't a tuple) is returned unchanged.
        try:
            if len(value) == 1 and isinstance(value, tuple):
                return value[0]
        except Exception:
            pass
        return value

    def _fixup_dict(self, src_dict: dict[int, Any]) -> dict[int, Any]:
        # Helper function
        # returns a dict with any single item tuples/lists as individual values
        return {k: self._fixup(v) for k, v in src_dict.items()}

    def _get_ifd_dict(
        self, offset: int, group: int | None = None
    ) -> dict[int, Any] | None:
        # Read the IFD located at ``offset`` in self.fp and return it as a
        # plain dict, or None if the offset is unusable.
        try:
            # an offset pointer to the location of the nested embedded IFD.
            # It should be a long, but may be corrupted.
            self.fp.seek(offset)
        except (KeyError, TypeError):
            return None
        else:
            from . import TiffImagePlugin

            info = TiffImagePlugin.ImageFileDirectory_v2(self.head, group=group)
            info.load(self.fp)
            return self._fixup_dict(dict(info))

    def _get_head(self) -> bytes:
        # Synthesize a TIFF header matching self.endian / self.bigtiff.
        version = b"\x2b" if self.bigtiff else b"\x2a"
        if self.endian == "<":
            head = b"II" + version + b"\x00" + o32le(8)
        else:
            head = b"MM\x00" + version + o32be(8)
        if self.bigtiff:
            head += o32le(8) if self.endian == "<" else o32be(8)
            head += b"\x00\x00\x00\x00"
        return head

    def load(self, data: bytes) -> None:
        # Extract EXIF information.  This is highly experimental,
        # and is likely to be replaced with something better in a future
        # version.

        # The EXIF record consists of a TIFF file embedded in a JPEG
        # application marker (!).
        if data == self._loaded_exif:
            # Same bytes as last time: nothing to do.
            return
        self._loaded_exif = data
        self._data.clear()
        self._hidden_data.clear()
        self._ifds.clear()
        # Strip any number of leading "Exif\x00\x00" APP1 signatures.
        while data and data.startswith(b"Exif\x00\x00"):
            data = data[6:]
        if not data:
            self._info = None
            return

        self.fp: IO[bytes] = io.BytesIO(data)
        self.head = self.fp.read(8)
        # process dictionary
        from . import TiffImagePlugin

        self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head)
        self.endian = self._info._endian
        self.fp.seek(self._info.next)
        self._info.load(self.fp)

    def load_from_fp(self, fp: IO[bytes], offset: int | None = None) -> None:
        # Load EXIF data directly from an already-open file.  When ``offset``
        # is given, a synthetic header is used and the file is seeked there;
        # otherwise the header and first-IFD offset are read from the stream.
        self._loaded_exif = None
        self._data.clear()
        self._hidden_data.clear()
        self._ifds.clear()

        # process dictionary
        from . import TiffImagePlugin

        self.fp = fp
        if offset is not None:
            self.head = self._get_head()
        else:
            self.head = self.fp.read(8)
        self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head)
        if self.endian is None:
            self.endian = self._info._endian
        if offset is None:
            offset = self._info.next
        self.fp.tell()
        self.fp.seek(offset)
        self._info.load(self.fp)

    def _get_merged_dict(self) -> dict[int, Any]:
        # IFD0 merged with the Exif-extension IFD, plus the GPS IFD nested
        # under its pointer tag.
        merged_dict = dict(self)

        # get EXIF extension
        if ExifTags.IFD.Exif in self:
            ifd = self._get_ifd_dict(self[ExifTags.IFD.Exif], ExifTags.IFD.Exif)
            if ifd:
                merged_dict.update(ifd)

        # GPS
        if ExifTags.IFD.GPSInfo in self:
            merged_dict[ExifTags.IFD.GPSInfo] = self._get_ifd_dict(
                self[ExifTags.IFD.GPSInfo], ExifTags.IFD.GPSInfo
            )

        return merged_dict

    def tobytes(self, offset: int = 8) -> bytes:
        # Serialize back to an EXIF blob (APP1 signature + TIFF data), with
        # internal IFD pointers written relative to ``offset``.
        from . import TiffImagePlugin

        head = self._get_head()
        ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head)
        # Cached nested IFDs not shadowed by an explicit tag value.
        for tag, ifd_dict in self._ifds.items():
            if tag not in self:
                ifd[tag] = ifd_dict
        for tag, value in self.items():
            if tag in [
                ExifTags.IFD.Exif,
                ExifTags.IFD.GPSInfo,
            ] and not isinstance(value, dict):
                # Pointer tags holding raw offsets: expand to the real IFD.
                value = self.get_ifd(tag)
                if (
                    tag == ExifTags.IFD.Exif
                    and ExifTags.IFD.Interop in value
                    and not isinstance(value[ExifTags.IFD.Interop], dict)
                ):
                    value = value.copy()
                    value[ExifTags.IFD.Interop] = self.get_ifd(ExifTags.IFD.Interop)
            ifd[tag] = value
        return b"Exif\x00\x00" + head + ifd.tobytes(offset)

    def get_ifd(self, tag: int) -> dict[int, Any]:
        # Return the nested IFD pointed to by ``tag``, loading and caching it
        # on first access; an empty dict when unavailable.
        if tag not in self._ifds:
            if tag == ExifTags.IFD.IFD1:
                if self._info is not None and self._info.next != 0:
                    ifd = self._get_ifd_dict(self._info.next)
                    if ifd is not None:
                        self._ifds[tag] = ifd
            elif tag in [ExifTags.IFD.Exif, ExifTags.IFD.GPSInfo]:
                offset = self._hidden_data.get(tag, self.get(tag))
                if offset is not None:
                    ifd = self._get_ifd_dict(offset, tag)
                    if ifd is not None:
                        self._ifds[tag] = ifd
            elif tag in [ExifTags.IFD.Interop, ExifTags.IFD.MakerNote]:
                if ExifTags.IFD.Exif not in self._ifds:
                    self.get_ifd(ExifTags.IFD.Exif)
                tag_data = self._ifds[ExifTags.IFD.Exif][tag]
                if tag == ExifTags.IFD.MakerNote:
                    from .TiffImagePlugin import ImageFileDirectory_v2

                    if tag_data.startswith(b"FUJIFILM"):
                        ifd_offset = i32le(tag_data, 8)
                        ifd_data = tag_data[ifd_offset:]
                        makernote = {}
                        # NOTE(review): the next two lines are garbled in this
                        # dump -- text between "<" and ">" characters was
                        # stripped during extraction, collapsing the FUJIFILM
                        # and the vendor-specific MakerNote parsing branches
                        # into each other.  Restore this region from the
                        # original source; it is not valid Python as-is.
                        for i in range(struct.unpack(" 4:
                        (offset,) = struct.unpack("H", tag_data[:2])[0]):
                            ifd_tag, typ, count, data = struct.unpack(
                                ">HHL4s", tag_data[i * 12 + 2 : (i + 1) * 12 + 2]
                            )
                            if ifd_tag == 0x1101:
                                # CameraInfo
                                (offset,) = struct.unpack(">L", data)
                                self.fp.seek(offset)

                                camerainfo: dict[str, int | bytes] = {
                                    "ModelID": self.fp.read(4)
                                }

                                self.fp.read(4)
                                # Seconds since 2000
                                camerainfo["TimeStamp"] = i32le(self.fp.read(12))

                                self.fp.read(4)
                                camerainfo["InternalSerialNumber"] = self.fp.read(4)

                                self.fp.read(12)
                                parallax = self.fp.read(4)
                                handler = ImageFileDirectory_v2._load_dispatch[
                                    TiffTags.FLOAT
                                ][1]
                                camerainfo["Parallax"] = handler(
                                    ImageFileDirectory_v2(), parallax, False
                                )[0]

                                self.fp.read(4)
                                camerainfo["Category"] = self.fp.read(2)

                                makernote = {0x1101: camerainfo}
                        self._ifds[tag] = makernote
                else:
                    # Interop
                    ifd = self._get_ifd_dict(tag_data, tag)
                    if ifd is not None:
                        self._ifds[tag] = ifd
        ifd = self._ifds.setdefault(tag, {})
        if tag == ExifTags.IFD.Exif and self._hidden_data:
            # Hide the nested pointer tags when offsets have been hidden.
            ifd = {
                k: v
                for (k, v) in ifd.items()
                if k not in (ExifTags.IFD.Interop, ExifTags.IFD.MakerNote)
            }
        return ifd

    def hide_offsets(self) -> None:
        # Move the Exif/GPS pointer tags out of the visible mapping so they
        # are not serialized; get_ifd() still finds them via _hidden_data.
        for tag in (ExifTags.IFD.Exif, ExifTags.IFD.GPSInfo):
            if tag in self:
                self._hidden_data[tag] = self[tag]
                del self[tag]

    def __str__(self) -> str:
        if self._info is not None:
            # Load all keys into self._data
            for tag in self._info:
                self[tag]

        return str(self._data)

    def __len__(self) -> int:
        # Count distinct tags across materialized data and the lazy directory.
        keys = set(self._data)
        if self._info is not None:
            keys.update(self._info)
        return len(keys)

    def __getitem__(self, tag: int) -> Any:
        # Migrate the tag from the lazy directory into _data on first access.
        if self._info is not None and tag not in self._data and tag in self._info:
            self._data[tag] = self._fixup(self._info[tag])
            del self._info[tag]
        return self._data[tag]

    def __contains__(self, tag: object) -> bool:
        return tag in self._data or (self._info is not None and tag in self._info)

    def __setitem__(self, tag: int, value: Any) -> None:
        # An explicit value supersedes any lazy directory entry.
        if self._info is not None and tag in self._info:
            del self._info[tag]
        self._data[tag] = value

    def __delitem__(self, tag: int) -> None:
        if self._info is not None and tag in self._info:
            del self._info[tag]
        else:
            del self._data[tag]

    def __iter__(self) -> Iterator[int]:
        keys = set(self._data)
        if self._info is not None:
            keys.update(self._info)
        return iter(keys)
venv\Lib\site-packages\PIL\ImageChops.py
#
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image
def constant(image: Image.Image, value: int) -> Image.Image:
    """Fill a channel with a given gray level.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    # Only the size of ``image`` is used; its pixel data is ignored.
    return Image.new("L", image.size, value)
def duplicate(image: Image.Image) -> Image.Image:
    """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    return image.copy()
def invert(image: Image.Image) -> Image.Image:
    """
    Invert an image (channel). ::

        out = MAX - image

    :rtype: :py:class:`~PIL.Image.Image`
    """
    image.load()
    inverted = image.im.chop_invert()
    return image._new(inverted)
def lighter(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Pixel-by-pixel comparison of the two images, keeping the lighter
    value at each position. ::

        out = max(image1, image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_lighter(image2.im))
def darker(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Pixel-by-pixel comparison of the two images, keeping the darker
    value at each position. ::

        out = min(image1, image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_darker(image2.im))
def difference(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Absolute value of the pixel-by-pixel difference between the two
    images. ::

        out = abs(image1 - image2)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_difference(image2.im))
def multiply(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimpose two images by multiplication.

    Multiplying with a solid black image yields black; multiplying with a
    solid white image leaves the other image unchanged. ::

        out = image1 * image2 / MAX

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_multiply(image2.im))
def screen(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimpose two inverted images on top of each other. ::

        out = MAX - ((MAX - image1) * (MAX - image2) / MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_screen(image2.im))
def soft_light(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimpose two images using the Soft Light algorithm.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_soft_light(image2.im))
def hard_light(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimpose two images using the Hard Light algorithm.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_hard_light(image2.im))
def overlay(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """
    Superimpose two images using the Overlay algorithm.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_overlay(image2.im))
def add(
    image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0
) -> Image.Image:
    """
    Add two images, dividing the result by ``scale`` and adding
    ``offset``. ::

        out = ((image1 + image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    summed = image1.im.chop_add(image2.im, scale, offset)
    return image1._new(summed)
def subtract(
    image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0
) -> Image.Image:
    """
    Subtract two images, dividing the result by ``scale`` and adding
    ``offset``. ::

        out = ((image1 - image2) / scale + offset)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    diff = image1.im.chop_subtract(image2.im, scale, offset)
    return image1._new(diff)
def add_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Add two images, without clipping the result. ::

        out = ((image1 + image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_add_modulo(image2.im))
def subtract_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Subtract two images, without clipping the result. ::

        out = ((image1 - image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_subtract_modulo(image2.im))
def logical_and(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical AND between two images.

    Both of the images must have mode "1". If you would like to perform a
    logical AND on an image with a mode other than "1", try
    :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask
    as the second image. ::

        out = ((image1 and image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_and(image2.im))
def logical_or(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical OR between two images.

    Both of the images must have mode "1". ::

        out = ((image1 or image2) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_or(image2.im))
def logical_xor(image1: Image.Image, image2: Image.Image) -> Image.Image:
    """Logical XOR between two images.

    Both of the images must have mode "1". ::

        out = ((bool(image1) != bool(image2)) % MAX)

    :rtype: :py:class:`~PIL.Image.Image`
    """
    for im in (image1, image2):
        im.load()
    return image1._new(image1.im.chop_xor(image2.im))
def blend(image1: Image.Image, image2: Image.Image, alpha: float) -> Image.Image:
    """Blend images using constant transparency weight. Alias for
    :py:func:`PIL.Image.blend`.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    return Image.blend(image1, image2, alpha)
def composite(
    image1: Image.Image, image2: Image.Image, mask: Image.Image
) -> Image.Image:
    """Create composite using transparency mask. Alias for
    :py:func:`PIL.Image.composite`.

    :rtype: :py:class:`~PIL.Image.Image`
    """
    return Image.composite(image1, image2, mask)
def offset(image: Image.Image, xoffset: int, yoffset: int | None = None) -> Image.Image:
    """Return a copy of the image with its data shifted by the given
    distances, wrapping around the edges. If ``yoffset`` is omitted, it
    is assumed to be equal to ``xoffset``.

    :param image: Input image.
    :param xoffset: The horizontal distance.
    :param yoffset: The vertical distance. If omitted, both
       distances are set to the same value.
    :rtype: :py:class:`~PIL.Image.Image`
    """
    yoffset = xoffset if yoffset is None else yoffset
    image.load()
    return image._new(image.im.offset(xoffset, yoffset))
venv\Lib\site-packages\PIL\ImageCms.py
# The Python Imaging Library.
# $Id$
# Optional color management support, based on Kevin Cazabon's PyCMS
# library.
# Originally released under LGPL. Graciously donated to PIL in
# March 2009, for distribution under the standard PIL license
# History:
# 2009-03-08 fl Added to PIL.
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
# Copyright (c) 2013 by Eric Soroos
# See the README file for information on usage and redistribution. See
# below for the original description.
from __future__ import annotations
import operator
import sys
from enum import IntEnum, IntFlag
from functools import reduce
from typing import Any, Literal, SupportsFloat, SupportsInt, Union
from . import Image, __version__
from ._deprecate import deprecate
from ._typing import SupportsRead
try:
from . import _imagingcms as core
_CmsProfileCompatible = Union[
str, SupportsRead[bytes], core.CmsProfile, "ImageCmsProfile"
]
except ImportError as ex:
# Allow error import for doc purposes, but error out when accessing
# anything in core.
from ._util import DeferredError
core = DeferredError.new(ex)
# Legacy pyCMS module description; exposed (with a deprecation warning) as
# ``DESCRIPTION`` via the module-level __getattr__ hook below.
_DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
kevin@cazabon.com
https://www.cazabon.com
pyCMS home page: https://www.cazabon.com/pyCMS
littleCMS home page: https://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
1.0.0 pil Oct 2013 Port to LCMS 2.
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask
me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
# Legacy pyCMS version string; exposed (deprecated) as ``VERSION``.
_VERSION = "1.0.0 pil"
def __getattr__(name: str) -> Any:
    """Module-level attribute hook: serve the legacy ``DESCRIPTION``,
    ``VERSION`` and ``FLAGS`` constants with a deprecation warning."""
    # name -> (value, replacement passed to deprecate(), or None)
    legacy = {
        "DESCRIPTION": (_DESCRIPTION, None),
        "VERSION": (_VERSION, None),
        "FLAGS": (_FLAGS, "PIL.ImageCms.Flags"),
    }
    if name in legacy:
        value, replacement = legacy[name]
        if replacement is None:
            deprecate(f"PIL.ImageCms.{name}", 12)
        else:
            deprecate(f"PIL.ImageCms.{name}", 12, replacement)
        return value
    msg = f"module '{__name__}' has no attribute '{name}'"
    raise AttributeError(msg)
# --------------------------------------------------------------------.
#
# intent/direction values
class Intent(IntEnum):
    # ICC rendering intents; values match the littleCMS INTENT_* constants
    # (this module wraps littleCMS -- see the header comment).
    PERCEPTUAL = 0
    RELATIVE_COLORIMETRIC = 1
    SATURATION = 2
    ABSOLUTE_COLORIMETRIC = 3
class Direction(IntEnum):
    # Transform direction selectors used when querying profile capabilities.
    INPUT = 0
    OUTPUT = 1
    PROOF = 2
#
# flags
class Flags(IntFlag):
    """Flags and documentation are taken from ``lcms2.h``."""

    NONE = 0
    NOCACHE = 0x0040
    """Inhibit 1-pixel cache"""
    NOOPTIMIZE = 0x0100
    """Inhibit optimizations"""
    NULLTRANSFORM = 0x0200
    """Don't transform anyway"""
    GAMUTCHECK = 0x1000
    """Out of Gamut alarm"""
    SOFTPROOFING = 0x4000
    """Do softproofing"""
    BLACKPOINTCOMPENSATION = 0x2000
    NOWHITEONWHITEFIXUP = 0x0004
    """Don't fix scum dot"""
    HIGHRESPRECALC = 0x0400
    """Use more memory to give better accuracy"""
    LOWRESPRECALC = 0x0800
    """Use less memory to minimize resources"""
    # this should be 8BITS_DEVICELINK, but that is not a valid name in Python:
    USE_8BITS_DEVICELINK = 0x0008
    """Create 8 bits devicelinks"""
    GUESSDEVICECLASS = 0x0020
    """Guess device class (for ``transform2devicelink``)"""
    KEEP_SEQUENCE = 0x0080
    """Keep profile sequence for devicelink creation"""
    FORCE_CLUT = 0x0002
    """Force CLUT optimization"""
    CLUT_POST_LINEARIZATION = 0x0001
    """create postlinearization tables if possible"""
    CLUT_PRE_LINEARIZATION = 0x0010
    """create prelinearization tables if possible"""
    NONEGATIVES = 0x8000
    """Prevent negative numbers in floating point transforms"""
    COPY_ALPHA = 0x04000000
    """Alpha channels are copied on ``cmsDoTransform()``"""
    NODEFAULTRESOURCEDEF = 0x01000000

    # Bits 16-23 encode the CLUT grid-point count; these private members
    # exist so that values built by GRIDPOINTS() remain valid Flags.
    _GRIDPOINTS_1 = 1 << 16
    _GRIDPOINTS_2 = 2 << 16
    _GRIDPOINTS_4 = 4 << 16
    _GRIDPOINTS_8 = 8 << 16
    _GRIDPOINTS_16 = 16 << 16
    _GRIDPOINTS_32 = 32 << 16
    _GRIDPOINTS_64 = 64 << 16
    _GRIDPOINTS_128 = 128 << 16

    @staticmethod
    def GRIDPOINTS(n: int) -> Flags:
        """
        Fine-tune control over number of gridpoints

        :param n: :py:class:`int` in range ``0 <= n <= 255``
        """
        # Mask to one byte and shift into bits 16-23.
        return Flags.NONE | ((n & 0xFF) << 16)
# Union of every defined Flags member; used by the pyCMS functions below to
# validate user-supplied ``flags`` arguments (``0 <= flags <= _MAX_FLAG``).
_MAX_FLAG = reduce(operator.or_, Flags)

# Legacy pyCMS flag-name table, served by the module __getattr__ as the
# deprecated ``PIL.ImageCms.FLAGS`` attribute.  New code should use the
# Flags enum above.
_FLAGS = {
    "MATRIXINPUT": 1,
    "MATRIXOUTPUT": 2,
    "MATRIXONLY": (1 | 2),
    "NOWHITEONWHITEFIXUP": 4,  # Don't hot fix scum dot
    # Don't create prelinearization tables on precalculated transforms
    # (internal use):
    "NOPRELINEARIZATION": 16,
    "GUESSDEVICECLASS": 32,  # Guess device class (for transform2devicelink)
    "NOTCACHE": 64,  # Inhibit 1-pixel cache
    "NOTPRECALC": 256,
    "NULLTRANSFORM": 512,  # Don't transform anyway
    "HIGHRESPRECALC": 1024,  # Use more memory to give better accuracy
    "LOWRESPRECALC": 2048,  # Use less memory to minimize resources
    "WHITEBLACKCOMPENSATION": 8192,
    "BLACKPOINTCOMPENSATION": 8192,
    "GAMUTCHECK": 4096,  # Out of Gamut alarm
    "SOFTPROOFING": 16384,  # Do softproofing
    "PRESERVEBLACK": 32768,  # Black preservation
    "NODEFAULTRESOURCEDEF": 16777216,  # CRD special
    "GRIDPOINTS": lambda n: (n & 0xFF) << 16,  # Gridpoints
}
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile:
    """Wrapper around a low-level CMS profile, carrying pyCMS metadata."""

    def __init__(self, profile: str | SupportsRead[bytes] | core.CmsProfile) -> None:
        """
        :param profile: Either a string representing a filename,
            a file like object containing a profile or a
            low-level profile object
        """
        self.filename = None
        self.product_name = None  # profile.product_name
        self.product_info = None  # profile.product_info

        if isinstance(profile, str):
            if sys.platform == "win32":
                encoded_path = profile.encode()
                try:
                    encoded_path.decode("ascii")
                except UnicodeDecodeError:
                    # Non-ASCII filename on Windows: read the bytes in
                    # Python and hand them to the C layer instead of the
                    # path (presumably the path-based opener cannot handle
                    # such names — behavior preserved from the original).
                    with open(profile, "rb") as fp:
                        self.profile = core.profile_frombytes(fp.read())
                    return
            self.filename = profile
            self.profile = core.profile_open(profile)
            return

        if hasattr(profile, "read"):
            # File-like object: consume its contents.
            self.profile = core.profile_frombytes(profile.read())
            return

        if isinstance(profile, core.CmsProfile):
            # Already a low-level profile object.
            self.profile = profile
            return

        msg = "Invalid type for Profile"  # type: ignore[unreachable]
        raise TypeError(msg)

    def tobytes(self) -> bytes:
        """
        Returns the profile in a format suitable for embedding in
        saved images.

        :returns: a bytes object containing the ICC profile.
        """
        return core.profile_tobytes(self.profile)
class ImageCmsTransform(Image.ImagePointHandler):
    """
    Transform. This can be used with the procedural API, or with the standard
    :py:func:`~PIL.Image.Image.point` method.

    Will return the output profile in the ``output.info['icc_profile']``.
    """

    def __init__(
        self,
        input: ImageCmsProfile,
        output: ImageCmsProfile,
        input_mode: str,
        output_mode: str,
        intent: Intent = Intent.PERCEPTUAL,
        proof: ImageCmsProfile | None = None,
        proof_intent: Intent = Intent.ABSOLUTE_COLORIMETRIC,
        flags: Flags = Flags.NONE,
    ):
        # Modes the CMS layer is expected to handle; anything else triggers
        # a deprecation warning below (removal targeted for version 12).
        supported_modes = (
            "RGB",
            "RGBA",
            "RGBX",
            "CMYK",
            "I;16",
            "I;16L",
            "I;16B",
            "YCbCr",
            "LAB",
            "L",
            "1",
        )
        for mode in (input_mode, output_mode):
            if mode not in supported_modes:
                # Where a close replacement exists, name it in the warning.
                deprecate(
                    mode,
                    12,
                    {
                        "L;16": "I;16 or I;16L",
                        "L:16B": "I;16B",
                        "YCCA": "YCbCr",
                        "YCC": "YCbCr",
                    }.get(mode),
                )
        # Build either a plain or a soft-proofing transform in the C layer.
        if proof is None:
            self.transform = core.buildTransform(
                input.profile, output.profile, input_mode, output_mode, intent, flags
            )
        else:
            self.transform = core.buildProofTransform(
                input.profile,
                output.profile,
                proof.profile,
                input_mode,
                output_mode,
                intent,
                proof_intent,
                flags,
            )
        # Note: inputMode and outputMode are for pyCMS compatibility only
        self.input_mode = self.inputMode = input_mode
        self.output_mode = self.outputMode = output_mode

        self.output_profile = output

    def point(self, im: Image.Image) -> Image.Image:
        # Entry point used by Image.point(); delegates to apply().
        return self.apply(im)

    def apply(self, im: Image.Image, imOut: Image.Image | None = None) -> Image.Image:
        # Apply the transform into imOut (allocated here if not supplied),
        # then embed the output profile for downstream consumers.
        if imOut is None:
            imOut = Image.new(self.output_mode, im.size, None)
        self.transform.apply(im.getim(), imOut.getim())
        imOut.info["icc_profile"] = self.output_profile.tobytes()
        return imOut

    def apply_in_place(self, im: Image.Image) -> Image.Image:
        # In-place application requires the image already be in the output
        # mode, since the buffer layout cannot change in place.
        if im.mode != self.output_mode:
            msg = "mode mismatch"
            raise ValueError(msg)  # wrong output mode
        self.transform.apply(im.getim(), im.getim())
        im.info["icc_profile"] = self.output_profile.tobytes()
        return im
def get_display_profile(handle: SupportsInt | None = None) -> ImageCmsProfile | None:
    """
    (experimental) Fetches the profile for the current display device.

    :param handle: An optional display handle (an ``ImageWin.HDC`` or
        anything convertible to ``int``); defaults to the primary display.
    :returns: ``None`` if the profile is not known.
    """
    # Only the Win32 backend can query a display profile.
    if sys.platform != "win32":
        return None

    from . import ImageWin  # type: ignore[unused-ignore, unreachable]

    if isinstance(handle, ImageWin.HDC):
        raw_profile = core.get_display_profile_win32(int(handle), 1)
    else:
        raw_profile = core.get_display_profile_win32(int(handle or 0))
    return None if raw_profile is None else ImageCmsProfile(raw_profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
class PyCMSError(Exception):
    """(pyCMS) Exception class.

    This is used for all errors in the pyCMS API.
    """
def profileToProfile(
    im: Image.Image,
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    outputMode: str | None = None,
    inPlace: bool = False,
    flags: Flags = Flags.NONE,
) -> Image.Image | None:
    """
    (pyCMS) Applies an ICC transformation to a given image, mapping from
    ``inputProfile`` to ``outputProfile``.

    The image is converted from the input profile's color space to the
    output profile's color space, using ``renderingIntent`` to decide how
    out-of-gamut colors are handled.  ``outputMode`` can additionally
    request a mode conversion (e.g. RGB to CMYK), in which case the input
    profile must handle the input mode and the output profile the output
    mode.

    :param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...)
        or Image.open(...), etc.)
    :param inputProfile: String, as a valid filename path to the ICC input
        profile you wish to use for this image, or a profile object
    :param outputProfile: String, as a valid filename path to the ICC output
        profile you wish to use for this image, or a profile object
    :param renderingIntent: Integer (0-3) specifying the rendering intent you
        wish to use for the transform (see :py:class:`Intent`; the default
        is ``Intent.PERCEPTUAL``)
    :param outputMode: A valid PIL mode for the output image (i.e. "RGB",
        "CMYK", etc.).  When rendering in place, this MUST equal the input
        mode or be omitted; if omitted, it defaults to ``im.mode``
    :param inPlace: Boolean.  If ``True``, the original image is modified
        in-place and ``None`` is returned.  If ``False`` (default), a new
        :py:class:`~PIL.Image.Image` object is returned with the transform
        applied.
    :param flags: Integer (0-...) specifying additional flags
    :returns: Either ``None`` or a new :py:class:`~PIL.Image.Image` object,
        depending on the value of ``inPlace``
    :exception PyCMSError: If a profile or mode is invalid, or if applying
        the transform fails
    """
    if outputMode is None:
        outputMode = im.mode

    # Validate the numeric arguments up front so out-of-range values
    # surface as a PyCMSError instead of a lower-level failure.
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        source_profile = (
            inputProfile
            if isinstance(inputProfile, ImageCmsProfile)
            else ImageCmsProfile(inputProfile)
        )
        destination_profile = (
            outputProfile
            if isinstance(outputProfile, ImageCmsProfile)
            else ImageCmsProfile(outputProfile)
        )
        transform = ImageCmsTransform(
            source_profile,
            destination_profile,
            im.mode,
            outputMode,
            renderingIntent,
            flags=flags,
        )
        if inPlace:
            transform.apply_in_place(im)
            return None
        return transform.apply(im)
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getOpenProfile(
    profileFilename: str | SupportsRead[bytes] | core.CmsProfile,
) -> ImageCmsProfile:
    """
    (pyCMS) Opens an ICC profile file.

    The returned profile object can be passed back into pyCMS for use in
    creating transforms and such (as in
    ImageCms.buildTransformFromOpenProfiles()).

    :param profileFilename: String, as a valid filename path to the ICC
        profile you wish to open, or a file-like object.
    :returns: A CmsProfile class object.
    :exception PyCMSError: If ``profileFilename`` is not a valid filename
        for an ICC profile.
    """
    try:
        opened = ImageCmsProfile(profileFilename)
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
    return opened
def buildTransform(
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    inMode: str,
    outMode: str,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    flags: Flags = Flags.NONE,
) -> ImageCmsTransform:
    """
    (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
    ``outputProfile``.  Use applyTransform to apply the transform to a given
    image.

    The transform will ONLY convert images that are in ``inMode`` to images
    that are in ``outMode`` color format (PIL mode, i.e. "RGB", "RGBA",
    "CMYK", etc.), with ``renderingIntent`` deciding how out-of-gamut colors
    are handled.  Building the transform is a fair part of the overhead of
    ImageCms.profileToProfile(), so when converting multiple images with the
    same settings, build it once and reuse it with ImageCms.applyTransform().
    A class object (rather than a raw handle) is returned so the PIL
    input/output modes can be tracked; they are stored in the ``inMode`` and
    ``outMode`` attributes.

    :param inputProfile: String, as a valid filename path to the ICC input
        profile you wish to use for this transform, or a profile object
    :param outputProfile: String, as a valid filename path to the ICC output
        profile you wish to use for this transform, or a profile object
    :param inMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param outMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param renderingIntent: Integer (0-3) specifying the rendering intent you
        wish to use for the transform (see :py:class:`Intent`)
    :param flags: Integer (0-...) specifying additional flags
    :returns: A CmsTransform class object.
    :exception PyCMSError: If a profile is invalid, a mode is unsupported,
        or an error occurs while creating the transform.
    """
    # Reject out-of-range arguments with a PyCMSError before touching the
    # C layer.
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        source_profile = (
            inputProfile
            if isinstance(inputProfile, ImageCmsProfile)
            else ImageCmsProfile(inputProfile)
        )
        destination_profile = (
            outputProfile
            if isinstance(outputProfile, ImageCmsProfile)
            else ImageCmsProfile(outputProfile)
        )
        return ImageCmsTransform(
            source_profile,
            destination_profile,
            inMode,
            outMode,
            renderingIntent,
            flags=flags,
        )
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def buildProofTransform(
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    proofProfile: _CmsProfileCompatible,
    inMode: str,
    outMode: str,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    proofRenderingIntent: Intent = Intent.ABSOLUTE_COLORIMETRIC,
    flags: Flags = Flags.SOFTPROOFING,
) -> ImageCmsTransform:
    """
    (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
    ``outputProfile``, but tries to simulate the result that would be
    obtained on the ``proofProfile`` device.

    This is known as "soft-proofing": the output device (usually a monitor
    or quick printer) is used to judge how the image would look on the
    simulated ``proofProfile`` device, which is typically more expensive or
    time-consuming to print to.  The colors on the output device are
    adjusted to match those of the simulated device; when the simulated
    device has a much wider gamut than the output device, results may be
    marginal.  ``renderingIntent`` governs the input->proof (simulated)
    conversion and ``proofRenderingIntent`` the proof->output conversion.

    The transform will ONLY convert images that are in ``inMode`` to images
    that are in ``outMode`` color format (PIL mode, i.e. "RGB", "RGBA",
    "CMYK", etc.).  Usage of the resulting transform object is exactly the
    same as with ImageCms.buildTransform().  Note that some profiles are
    created specifically for one direction and cannot be used for others.

    :param inputProfile: String, as a valid filename path to the ICC input
        profile you wish to use for this transform, or a profile object
    :param outputProfile: String, as a valid filename path to the ICC output
        (monitor, usually) profile you wish to use for this transform, or a
        profile object
    :param proofProfile: String, as a valid filename path to the ICC proof
        profile you wish to use for this transform, or a profile object
    :param inMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param outMode: String, as a valid PIL mode that the appropriate profile
        also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
    :param renderingIntent: Integer (0-3) specifying the rendering intent you
        wish to use for the input->proof (simulated) transform (see
        :py:class:`Intent`)
    :param proofRenderingIntent: Integer (0-3) specifying the rendering
        intent you wish to use for the proof->output transform
    :param flags: Integer (0-...) specifying additional flags
    :returns: A CmsTransform class object.
    :exception PyCMSError: If a profile is invalid, a mode is unsupported,
        or an error occurs while creating the transform.
    """
    # Reject out-of-range arguments with a PyCMSError before touching the
    # C layer.  (As in the original, only renderingIntent is range-checked,
    # not proofRenderingIntent.)
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        source_profile = (
            inputProfile
            if isinstance(inputProfile, ImageCmsProfile)
            else ImageCmsProfile(inputProfile)
        )
        destination_profile = (
            outputProfile
            if isinstance(outputProfile, ImageCmsProfile)
            else ImageCmsProfile(outputProfile)
        )
        simulated_profile = (
            proofProfile
            if isinstance(proofProfile, ImageCmsProfile)
            else ImageCmsProfile(proofProfile)
        )
        return ImageCmsTransform(
            source_profile,
            destination_profile,
            inMode,
            outMode,
            renderingIntent,
            simulated_profile,
            proofRenderingIntent,
            flags,
        )
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
# pyCMS compatibility aliases: profiles returned by getOpenProfile() are
# ordinary ImageCmsProfile objects, so the "FromOpenProfiles" variants are
# simply the same functions under their historical names.
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
def applyTransform(
    im: Image.Image, transform: ImageCmsTransform, inPlace: bool = False
) -> Image.Image | None:
    """
    (pyCMS) Applies a transform to a given image.

    This applies a pre-calculated transform (from ImageCms.buildTransform()
    or ImageCms.buildTransformFromOpenProfiles()) to an image.  The same
    transform can be reused for multiple images, saving considerable
    calculation time when doing the same conversion repeatedly.

    ``im.mode`` must equal ``transform.input_mode``.  In-place modification
    additionally requires ``transform.input_mode == transform.output_mode``,
    because the mode cannot change in place (buffer sizes differ between
    modes); otherwise a new image of the same dimensions in mode
    ``transform.output_mode`` is returned.

    :param im: An :py:class:`~PIL.Image.Image` object, and ``im.mode`` must
        be the same as the ``input_mode`` supported by the transform.
    :param transform: A valid CmsTransform class object
    :param inPlace: Bool.  If ``True``, ``im`` is modified in place and
        ``None`` is returned; if ``False`` (default), a new
        :py:class:`~PIL.Image.Image` object with the transform applied is
        returned (and ``im`` is not changed).
    :returns: Either ``None`` or a new :py:class:`~PIL.Image.Image` object,
        depending on the value of ``inPlace``.  The profile will be returned
        in the image's ``info['icc_profile']``.
    :exception PyCMSError: If the modes are unsupported or mismatched, or if
        an error occurs while the transform is being applied.
    """
    try:
        if inPlace:
            transform.apply_in_place(im)
            return None
        return transform.apply(im)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def createProfile(
    colorSpace: Literal["LAB", "XYZ", "sRGB"], colorTemp: SupportsFloat = 0
) -> core.CmsProfile:
    """
    (pyCMS) Creates a profile.

    Use this function to create common profiles on-the-fly instead of
    having to supply a profile on disk and knowing the path to it.  It
    returns a normal CmsProfile object that can be passed to
    ImageCms.buildTransformFromOpenProfiles() to create a transform to
    apply to images.

    :param colorSpace: String, the color space of the profile you wish to
        create.  Currently only "LAB", "XYZ", and "sRGB" are supported.
    :param colorTemp: Positive number for the white point for the profile,
        in degrees Kelvin (i.e. 5000, 6500, 9600, etc.).  The default is
        for a D50 illuminant if omitted (5000k).  colorTemp is ONLY applied
        to LAB profiles, and is ignored for XYZ and sRGB.
    :returns: A CmsProfile class object
    :exception PyCMSError: If ``colorSpace`` is unsupported, ``colorTemp``
        is not numeric (for LAB), or profile creation fails.
    """
    if colorSpace not in ("LAB", "XYZ", "sRGB"):
        msg = (
            f"Color space not supported for on-the-fly profile creation ({colorSpace})"
        )
        raise PyCMSError(msg)

    if colorSpace == "LAB":
        # The white point only matters for LAB; coerce it up front so a bad
        # value surfaces as a PyCMSError rather than a TypeError later.
        try:
            colorTemp = float(colorTemp)
        except (TypeError, ValueError) as e:
            msg = f'Color temperature must be numeric, "{colorTemp}" not valid'
            raise PyCMSError(msg) from e

    try:
        return core.createProfile(colorSpace, colorTemp)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileName(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the internal product name for the given profile.

    This is the INTERNAL name of the profile (stored in an ICC tag in the
    profile itself), usually the one used when the profile was originally
    created; sometimes it also contains additional information supplied by
    the creator.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal name of the profile as stored
        in an ICC tag.
    :exception PyCMSError: If the profile is invalid or the name tag cannot
        be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Assembled in Python, not C.  pyCMS 1.x produced
        # "model - manufacturer" (or the description), falling back to just
        # the model when the manufacturer was missing or the model was long;
        # the trailing newline is kept for compatibility.
        model = profile.profile.model
        manufacturer = profile.profile.manufacturer

        if not (model or manufacturer):
            name = profile.profile.profile_description or ""
        elif not manufacturer or (model and len(model) > 30):
            name = f"{model}"
        else:
            name = f"{model} - {manufacturer}"
        return name + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileInfo(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the internal product information for the given profile.

    The info tag often contains details about the profile, and how it was
    created, as supplied by the creator.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError: If the profile is invalid or the info tag cannot
        be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Assembled in Python, not C.  pyCMS historically appended the K007
        # tag and the white point too, but the white point bits weren't
        # working well and are skipped; the \r\n\r\n separators and trailing
        # blank line are kept for compatibility.
        description = profile.profile.profile_description
        cpright = profile.profile.copyright
        parts = [tag for tag in (description, cpright) if tag]
        return "\r\n\r\n".join(parts) + "\r\n\r\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileCopyright(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the copyright for the given profile.

    This is the information stored in the profile's copyright tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError: If the profile is invalid or the copyright tag
        cannot be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        tag = profile.profile.copyright
        # The trailing newline preserves pyCMS compatibility.
        return f"{tag or ''}\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileManufacturer(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the manufacturer for the given profile.

    This is the information stored in the profile's manufacturer tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError: If the profile is invalid or the manufacturer tag
        cannot be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        tag = profile.profile.manufacturer
        # The trailing newline preserves pyCMS compatibility.
        return f"{tag or ''}\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileModel(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the model for the given profile.

    This is the information stored in the profile's model tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError: If the profile is invalid or the model tag cannot
        be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        tag = profile.profile.model
        # The trailing newline preserves pyCMS compatibility.
        return f"{tag or ''}\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileDescription(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the description for the given profile.

    This is the information stored in the profile's description tag.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError: If the profile is invalid or the description tag
        cannot be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        tag = profile.profile.profile_description
        # The trailing newline preserves pyCMS compatibility.
        return f"{tag or ''}\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getDefaultIntent(profile: _CmsProfileCompatible) -> int:
    """
    (pyCMS) Gets the default intent name for the given profile.

    Most profiles support multiple rendering intents, but are intended
    mostly for one type of conversion; this returns that (usually best
    optimized) default.  If you wish to use a different intent, check it
    first with ImageCms.isIntentSupported().

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: Integer 0-3 specifying the default rendering intent for this
        profile (see :py:class:`Intent`).
    :exception PyCMSError: If the profile is invalid or the default intent
        cannot be determined.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        return profile.profile.rendering_intent
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def isIntentSupported(
    profile: _CmsProfileCompatible, intent: Intent, direction: Direction
) -> Literal[-1, 1]:
    """
    (pyCMS) Checks if a given intent is supported.

    Use this function to verify that you can use your desired ``intent``
    with ``profile``, and that ``profile`` can be used for the
    input/output/proof profile as you desire.  Some profiles are created
    specifically for one "direction" and cannot be used for others; some
    can only be used with certain rendering intents.  Either verify this
    before building a transform, or be prepared to catch the
    :exc:`PyCMSError` the transform functions raise for unsupported modes.

    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :param intent: Integer (0-3) specifying the rendering intent you wish to
        use with this profile (see :py:class:`Intent`)
    :param direction: Integer specifying if the profile is to be used for
        input, output, or proof (see :py:class:`Direction`)
    :returns: 1 if the intent/direction are supported, -1 if they are not.
    :exception PyCMSError: If the profile is invalid or support cannot be
        determined.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # FIXME: different results have been seen for the same data with
        # different compilers. Bug in LittleCMS or in the binding?
        return 1 if profile.profile.is_intent_supported(intent, direction) else -1
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def versions() -> tuple[str, str | None, str, str]:
    """
    (pyCMS) Fetches versions.

    Deprecated (removal scheduled for version 12); the emitted warning
    suggests using
    ``(PIL.features.version("littlecms2"), sys.version, PIL.__version__)``
    instead.

    :returns: ``(_VERSION, core.littlecms_version,
        <python version>, __version__)``.
    """
    deprecate(
        "PIL.ImageCms.versions()",
        12,
        '(PIL.features.version("littlecms2"), sys.version, PIL.__version__)',
    )
    return _VERSION, core.littlecms_version, sys.version.split()[0], __version__
# ---- File boundary: venv\Lib\site-packages\PIL\ImageColor.py ----
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import re
from functools import lru_cache
from . import Image
@lru_cache
def getrgb(color: str) -> tuple[int, int, int] | tuple[int, int, int, int]:
    """
    Convert a color string to an RGB or RGBA tuple. If the string cannot be
    parsed, this function raises a :py:exc:`ValueError` exception.

    .. versionadded:: 1.1.4

    :param color: A color string
    :return: ``(red, green, blue[, alpha])``
    """
    if len(color) > 100:
        msg = "color specifier is too long"
        raise ValueError(msg)
    color = color.lower()

    # Named colours: resolve through the colormap. Hex-string entries are
    # parsed once and the resulting tuple is cached back into the map.
    named = colormap.get(color, None)
    if named:
        if isinstance(named, tuple):
            return named
        resolved = getrgb(named)
        assert len(resolved) == 3
        colormap[color] = resolved
        return resolved

    # Hexadecimal notations: #rgb, #rgba, #rrggbb and #rrggbbaa.
    hex_match = re.match("#([a-f0-9]{3,4}|[a-f0-9]{6}|[a-f0-9]{8})$", color)
    if hex_match:
        digits = hex_match.group(1)
        if len(digits) <= 4:
            # Short form: each digit is doubled (e.g. "f" -> 0xff).
            return tuple(int(digit * 2, 16) for digit in digits)
        return tuple(int(digits[i : i + 2], 16) for i in range(0, len(digits), 2))

    def percent_to_byte(percent: str) -> int:
        # Scale a "0"-"100" percentage string to 0-255 with rounding.
        return int((int(percent) * 255) / 100.0 + 0.5)

    def float_to_byte(channel: float) -> int:
        # Scale a 0.0-1.0 channel to 0-255 with rounding.
        return int(channel * 255 + 0.5)

    m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
    if m:
        return int(m.group(1)), int(m.group(2)), int(m.group(3))
    m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
    if m:
        return (
            percent_to_byte(m.group(1)),
            percent_to_byte(m.group(2)),
            percent_to_byte(m.group(3)),
        )
    m = re.match(
        r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
    )
    if m:
        from colorsys import hls_to_rgb

        # colorsys expects HLS order: hue, lightness (group 3),
        # saturation (group 2).
        red, green, blue = hls_to_rgb(
            float(m.group(1)) / 360.0,
            float(m.group(3)) / 100.0,
            float(m.group(2)) / 100.0,
        )
        return float_to_byte(red), float_to_byte(green), float_to_byte(blue)
    m = re.match(
        r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
    )
    if m:
        from colorsys import hsv_to_rgb

        red, green, blue = hsv_to_rgb(
            float(m.group(1)) / 360.0,
            float(m.group(2)) / 100.0,
            float(m.group(3)) / 100.0,
        )
        return float_to_byte(red), float_to_byte(green), float_to_byte(blue)
    m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
    if m:
        return int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
    msg = f"unknown color specifier: {repr(color)}"
    raise ValueError(msg)
@lru_cache
def getcolor(color: str, mode: str) -> int | tuple[int, ...]:
    """
    Same as :py:func:`~PIL.ImageColor.getrgb` for most modes. However, if
    ``mode`` is HSV, converts the RGB value to a HSV value, or if ``mode`` is
    not color or a palette image, converts the RGB value to a grayscale value.
    If the string cannot be parsed, this function raises a :py:exc:`ValueError`
    exception.

    .. versionadded:: 1.1.4

    :param color: A color string
    :param mode: Convert result to this mode
    :return: ``graylevel, (graylevel, alpha) or (red, green, blue[, alpha])``
    """
    # Parse with getrgb, then adapt the result to the requested mode.
    parsed = getrgb(color)
    if len(parsed) == 4:
        rgb, alpha = parsed[:3], parsed[3]
    else:
        rgb, alpha = parsed, 255
    if mode == "HSV":
        from colorsys import rgb_to_hsv

        r, g, b = rgb
        h, s, v = rgb_to_hsv(r / 255, g / 255, b / 255)
        return int(h * 255), int(s * 255), int(v * 255)
    if Image.getmodebase(mode) == "L":
        r, g, b = rgb
        # ITU-R Recommendation 601-2 for nonlinear RGB
        # scaled to 24 bits to match the convert's implementation.
        graylevel = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16
        return (graylevel, alpha) if mode[-1] == "A" else graylevel
    if mode[-1] == "A":
        return rgb + (alpha,)
    return rgb
# Mapping of lowercase colour names to "#rrggbb" hex strings. getrgb()
# replaces an entry's hex string with the parsed (r, g, b) tuple the first
# time that name is looked up, so values may be either type at runtime.
colormap: dict[str, str | tuple[int, int, int]] = {
    # X11 colour table from https://drafts.csswg.org/css-color-4/, with
    # gray/grey spelling issues fixed. This is a superset of HTML 4.0
    # colour names used in CSS 1.
    "aliceblue": "#f0f8ff",
    "antiquewhite": "#faebd7",
    "aqua": "#00ffff",
    "aquamarine": "#7fffd4",
    "azure": "#f0ffff",
    "beige": "#f5f5dc",
    "bisque": "#ffe4c4",
    "black": "#000000",
    "blanchedalmond": "#ffebcd",
    "blue": "#0000ff",
    "blueviolet": "#8a2be2",
    "brown": "#a52a2a",
    "burlywood": "#deb887",
    "cadetblue": "#5f9ea0",
    "chartreuse": "#7fff00",
    "chocolate": "#d2691e",
    "coral": "#ff7f50",
    "cornflowerblue": "#6495ed",
    "cornsilk": "#fff8dc",
    "crimson": "#dc143c",
    "cyan": "#00ffff",
    "darkblue": "#00008b",
    "darkcyan": "#008b8b",
    "darkgoldenrod": "#b8860b",
    "darkgray": "#a9a9a9",
    "darkgrey": "#a9a9a9",
    "darkgreen": "#006400",
    "darkkhaki": "#bdb76b",
    "darkmagenta": "#8b008b",
    "darkolivegreen": "#556b2f",
    "darkorange": "#ff8c00",
    "darkorchid": "#9932cc",
    "darkred": "#8b0000",
    "darksalmon": "#e9967a",
    "darkseagreen": "#8fbc8f",
    "darkslateblue": "#483d8b",
    "darkslategray": "#2f4f4f",
    "darkslategrey": "#2f4f4f",
    "darkturquoise": "#00ced1",
    "darkviolet": "#9400d3",
    "deeppink": "#ff1493",
    "deepskyblue": "#00bfff",
    "dimgray": "#696969",
    "dimgrey": "#696969",
    "dodgerblue": "#1e90ff",
    "firebrick": "#b22222",
    "floralwhite": "#fffaf0",
    "forestgreen": "#228b22",
    "fuchsia": "#ff00ff",
    "gainsboro": "#dcdcdc",
    "ghostwhite": "#f8f8ff",
    "gold": "#ffd700",
    "goldenrod": "#daa520",
    "gray": "#808080",
    "grey": "#808080",
    "green": "#008000",
    "greenyellow": "#adff2f",
    "honeydew": "#f0fff0",
    "hotpink": "#ff69b4",
    "indianred": "#cd5c5c",
    "indigo": "#4b0082",
    "ivory": "#fffff0",
    "khaki": "#f0e68c",
    "lavender": "#e6e6fa",
    "lavenderblush": "#fff0f5",
    "lawngreen": "#7cfc00",
    "lemonchiffon": "#fffacd",
    "lightblue": "#add8e6",
    "lightcoral": "#f08080",
    "lightcyan": "#e0ffff",
    "lightgoldenrodyellow": "#fafad2",
    "lightgreen": "#90ee90",
    "lightgray": "#d3d3d3",
    "lightgrey": "#d3d3d3",
    "lightpink": "#ffb6c1",
    "lightsalmon": "#ffa07a",
    "lightseagreen": "#20b2aa",
    "lightskyblue": "#87cefa",
    "lightslategray": "#778899",
    "lightslategrey": "#778899",
    "lightsteelblue": "#b0c4de",
    "lightyellow": "#ffffe0",
    "lime": "#00ff00",
    "limegreen": "#32cd32",
    "linen": "#faf0e6",
    "magenta": "#ff00ff",
    "maroon": "#800000",
    "mediumaquamarine": "#66cdaa",
    "mediumblue": "#0000cd",
    "mediumorchid": "#ba55d3",
    "mediumpurple": "#9370db",
    "mediumseagreen": "#3cb371",
    "mediumslateblue": "#7b68ee",
    "mediumspringgreen": "#00fa9a",
    "mediumturquoise": "#48d1cc",
    "mediumvioletred": "#c71585",
    "midnightblue": "#191970",
    "mintcream": "#f5fffa",
    "mistyrose": "#ffe4e1",
    "moccasin": "#ffe4b5",
    "navajowhite": "#ffdead",
    "navy": "#000080",
    "oldlace": "#fdf5e6",
    "olive": "#808000",
    "olivedrab": "#6b8e23",
    "orange": "#ffa500",
    "orangered": "#ff4500",
    "orchid": "#da70d6",
    "palegoldenrod": "#eee8aa",
    "palegreen": "#98fb98",
    "paleturquoise": "#afeeee",
    "palevioletred": "#db7093",
    "papayawhip": "#ffefd5",
    "peachpuff": "#ffdab9",
    "peru": "#cd853f",
    "pink": "#ffc0cb",
    "plum": "#dda0dd",
    "powderblue": "#b0e0e6",
    "purple": "#800080",
    "rebeccapurple": "#663399",
    "red": "#ff0000",
    "rosybrown": "#bc8f8f",
    "royalblue": "#4169e1",
    "saddlebrown": "#8b4513",
    "salmon": "#fa8072",
    "sandybrown": "#f4a460",
    "seagreen": "#2e8b57",
    "seashell": "#fff5ee",
    "sienna": "#a0522d",
    "silver": "#c0c0c0",
    "skyblue": "#87ceeb",
    "slateblue": "#6a5acd",
    "slategray": "#708090",
    "slategrey": "#708090",
    "snow": "#fffafa",
    "springgreen": "#00ff7f",
    "steelblue": "#4682b4",
    "tan": "#d2b48c",
    "teal": "#008080",
    "thistle": "#d8bfd8",
    "tomato": "#ff6347",
    "turquoise": "#40e0d0",
    "violet": "#ee82ee",
    "wheat": "#f5deb3",
    "white": "#ffffff",
    "whitesmoke": "#f5f5f5",
    "yellow": "#ffff00",
    "yellowgreen": "#9acd32",
}
venv\Lib\site-packages\PIL\ImageDraw.py
#
# The Python Imaging Library
# $Id$
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import math
import struct
from collections.abc import Sequence
from types import ModuleType
from typing import Any, AnyStr, Callable, Union, cast
from . import Image, ImageColor
from ._deprecate import deprecate
from ._typing import Coords
# experimental access to the outline API: factory for low-level _Outline
# objects consumed by ImageDraw.shape().
Outline: Callable[[], Image.core._Outline] = Image.core.outline
# Always False at runtime; the guarded imports below are visible only to
# static type checkers (avoids a runtime import cycle).
TYPE_CHECKING = False
if TYPE_CHECKING:
    from . import ImageDraw2, ImageFont
# Accepted ink/fill specifiers: a numeric graylevel, a per-band tuple, or a
# colour string understood by ImageColor.
_Ink = Union[float, tuple[int, ...], str]
"""
A simple 2D drawing interface for PIL images.
Application code should use the Draw factory, instead of
directly.
"""
class ImageDraw:
    """A 2D drawing interface bound to a single :py:class:`~PIL.Image.Image`.

    Application code should normally obtain instances through the
    :py:func:`Draw` factory rather than constructing this class directly.
    """

    # Class-level default font, shared by every instance until one assigns
    # its own; getfont() lazily initializes it via ImageFont.load_default().
    font: (
        ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont | None
    ) = None

    def __init__(self, im: Image.Image, mode: str | None = None) -> None:
        """
        Create a drawing instance.

        :param im: The image to draw in.
        :param mode: Optional mode to use for color values. For RGB
            images, this argument can be RGB or RGBA (to blend the
            drawing into the image). For all other modes, this argument
            must be the same as the image mode. If omitted, the mode
            defaults to the mode of the image.
        :raises ValueError: If ``mode`` is neither the image mode nor the
            supported RGBA-on-RGB combination.
        """
        im.load()
        if im.readonly:
            im._copy()  # make it writeable
        # blend=1 enables RGBA-over-RGB compositing in the C draw layer.
        blend = 0
        if mode is None:
            mode = im.mode
        if mode != im.mode:
            if mode == "RGBA" and im.mode == "RGB":
                blend = 1
            else:
                msg = "mode mismatch"
                raise ValueError(msg)
        if mode == "P":
            self.palette = im.palette
        else:
            self.palette = None
        self._image = im
        self.im = im.im
        self.draw = Image.core.draw(self.im, blend)
        self.mode = mode
        # Default ink: 1 for I/F images, -1 (all bits set) otherwise.
        if mode in ("I", "F"):
            self.ink = self.draw.draw_ink(1)
        else:
            self.ink = self.draw.draw_ink(-1)
        if mode in ("1", "P", "I", "F"):
            # FIXME: fix Fill2 to properly support matte for I+F images
            self.fontmode = "1"
        else:
            self.fontmode = "L"  # aliasing is okay for other modes
        self.fill = False

    def getfont(
        self,
    ) -> ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont:
        """
        Get the current default font.

        To set the default font for this ImageDraw instance::

            from PIL import ImageDraw, ImageFont
            draw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")

        To set the default font for all future ImageDraw instances::

            from PIL import ImageDraw, ImageFont
            ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")

        If the current default font is ``None``,
        it is initialized with ``ImageFont.load_default()``.

        :returns: An image font."""
        if not self.font:
            # FIXME: should add a font repository
            from . import ImageFont

            self.font = ImageFont.load_default()
        return self.font

    def _getfont(
        self, font_size: float | None
    ) -> ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont:
        # An explicit size gets a fresh default font at that size; otherwise
        # fall back to the shared default via getfont().
        if font_size is not None:
            from . import ImageFont

            return ImageFont.load_default(font_size)
        else:
            return self.getfont()

    def _getink(
        self, ink: _Ink | None, fill: _Ink | None = None
    ) -> tuple[int | None, int | None]:
        # Resolve user-level ink/fill specifiers (strings, tuples, numbers)
        # to low-level draw inks. Returns (outline_ink, fill_ink); each slot
        # is None when that role was not requested.
        result_ink = None
        result_fill = None
        if ink is None and fill is None:
            # Neither given: supply the instance default for exactly one
            # role, chosen by the current fill flag.
            if self.fill:
                result_fill = self.ink
            else:
                result_ink = self.ink
        else:
            if ink is not None:
                if isinstance(ink, str):
                    ink = ImageColor.getcolor(ink, self.mode)
                if self.palette and isinstance(ink, tuple):
                    # P mode: map the RGB(A) tuple to a palette index.
                    ink = self.palette.getcolor(ink, self._image)
                result_ink = self.draw.draw_ink(ink)
            if fill is not None:
                if isinstance(fill, str):
                    fill = ImageColor.getcolor(fill, self.mode)
                if self.palette and isinstance(fill, tuple):
                    fill = self.palette.getcolor(fill, self._image)
                result_fill = self.draw.draw_ink(fill)
        return result_ink, result_fill

    def arc(
        self,
        xy: Coords,
        start: float,
        end: float,
        fill: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw an arc."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_arc(xy, start, end, ink, width)

    def bitmap(
        self, xy: Sequence[int], bitmap: Image.Image, fill: _Ink | None = None
    ) -> None:
        """Draw a bitmap."""
        bitmap.load()
        ink, fill = self._getink(fill)
        # _getink may return the colour in either slot; use whichever is set.
        if ink is None:
            ink = fill
        if ink is not None:
            self.draw.draw_bitmap(xy, bitmap.im, ink)

    def chord(
        self,
        xy: Coords,
        start: float,
        end: float,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a chord."""
        ink, fill_ink = self._getink(outline, fill)
        # Fill the interior first, then draw the outline over it.
        if fill_ink is not None:
            self.draw.draw_chord(xy, start, end, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            self.draw.draw_chord(xy, start, end, ink, 0, width)

    def ellipse(
        self,
        xy: Coords,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw an ellipse."""
        ink, fill_ink = self._getink(outline, fill)
        if fill_ink is not None:
            self.draw.draw_ellipse(xy, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            self.draw.draw_ellipse(xy, ink, 0, width)

    def circle(
        self,
        xy: Sequence[float],
        radius: float,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a circle given center coordinates and a radius."""
        # Convert centre + radius to the bounding box ellipse() expects.
        ellipse_xy = (xy[0] - radius, xy[1] - radius, xy[0] + radius, xy[1] + radius)
        self.ellipse(ellipse_xy, fill, outline, width)

    def line(
        self,
        xy: Coords,
        fill: _Ink | None = None,
        width: int = 0,
        joint: str | None = None,
    ) -> None:
        """Draw a line, or a connected sequence of line segments.

        :param joint: If ``"curve"`` and ``width > 4``, interior corners are
            rounded with pie-slice joints.
        """
        ink = self._getink(fill)[0]
        if ink is not None:
            self.draw.draw_lines(xy, ink, width)
            if joint == "curve" and width > 4:
                points: Sequence[Sequence[float]]
                if isinstance(xy[0], (list, tuple)):
                    points = cast(Sequence[Sequence[float]], xy)
                else:
                    # Flat coordinate list: regroup into (x, y) pairs.
                    points = [
                        cast(Sequence[float], tuple(xy[i : i + 2]))
                        for i in range(0, len(xy), 2)
                    ]
                for i in range(1, len(points) - 1):
                    point = points[i]
                    # Angles of the incoming and outgoing segments at this
                    # vertex, normalized to [0, 360).
                    angles = [
                        math.degrees(math.atan2(end[0] - start[0], start[1] - end[1]))
                        % 360
                        for start, end in (
                            (points[i - 1], point),
                            (point, points[i + 1]),
                        )
                    ]
                    if angles[0] == angles[1]:
                        # This is a straight line, so no joint is required
                        continue

                    def coord_at_angle(
                        coord: Sequence[float], angle: float
                    ) -> tuple[float, ...]:
                        # Point at (width/2 - 1) pixels from coord in the
                        # given direction, rounded toward coord.
                        x, y = coord
                        angle -= 90
                        distance = width / 2 - 1
                        return tuple(
                            p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d))
                            for p, p_d in (
                                (x, distance * math.cos(math.radians(angle))),
                                (y, distance * math.sin(math.radians(angle))),
                            )
                        )

                    # Whether the corner turns the "other" way, which swaps
                    # the pie-slice sweep direction.
                    flipped = (
                        angles[1] > angles[0] and angles[1] - 180 > angles[0]
                    ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0])
                    coords = [
                        (point[0] - width / 2 + 1, point[1] - width / 2 + 1),
                        (point[0] + width / 2 - 1, point[1] + width / 2 - 1),
                    ]
                    if flipped:
                        start, end = (angles[1] + 90, angles[0] + 90)
                    else:
                        start, end = (angles[0] - 90, angles[1] - 90)
                    self.pieslice(coords, start - 90, end - 90, fill)
                    if width > 8:
                        # Cover potential gaps between the line and the joint
                        if flipped:
                            gap_coords = [
                                coord_at_angle(point, angles[0] + 90),
                                point,
                                coord_at_angle(point, angles[1] + 90),
                            ]
                        else:
                            gap_coords = [
                                coord_at_angle(point, angles[0] - 90),
                                point,
                                coord_at_angle(point, angles[1] - 90),
                            ]
                        self.line(gap_coords, fill, width=3)

    def shape(
        self,
        shape: Image.core._Outline,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
    ) -> None:
        """(Experimental) Draw a shape."""
        shape.close()
        ink, fill_ink = self._getink(outline, fill)
        if fill_ink is not None:
            self.draw.draw_outline(shape, fill_ink, 1)
        if ink is not None and ink != fill_ink:
            self.draw.draw_outline(shape, ink, 0)

    def pieslice(
        self,
        xy: Coords,
        start: float,
        end: float,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a pieslice."""
        ink, fill_ink = self._getink(outline, fill)
        if fill_ink is not None:
            self.draw.draw_pieslice(xy, start, end, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            self.draw.draw_pieslice(xy, start, end, ink, 0, width)

    def point(self, xy: Coords, fill: _Ink | None = None) -> None:
        """Draw one or more individual pixels."""
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_points(xy, ink)

    def polygon(
        self,
        xy: Coords,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a polygon."""
        ink, fill_ink = self._getink(outline, fill)
        if fill_ink is not None:
            self.draw.draw_polygon(xy, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            if width == 1:
                self.draw.draw_polygon(xy, ink, 0, width)
            elif self.im is not None:
                # To avoid expanding the polygon outwards,
                # use the fill as a mask
                mask = Image.new("1", self.im.size)
                mask_ink = self._getink(1)[0]
                draw = Draw(mask)
                draw.draw.draw_polygon(xy, mask_ink, 1)
                self.draw.draw_polygon(xy, ink, 0, width * 2 - 1, mask.im)

    def regular_polygon(
        self,
        bounding_circle: Sequence[Sequence[float] | float],
        n_sides: int,
        rotation: float = 0,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a regular polygon."""
        xy = _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation)
        self.polygon(xy, fill, outline, width)

    def rectangle(
        self,
        xy: Coords,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
    ) -> None:
        """Draw a rectangle."""
        ink, fill_ink = self._getink(outline, fill)
        if fill_ink is not None:
            self.draw.draw_rectangle(xy, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            self.draw.draw_rectangle(xy, ink, 0, width)

    def rounded_rectangle(
        self,
        xy: Coords,
        radius: float = 0,
        fill: _Ink | None = None,
        outline: _Ink | None = None,
        width: int = 1,
        *,
        corners: tuple[bool, bool, bool, bool] | None = None,
    ) -> None:
        """Draw a rounded rectangle.

        :param corners: Optional per-corner flags, ordered (top-left,
            top-right, bottom-right, bottom-left); ``None`` rounds all four.
        :raises ValueError: If x1 < x0 or y1 < y0.
        """
        if isinstance(xy[0], (list, tuple)):
            (x0, y0), (x1, y1) = cast(Sequence[Sequence[float]], xy)
        else:
            x0, y0, x1, y1 = cast(Sequence[float], xy)
        if x1 < x0:
            msg = "x1 must be greater than or equal to x0"
            raise ValueError(msg)
        if y1 < y0:
            msg = "y1 must be greater than or equal to y0"
            raise ValueError(msg)
        if corners is None:
            corners = (True, True, True, True)
        d = radius * 2
        x0 = round(x0)
        y0 = round(y0)
        x1 = round(x1)
        y1 = round(y1)
        full_x, full_y = False, False
        if all(corners):
            full_x = d >= x1 - x0 - 1
            if full_x:
                # The two left and two right corners are joined
                d = x1 - x0
            full_y = d >= y1 - y0 - 1
            if full_y:
                # The two top and two bottom corners are joined
                d = y1 - y0
            if full_x and full_y:
                # If all corners are joined, that is a circle
                return self.ellipse(xy, fill, outline, width)
        if d == 0 or not any(corners):
            # If the corners have no curve,
            # or there are no corners,
            # that is a rectangle
            return self.rectangle(xy, fill, outline, width)
        r = int(d // 2)
        ink, fill_ink = self._getink(outline, fill)

        def draw_corners(pieslice: bool) -> None:
            # Draw the rounded parts: pie slices when filling, arcs when
            # outlining.
            parts: tuple[tuple[tuple[float, float, float, float], int, int], ...]
            if full_x:
                # Draw top and bottom halves
                parts = (
                    ((x0, y0, x0 + d, y0 + d), 180, 360),
                    ((x0, y1 - d, x0 + d, y1), 0, 180),
                )
            elif full_y:
                # Draw left and right halves
                parts = (
                    ((x0, y0, x0 + d, y0 + d), 90, 270),
                    ((x1 - d, y0, x1, y0 + d), 270, 90),
                )
            else:
                # Draw four separate corners
                parts = tuple(
                    part
                    for i, part in enumerate(
                        (
                            ((x0, y0, x0 + d, y0 + d), 180, 270),
                            ((x1 - d, y0, x1, y0 + d), 270, 360),
                            ((x1 - d, y1 - d, x1, y1), 0, 90),
                            ((x0, y1 - d, x0 + d, y1), 90, 180),
                        )
                    )
                    if corners[i]
                )
            for part in parts:
                if pieslice:
                    self.draw.draw_pieslice(*(part + (fill_ink, 1)))
                else:
                    self.draw.draw_arc(*(part + (ink, width)))

        if fill_ink is not None:
            draw_corners(True)
            # Fill the straight-edged remainder with rectangles.
            if full_x:
                self.draw.draw_rectangle((x0, y0 + r + 1, x1, y1 - r - 1), fill_ink, 1)
            elif x1 - r - 1 > x0 + r + 1:
                self.draw.draw_rectangle((x0 + r + 1, y0, x1 - r - 1, y1), fill_ink, 1)
            if not full_x and not full_y:
                left = [x0, y0, x0 + r, y1]
                if corners[0]:
                    left[1] += r + 1
                if corners[3]:
                    left[3] -= r + 1
                self.draw.draw_rectangle(left, fill_ink, 1)
                right = [x1 - r, y0, x1, y1]
                if corners[1]:
                    right[1] += r + 1
                if corners[2]:
                    right[3] -= r + 1
                self.draw.draw_rectangle(right, fill_ink, 1)
        if ink is not None and ink != fill_ink and width != 0:
            draw_corners(False)
            # Draw the straight edges as thin filled rectangles, trimmed
            # where they meet a rounded corner.
            if not full_x:
                top = [x0, y0, x1, y0 + width - 1]
                if corners[0]:
                    top[0] += r + 1
                if corners[1]:
                    top[2] -= r + 1
                self.draw.draw_rectangle(top, ink, 1)
                bottom = [x0, y1 - width + 1, x1, y1]
                if corners[3]:
                    bottom[0] += r + 1
                if corners[2]:
                    bottom[2] -= r + 1
                self.draw.draw_rectangle(bottom, ink, 1)
            if not full_y:
                left = [x0, y0, x0 + width - 1, y1]
                if corners[0]:
                    left[1] += r + 1
                if corners[3]:
                    left[3] -= r + 1
                self.draw.draw_rectangle(left, ink, 1)
                right = [x1 - width + 1, y0, x1, y1]
                if corners[1]:
                    right[1] += r + 1
                if corners[2]:
                    right[3] -= r + 1
                self.draw.draw_rectangle(right, ink, 1)

    def _multiline_check(self, text: AnyStr) -> bool:
        # True when the text contains a newline (str or bytes).
        split_character = "\n" if isinstance(text, str) else b"\n"
        return split_character in text

    def text(
        self,
        xy: tuple[float, float],
        text: AnyStr,
        fill: _Ink | None = None,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ) = None,
        anchor: str | None = None,
        spacing: float = 4,
        align: str = "left",
        direction: str | None = None,
        features: list[str] | None = None,
        language: str | None = None,
        stroke_width: float = 0,
        stroke_fill: _Ink | None = None,
        embedded_color: bool = False,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Draw text.

        :raises ValueError: If ``embedded_color`` is used outside RGB/RGBA
            mode.
        """
        if embedded_color and self.mode not in ("RGB", "RGBA"):
            msg = "Embedded color supported only in RGB and RGBA modes"
            raise ValueError(msg)
        if font is None:
            font = self._getfont(kwargs.get("font_size"))
        # Text with newlines is delegated to multiline_text().
        if self._multiline_check(text):
            return self.multiline_text(
                xy,
                text,
                fill,
                font,
                anchor,
                spacing,
                align,
                direction,
                features,
                language,
                stroke_width,
                stroke_fill,
                embedded_color,
            )

        def getink(fill: _Ink | None) -> int:
            # Resolve a specifier to a draw ink, whichever slot it lands in.
            ink, fill_ink = self._getink(fill)
            if ink is None:
                assert fill_ink is not None
                return fill_ink
            return ink

        def draw_text(ink: int, stroke_width: float = 0) -> None:
            mode = self.fontmode
            if stroke_width == 0 and embedded_color:
                mode = "RGBA"
            coord = []
            for i in range(2):
                coord.append(int(xy[i]))
            # Fractional part of the position, passed to the rasterizer for
            # sub-pixel placement.
            start = (math.modf(xy[0])[0], math.modf(xy[1])[0])
            try:
                # Preferred path: getmask2 also returns the layout offset.
                mask, offset = font.getmask2(  # type: ignore[union-attr,misc]
                    text,
                    mode,
                    direction=direction,
                    features=features,
                    language=language,
                    stroke_width=stroke_width,
                    stroke_filled=True,
                    anchor=anchor,
                    ink=ink,
                    start=start,
                    *args,
                    **kwargs,
                )
                coord = [coord[0] + offset[0], coord[1] + offset[1]]
            except AttributeError:
                # Older font objects only provide getmask, possibly with a
                # reduced signature.
                try:
                    mask = font.getmask(  # type: ignore[misc]
                        text,
                        mode,
                        direction,
                        features,
                        language,
                        stroke_width,
                        anchor,
                        ink,
                        start=start,
                        *args,
                        **kwargs,
                    )
                except TypeError:
                    mask = font.getmask(text)
            if mode == "RGBA":
                # font.getmask2(mode="RGBA") returns color in RGB bands and mask in A
                # extract mask and set text alpha
                color, mask = mask, mask.getband(3)
                ink_alpha = struct.pack("i", ink)[3]
                color.fillband(3, ink_alpha)
                x, y = coord
                if self.im is not None:
                    self.im.paste(
                        color, (x, y, x + mask.size[0], y + mask.size[1]), mask
                    )
            else:
                self.draw.draw_bitmap(coord, mask, ink)

        ink = getink(fill)
        if ink is not None:
            stroke_ink = None
            if stroke_width:
                stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink
            if stroke_ink is not None:
                # Draw stroked text
                draw_text(stroke_ink, stroke_width)
                # Draw normal text
                if ink != stroke_ink:
                    draw_text(ink)
            else:
                # Only draw normal text
                draw_text(ink)

    def _prepare_multiline_text(
        self,
        xy: tuple[float, float],
        text: AnyStr,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ),
        anchor: str | None,
        spacing: float,
        align: str,
        direction: str | None,
        features: list[str] | None,
        language: str | None,
        stroke_width: float,
        embedded_color: bool,
        font_size: float | None,
    ) -> tuple[
        ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont,
        list[tuple[tuple[float, float], str, AnyStr]],
    ]:
        # Split multiline text into per-line (position, anchor, text) parts,
        # applying anchor, alignment and justification. Shared by
        # multiline_text() and multiline_textbbox().
        if anchor is None:
            anchor = "lt" if direction == "ttb" else "la"
        elif len(anchor) != 2:
            msg = "anchor must be a 2 character string"
            raise ValueError(msg)
        elif anchor[1] in "tb" and direction != "ttb":
            msg = "anchor not supported for multiline text"
            raise ValueError(msg)
        if font is None:
            font = self._getfont(font_size)
        lines = text.split("\n" if isinstance(text, str) else b"\n")
        # Line advance: height of a reference glyph plus stroke and spacing.
        line_spacing = (
            self.textbbox((0, 0), "A", font, stroke_width=stroke_width)[3]
            + stroke_width
            + spacing
        )
        top = xy[1]
        parts = []
        if direction == "ttb":
            # Vertical layout: lines advance horizontally.
            left = xy[0]
            for line in lines:
                parts.append(((left, top), anchor, line))
                left += line_spacing
        else:
            widths = []
            max_width: float = 0
            for line in lines:
                line_width = self.textlength(
                    line,
                    font,
                    direction=direction,
                    features=features,
                    language=language,
                    embedded_color=embedded_color,
                )
                widths.append(line_width)
                max_width = max(max_width, line_width)
            # Vertical anchor: shift the whole block for middle/descender.
            if anchor[1] == "m":
                top -= (len(lines) - 1) * line_spacing / 2.0
            elif anchor[1] == "d":
                top -= (len(lines) - 1) * line_spacing
            for idx, line in enumerate(lines):
                left = xy[0]
                width_difference = max_width - widths[idx]
                # align by align parameter
                if align in ("left", "justify"):
                    pass
                elif align == "center":
                    left += width_difference / 2.0
                elif align == "right":
                    left += width_difference
                else:
                    msg = 'align must be "left", "center", "right" or "justify"'
                    raise ValueError(msg)
                if (
                    align == "justify"
                    and width_difference != 0
                    and idx != len(lines) - 1
                ):
                    # Justify by distributing the slack between words; the
                    # last line is never justified.
                    words = line.split(" " if isinstance(text, str) else b" ")
                    if len(words) > 1:
                        # align left by anchor
                        if anchor[0] == "m":
                            left -= max_width / 2.0
                        elif anchor[0] == "r":
                            left -= max_width
                        word_widths = [
                            self.textlength(
                                word,
                                font,
                                direction=direction,
                                features=features,
                                language=language,
                                embedded_color=embedded_color,
                            )
                            for word in words
                        ]
                        word_anchor = "l" + anchor[1]
                        width_difference = max_width - sum(word_widths)
                        for i, word in enumerate(words):
                            parts.append(((left, top), word_anchor, word))
                            left += word_widths[i] + width_difference / (len(words) - 1)
                        top += line_spacing
                        continue
                # align left by anchor
                if anchor[0] == "m":
                    left -= width_difference / 2.0
                elif anchor[0] == "r":
                    left -= width_difference
                parts.append(((left, top), anchor, line))
                top += line_spacing
        return font, parts

    def multiline_text(
        self,
        xy: tuple[float, float],
        text: AnyStr,
        fill: _Ink | None = None,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ) = None,
        anchor: str | None = None,
        spacing: float = 4,
        align: str = "left",
        direction: str | None = None,
        features: list[str] | None = None,
        language: str | None = None,
        stroke_width: float = 0,
        stroke_fill: _Ink | None = None,
        embedded_color: bool = False,
        *,
        font_size: float | None = None,
    ) -> None:
        """Draw multiline text: lay out the lines, then draw each one."""
        font, lines = self._prepare_multiline_text(
            xy,
            text,
            font,
            anchor,
            spacing,
            align,
            direction,
            features,
            language,
            stroke_width,
            embedded_color,
            font_size,
        )
        for xy, anchor, line in lines:
            self.text(
                xy,
                line,
                fill,
                font,
                anchor,
                direction=direction,
                features=features,
                language=language,
                stroke_width=stroke_width,
                stroke_fill=stroke_fill,
                embedded_color=embedded_color,
            )

    def textlength(
        self,
        text: AnyStr,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ) = None,
        direction: str | None = None,
        features: list[str] | None = None,
        language: str | None = None,
        embedded_color: bool = False,
        *,
        font_size: float | None = None,
    ) -> float:
        """Get the length of a given string, in pixels with 1/64 precision.

        :raises ValueError: For multiline text, or ``embedded_color``
            outside RGB/RGBA mode.
        """
        if self._multiline_check(text):
            msg = "can't measure length of multiline text"
            raise ValueError(msg)
        if embedded_color and self.mode not in ("RGB", "RGBA"):
            msg = "Embedded color supported only in RGB and RGBA modes"
            raise ValueError(msg)
        if font is None:
            font = self._getfont(font_size)
        mode = "RGBA" if embedded_color else self.fontmode
        return font.getlength(text, mode, direction, features, language)

    def textbbox(
        self,
        xy: tuple[float, float],
        text: AnyStr,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ) = None,
        anchor: str | None = None,
        spacing: float = 4,
        align: str = "left",
        direction: str | None = None,
        features: list[str] | None = None,
        language: str | None = None,
        stroke_width: float = 0,
        embedded_color: bool = False,
        *,
        font_size: float | None = None,
    ) -> tuple[float, float, float, float]:
        """Get the bounding box of a given string, in pixels.

        :returns: ``(left, top, right, bottom)`` offset by ``xy``.
        :raises ValueError: If ``embedded_color`` is used outside RGB/RGBA
            mode.
        """
        if embedded_color and self.mode not in ("RGB", "RGBA"):
            msg = "Embedded color supported only in RGB and RGBA modes"
            raise ValueError(msg)
        if font is None:
            font = self._getfont(font_size)
        if self._multiline_check(text):
            return self.multiline_textbbox(
                xy,
                text,
                font,
                anchor,
                spacing,
                align,
                direction,
                features,
                language,
                stroke_width,
                embedded_color,
            )
        mode = "RGBA" if embedded_color else self.fontmode
        bbox = font.getbbox(
            text, mode, direction, features, language, stroke_width, anchor
        )
        return bbox[0] + xy[0], bbox[1] + xy[1], bbox[2] + xy[0], bbox[3] + xy[1]

    def multiline_textbbox(
        self,
        xy: tuple[float, float],
        text: AnyStr,
        font: (
            ImageFont.ImageFont
            | ImageFont.FreeTypeFont
            | ImageFont.TransposedFont
            | None
        ) = None,
        anchor: str | None = None,
        spacing: float = 4,
        align: str = "left",
        direction: str | None = None,
        features: list[str] | None = None,
        language: str | None = None,
        stroke_width: float = 0,
        embedded_color: bool = False,
        *,
        font_size: float | None = None,
    ) -> tuple[float, float, float, float]:
        """Get the bounding box of multiline text: the union of the
        per-line boxes."""
        font, lines = self._prepare_multiline_text(
            xy,
            text,
            font,
            anchor,
            spacing,
            align,
            direction,
            features,
            language,
            stroke_width,
            embedded_color,
            font_size,
        )
        bbox: tuple[float, float, float, float] | None = None
        for xy, anchor, line in lines:
            bbox_line = self.textbbox(
                xy,
                line,
                font,
                anchor,
                direction=direction,
                features=features,
                language=language,
                stroke_width=stroke_width,
                embedded_color=embedded_color,
            )
            if bbox is None:
                bbox = bbox_line
            else:
                bbox = (
                    min(bbox[0], bbox_line[0]),
                    min(bbox[1], bbox_line[1]),
                    max(bbox[2], bbox_line[2]),
                    max(bbox[3], bbox_line[3]),
                )
        # Empty text produces no lines; return a degenerate box at xy.
        if bbox is None:
            return xy[0], xy[1], xy[0], xy[1]
        return bbox
def Draw(im: Image.Image, mode: str | None = None) -> ImageDraw:
    """
    A simple 2D drawing interface for PIL images.

    :param im: The image to draw in.
    :param mode: Optional mode to use for color values. For RGB
        images, this argument can be RGB or RGBA (to blend the
        drawing into the image). For all other modes, this argument
        must be the same as the image mode. If omitted, the mode
        defaults to the mode of the image.
    """
    # Let the image supply its own drawing context when it can; fall back
    # to a plain ImageDraw otherwise (the except also covers a getdraw()
    # call that itself raises AttributeError, matching historic behavior).
    try:
        custom_factory = im.getdraw
        return custom_factory(mode)
    except AttributeError:
        return ImageDraw(im, mode)
def getdraw(
    im: Image.Image | None = None, hints: list[str] | None = None
) -> tuple[ImageDraw2.Draw | None, ModuleType]:
    """
    :param im: The image to draw in.
    :param hints: An optional list of hints. Deprecated.
    :returns: A (drawing context, drawing resource factory) tuple.
    """
    if hints is not None:
        deprecate("'hints' parameter", 12)
    from . import ImageDraw2

    if im is None:
        draw = None
    else:
        draw = ImageDraw2.Draw(im)
    return draw, ImageDraw2
def floodfill(
    image: Image.Image,
    xy: tuple[int, int],
    value: float | tuple[int, ...],
    border: float | tuple[int, ...] | None = None,
    thresh: float = 0,
) -> None:
    """
    .. warning:: This method is experimental.

    Fills a bounded region with a given color.

    :param image: Target image.
    :param xy: Seed position (a 2-item coordinate tuple). See
        :ref:`coordinate-system`.
    :param value: Fill color.
    :param border: Optional border value. If given, the region consists of
        pixels with a color different from the border color. If not given,
        the region consists of pixels having the same color as the seed
        pixel.
    :param thresh: Optional threshold value which specifies a maximum
        tolerable difference of a pixel value from the 'background' in
        order for it to be replaced. Useful for filling regions of
        non-homogeneous, but similar, colors.
    """
    # based on an implementation by Eric S. Raymond
    # amended by yo1995 @20180806
    px = image.load()
    assert px is not None
    seed_x, seed_y = xy
    try:
        background = px[seed_x, seed_y]
        if _color_diff(value, background) <= thresh:
            return  # seed point already has fill color
        px[seed_x, seed_y] = value
    except (ValueError, IndexError):
        return  # seed point outside image
    # Breadth-first fill. Only the current and previous frontiers are kept
    # as the "seen" record, bounding memory use.
    frontier = {(seed_x, seed_y)}
    seen: set[tuple[int, int]] = set()
    while frontier:
        next_frontier = set()
        for cur_x, cur_y in frontier:
            # 4-connected neighbourhood.
            for nx, ny in (
                (cur_x + 1, cur_y),
                (cur_x - 1, cur_y),
                (cur_x, cur_y + 1),
                (cur_x, cur_y - 1),
            ):
                # If already processed, or if a coordinate is negative, skip
                if (nx, ny) in seen or nx < 0 or ny < 0:
                    continue
                try:
                    p = px[nx, ny]
                except (ValueError, IndexError):
                    continue
                seen.add((nx, ny))
                if border is None:
                    should_fill = _color_diff(p, background) <= thresh
                else:
                    should_fill = p not in (value, border)
                if should_fill:
                    px[nx, ny] = value
                    next_frontier.add((nx, ny))
        seen = frontier  # discard pixels processed before this frontier
        frontier = next_frontier
def _compute_regular_polygon_vertices(
bounding_circle: Sequence[Sequence[float] | float], n_sides: int, rotation: float
) -> list[tuple[float, float]]:
"""
Generate a list of vertices for a 2D regular polygon.
:param bounding_circle: The bounding circle is a sequence defined
by a point and radius. The polygon is inscribed in this circle.
(e.g. ``bounding_circle=(x, y, r)`` or ``((x, y), r)``)
:param n_sides: Number of sides
(e.g. ``n_sides=3`` for a triangle, ``6`` for a hexagon)
:param rotation: Apply an arbitrary rotation to the polygon
(e.g. ``rotation=90``, applies a 90 degree rotation)
:return: List of regular polygon vertices
(e.g. ``[(25, 50), (50, 50), (50, 25), (25, 25)]``)
How are the vertices computed?
1. Compute the following variables
- theta: Angle between the apothem & the nearest polygon vertex
- side_length: Length of each polygon edge
- centroid: Center of bounding circle (1st, 2nd elements of bounding_circle)
- polygon_radius: Polygon radius (last element of bounding_circle)
- angles: Location of each polygon vertex in polar grid
(e.g. A square with 0 degree rotation => [225.0, 315.0, 45.0, 135.0])
2. For each angle in angles, get the polygon vertex at that angle
The vertex is computed using the equation below.
X= xcos(φ) + ysin(φ)
Y= −xsin(φ) + ycos(φ)
Note:
φ = angle in degrees
x = 0
y = polygon_radius
The formula above assumes rotation around the origin.
In our case, we are rotating around the centroid.
To account for this, we use the formula below
X = xcos(φ) + ysin(φ) + centroid_x
Y = −xsin(φ) + ycos(φ) + centroid_y
"""
# 1. Error Handling
# 1.1 Check `n_sides` has an appropriate value
if not isinstance(n_sides, int):
msg = "n_sides should be an int" # type: ignore[unreachable]
raise TypeError(msg)
if n_sides < 3:
msg = "n_sides should be an int > 2"
raise ValueError(msg)
# 1.2 Check `bounding_circle` has an appropriate value
if not isinstance(bounding_circle, (list, tuple)):
msg = "bounding_circle should be a sequence"
raise TypeError(msg)
if len(bounding_circle) == 3:
if not all(isinstance(i, (int, float)) for i in bounding_circle):
msg = "bounding_circle should only contain numeric data"
raise ValueError(msg)
*centroid, polygon_radius = cast(list[float], list(bounding_circle))
elif len(bounding_circle) == 2 and isinstance(bounding_circle[0], (list, tuple)):
if not all(
isinstance(i, (int, float)) for i in bounding_circle[0]
) or not isinstance(bounding_circle[1], (int, float)):
msg = "bounding_circle should only contain numeric data"
raise ValueError(msg)
if len(bounding_circle[0]) != 2:
msg = "bounding_circle centre should contain 2D coordinates (e.g. (x, y))"
raise ValueError(msg)
centroid = cast(list[float], list(bounding_circle[0]))
polygon_radius = cast(float, bounding_circle[1])
else:
msg = (
"bounding_circle should contain 2D coordinates "
"and a radius (e.g. (x, y, r) or ((x, y), r) )"
)
raise ValueError(msg)
if polygon_radius <= 0:
msg = "bounding_circle radius should be > 0"
raise ValueError(msg)
# 1.3 Check `rotation` has an appropriate value
if not isinstance(rotation, (int, float)):
msg = "rotation should be an int or float" # type: ignore[unreachable]
raise ValueError(msg)
# 2. Define Helper Functions
def _apply_rotation(point: list[float], degrees: float) -> tuple[float, float]:
return (
round(
point[0] * math.cos(math.radians(360 - degrees))
- point[1] * math.sin(math.radians(360 - degrees))
+ centroid[0],
2,
),
round(
point[1] * math.cos(math.radians(360 - degrees))
+ point[0] * math.sin(math.radians(360 - degrees))
+ centroid[1],
2,
),
)
def _compute_polygon_vertex(angle: float) -> tuple[float, float]:
start_point = [polygon_radius, 0]
return _apply_rotation(start_point, angle)
def _get_angles(n_sides: int, rotation: float) -> list[float]:
angles = []
degrees = 360 / n_sides
# Start with the bottom left polygon vertex
current_angle = (270 - 0.5 * degrees) + rotation
for _ in range(n_sides):
angles.append(current_angle)
current_angle += degrees
if current_angle > 360:
current_angle -= 360
return angles
# 3. Variable Declarations
angles = _get_angles(n_sides, rotation)
# 4. Compute Vertices
return [_compute_polygon_vertex(angle) for angle in angles]
def _color_diff(
color1: float | tuple[int, ...], color2: float | tuple[int, ...]
) -> float:
"""
Uses 1-norm distance to calculate difference between two values.
"""
first = color1 if isinstance(color1, tuple) else (color1,)
second = color2 if isinstance(color2, tuple) else (color2,)
return sum(abs(first[i] - second[i]) for i in range(len(second)))
venv\Lib\site-packages\PIL\ImageDraw2.py
#
# The Python Imaging Library
# $Id$
#
# WCK-style drawing interface operations
#
# History:
# 2003-12-07 fl created
# 2005-05-15 fl updated; added to PIL as ImageDraw2
# 2005-05-15 fl added text support
# 2005-05-20 fl added arc/chord/pieslice support
#
# Copyright (c) 2003-2005 by Secret Labs AB
# Copyright (c) 2003-2005 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
"""
(Experimental) WCK-style drawing interface operations
.. seealso:: :py:mod:`PIL.ImageDraw`
"""
from __future__ import annotations
from typing import Any, AnyStr, BinaryIO
from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath
from ._typing import Coords, StrOrBytesPath
class Pen:
    """Stores an outline color and width."""

    def __init__(self, color: str, width: int = 1, opacity: int = 255) -> None:
        # Color is parsed with ImageColor.getrgb, so any color string that
        # module accepts is valid here.
        self.color = ImageColor.getrgb(color)
        self.width = width
        # NOTE(review): `opacity` is accepted but never stored or used here.
class Brush:
    """Stores a fill color"""

    def __init__(self, color: str, opacity: int = 255) -> None:
        # Color is parsed with ImageColor.getrgb.
        self.color = ImageColor.getrgb(color)
        # NOTE(review): `opacity` is accepted but never stored or used here.
class Font:
    """Stores a TrueType font and color"""

    def __init__(
        self, color: str, file: StrOrBytesPath | BinaryIO, size: float = 12
    ) -> None:
        # FIXME: add support for bitmap fonts
        self.color = ImageColor.getrgb(color)
        # `file` may be a path or an open binary stream, as accepted by
        # ImageFont.truetype.
        self.font = ImageFont.truetype(file, size)
class Draw:
    """
    (Experimental) WCK-style drawing interface
    """

    def __init__(
        self,
        image: Image.Image | str,
        size: tuple[int, int] | list[int] | None = None,
        color: float | tuple[float, ...] | str | None = None,
    ) -> None:
        # A mode string creates a fresh image of the given size and color;
        # otherwise draw directly onto the supplied image.
        if isinstance(image, str):
            if size is None:
                msg = "If image argument is mode string, size must be a list or tuple"
                raise ValueError(msg)
            image = Image.new(image, size, color)
        self.draw = ImageDraw.Draw(image)
        self.image = image
        # Optional affine transform (a, b, c, d, e, f) applied to coordinates
        # before each drawing operation; set via settransform().
        self.transform: tuple[float, float, float, float, float, float] | None = None

    def flush(self) -> Image.Image:
        # Return the underlying image (kept for WCK API compatibility).
        return self.image

    def render(
        self,
        op: str,
        xy: Coords,
        pen: Pen | Brush | None,
        brush: Brush | Pen | None = None,
        **kwargs: Any,
    ) -> None:
        """Dispatch a named ImageDraw operation with pen/brush colors.

        :param op: Name of an ImageDraw.ImageDraw method (e.g. "line").
        :param xy: Coordinates for the operation.
        :param pen: Pen (outline) or Brush (fill); either argument position
            accepts either object, so both are inspected below.
        :param brush: Brush (fill) or Pen (outline).
        """
        # handle color arguments
        outline = fill = None
        width = 1
        if isinstance(pen, Pen):
            outline = pen.color
            width = pen.width
        elif isinstance(brush, Pen):
            outline = brush.color
            width = brush.width
        if isinstance(brush, Brush):
            fill = brush.color
        elif isinstance(pen, Brush):
            fill = pen.color
        # handle transformation
        if self.transform:
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        # render the item
        # arc/line take only a fill color; other ops take fill and outline.
        if op in ("arc", "line"):
            kwargs.setdefault("fill", outline)
        else:
            kwargs.setdefault("fill", fill)
            kwargs.setdefault("outline", outline)
        if op == "line":
            kwargs.setdefault("width", width)
        getattr(self.draw, op)(xy, **kwargs)

    def settransform(self, offset: tuple[float, float]) -> None:
        """Sets a transformation offset."""
        (xoffset, yoffset) = offset
        # Pure translation matrix: x' = x + xoffset, y' = y + yoffset.
        self.transform = (1, 0, xoffset, 0, 1, yoffset)

    def arc(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Draws an arc (a portion of a circle outline) between the start and end
        angles, inside the given bounding box.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc`
        """
        self.render("arc", xy, pen, *options, start=start, end=end)

    def chord(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points
        with a straight line.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord`
        """
        self.render("chord", xy, pen, *options, start=start, end=end)

    def ellipse(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws an ellipse inside the given bounding box.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse`
        """
        self.render("ellipse", xy, pen, *options)

    def line(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a line between the coordinates in the ``xy`` list.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line`
        """
        self.render("line", xy, pen, *options)

    def pieslice(
        self,
        xy: Coords,
        pen: Pen | Brush | None,
        start: float,
        end: float,
        *options: Any,
    ) -> None:
        """
        Same as arc, but also draws straight lines between the end points and the
        center of the bounding box.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice`
        """
        self.render("pieslice", xy, pen, *options, start=start, end=end)

    def polygon(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a polygon.

        The polygon outline consists of straight lines between the given
        coordinates, plus a straight line between the last and the first
        coordinate.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon`
        """
        self.render("polygon", xy, pen, *options)

    def rectangle(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None:
        """
        Draws a rectangle.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle`
        """
        self.render("rectangle", xy, pen, *options)

    def text(self, xy: tuple[float, float], text: AnyStr, font: Font) -> None:
        """
        Draws the string at the given position.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text`
        """
        if self.transform:
            # NOTE(review): xy becomes an ImagePath.Path here before being
            # passed on — confirm downstream accepts it.
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        self.draw.text(xy, text, font=font.font, fill=font.color)

    def textbbox(
        self, xy: tuple[float, float], text: AnyStr, font: Font
    ) -> tuple[float, float, float, float]:
        """
        Returns bounding box (in pixels) of given text.

        :return: ``(left, top, right, bottom)`` bounding box

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox`
        """
        if self.transform:
            path = ImagePath.Path(xy)
            path.transform(self.transform)
            xy = path
        return self.draw.textbbox(xy, text, font=font.font)

    def textlength(self, text: AnyStr, font: Font) -> float:
        """
        Returns length (in pixels) of given text.
        This is the amount by which following text should be offset.

        .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength`
        """
        return self.draw.textlength(text, font=font.font)
venv\Lib\site-packages\PIL\ImageEnhance.py
#
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.graficaobscura.com/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFilter, ImageStat
class _Enhance:
    # Subclasses set both attributes in __init__: `image` is the original and
    # `degenerate` is the factor-0.0 endpoint of the enhancement.
    image: Image.Image
    degenerate: Image.Image

    def enhance(self, factor: float) -> Image.Image:
        """
        Returns an enhanced image.

        :param factor: A floating point value controlling the enhancement.
            Factor 1.0 always returns a copy of the original image,
            lower factors mean less color (brightness, contrast,
            etc), and higher values more. There are no restrictions
            on this value.
        :rtype: :py:class:`~PIL.Image.Image`
        """
        # Interpolate (or extrapolate, for factors outside [0, 1]) between
        # the degenerate image (factor 0.0) and the original (factor 1.0).
        return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
    """Adjust image color balance.

    This class can be used to adjust the colour balance of an image, in
    a manner similar to the controls on a colour TV set. An enhancement
    factor of 0.0 gives a black and white image. A factor of 1.0 gives
    the original image.
    """

    def __init__(self, image: Image.Image) -> None:
        self.image = image
        # The desaturated endpoint is a grayscale copy; keep the alpha band
        # ("LA") when the image has one so transparency is unaffected.
        self.intermediate_mode = "LA" if "A" in image.getbands() else "L"
        if self.intermediate_mode != image.mode:
            image = image.convert(self.intermediate_mode).convert(image.mode)
        self.degenerate = image
class Contrast(_Enhance):
    """Adjust image contrast.

    This class can be used to control the contrast of an image, similar
    to the contrast control on a TV set. An enhancement factor of 0.0
    gives a solid gray image. A factor of 1.0 gives the original image.
    """

    def __init__(self, image: Image.Image) -> None:
        self.image = image
        # The zero-contrast endpoint is a solid image at the grayscale mean.
        gray = image if image.mode == "L" else image.convert("L")
        mean = int(ImageStat.Stat(gray).mean[0] + 0.5)
        self.degenerate = Image.new("L", gray.size, mean)
        if self.degenerate.mode != self.image.mode:
            self.degenerate = self.degenerate.convert(self.image.mode)
        # Preserve the original alpha channel so transparency is unchanged.
        if "A" in self.image.getbands():
            self.degenerate.putalpha(self.image.getchannel("A"))
class Brightness(_Enhance):
    """Adjust image brightness.

    This class can be used to control the brightness of an image. An
    enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
    original image.
    """

    def __init__(self, image: Image.Image) -> None:
        self.image = image
        # The zero-brightness endpoint is an all-black image...
        self.degenerate = Image.new(image.mode, image.size, 0)
        # ...with the original alpha channel preserved, so transparency
        # is unaffected by the enhancement.
        if "A" in image.getbands():
            self.degenerate.putalpha(image.getchannel("A"))
class Sharpness(_Enhance):
    """Adjust image sharpness.

    This class can be used to adjust the sharpness of an image. An
    enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
    original image, and a factor of 2.0 gives a sharpened image.
    """

    def __init__(self, image: Image.Image) -> None:
        self.image = image
        # The zero-sharpness endpoint is a smoothed copy; factors > 1.0
        # extrapolate away from it, which sharpens.
        self.degenerate = image.filter(ImageFilter.SMOOTH)
        # Preserve the original alpha channel so transparency is unchanged.
        if "A" in image.getbands():
            self.degenerate.putalpha(image.getchannel("A"))
venv\Lib\site-packages\PIL\ImageFile.py
#
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import abc
import io
import itertools
import logging
import os
import struct
from typing import IO, Any, NamedTuple, cast
from . import ExifTags, Image
from ._deprecate import deprecate
from ._util import DeferredError, is_path
# Stand-in for typing.TYPE_CHECKING: always False at runtime, so the import
# below is only visible to static type checkers.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from ._typing import StrOrBytesPath

logger = logging.getLogger(__name__)

# Maximum block size used when decoding/encoding image data incrementally.
MAXBLOCK = 65536
# Chunk size used by _safe_read() when reading a large, untrusted size.
SAFEBLOCK = 1024 * 1024

LOAD_TRUNCATED_IMAGES = False
"""Whether or not to load truncated image files. User code may change this."""

ERRORS = {
    -1: "image buffer overrun error",
    -2: "decoding error",
    -3: "unknown error",
    -8: "bad configuration",
    -9: "out of memory error",
}
"""
Dict of known error codes returned from :meth:`.PyDecoder.decode`,
:meth:`.PyEncoder.encode` :meth:`.PyEncoder.encode_to_pyfd` and
:meth:`.PyEncoder.encode_to_file`.
"""
#
# --------------------------------------------------------------------
# Helpers
def _get_oserror(error: int, *, encoder: bool) -> OSError:
    """Build an OSError describing a codec error code.

    :param error: Negative error code from a decoder/encoder.
    :param encoder: True if the error came from an encoder (writing),
        False for a decoder (reading).
    :returns: An OSError with a human-readable message (not raised here).
    """
    # Prefer the message from the C core; fall back to the ERRORS table.
    try:
        msg = Image.core.getcodecstatus(error)
    except AttributeError:
        msg = ERRORS.get(error)
    if not msg:
        side = "encoder" if encoder else "decoder"
        msg = f"{side} error {error}"
    verb = "writing" if encoder else "reading"
    msg += f" when {verb} image file"
    return OSError(msg)
def raise_oserror(error: int) -> OSError:
    # Deprecated public wrapper: emits a deprecation warning, then always
    # raises the OSError built by _get_oserror (decoder side).
    deprecate(
        "raise_oserror",
        12,
        action="It is only useful for translating error codes returned by a codec's "
        "decode() method, which ImageFile already does automatically.",
    )
    raise _get_oserror(error, encoder=False)
def _tilesort(t: _Tile) -> int:
    # Sort key for tile lists: the tile's byte offset within the file.
    # sort on offset
    return t[2]
class _Tile(NamedTuple):
    # One tile descriptor: which codec to use, where in the image it lands,
    # where its data starts in the file, and codec-specific arguments.
    codec_name: str  # name of the registered decoder/encoder
    extents: tuple[int, int, int, int] | None  # (x0, y0, x1, y1) image region, or None
    offset: int = 0  # byte offset of the tile data within the file
    args: tuple[Any, ...] | str | None = None  # codec-specific parameters
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
    """Base class for image file format handlers."""

    def __init__(
        self, fp: StrOrBytesPath | IO[bytes], filename: str | bytes | None = None
    ) -> None:
        super().__init__()

        self._min_frame = 0

        self.custom_mimetype: str | None = None

        self.tile: list[_Tile] = []
        """ A list of tile descriptors """

        self.readonly = 1  # until we know better

        self.decoderconfig: tuple[Any, ...] = ()
        self.decodermaxblock = MAXBLOCK

        if is_path(fp):
            # filename
            self.fp = open(fp, "rb")
            self.filename = os.fspath(fp)
            # we opened the file, so we are responsible for closing it
            self._exclusive_fp = True
        else:
            # stream
            self.fp = cast(IO[bytes], fp)
            self.filename = filename if filename is not None else ""
            # can be overridden
            self._exclusive_fp = False

        try:
            try:
                # subclass hook: parse the header and set mode/size/tile
                self._open()
            except (
                IndexError,  # end of data
                TypeError,  # end of data (ord)
                KeyError,  # unsupported mode
                EOFError,  # got header but not the first frame
                struct.error,
            ) as v:
                raise SyntaxError(v) from v

            if not self.mode or self.size[0] <= 0 or self.size[1] <= 0:
                msg = "not identified by this driver"
                raise SyntaxError(msg)
        except BaseException:
            # close the file only if we have opened it this constructor
            if self._exclusive_fp:
                self.fp.close()
            raise

    def _open(self) -> None:
        # Hook for subclasses: parse the file header.
        pass

    def _close_fp(self) -> None:
        # Close the private _fp (if any) before the public fp; guard against
        # the two being the same object.
        if getattr(self, "_fp", False) and not isinstance(self._fp, DeferredError):
            if self._fp != self.fp:
                self._fp.close()
            self._fp = DeferredError(ValueError("Operation on closed image"))
        if self.fp:
            self.fp.close()

    def close(self) -> None:
        """
        Closes the file pointer, if possible.

        This operation will destroy the image core and release its memory.
        The image data will be unusable afterward.

        This function is required to close images that have multiple frames or
        have not had their file read and closed by the
        :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
        more information.
        """
        try:
            self._close_fp()
            self.fp = None
        except Exception as msg:
            # closing is best-effort; never let it mask the caller's flow
            logger.debug("Error closing: %s", msg)

        super().close()

    def get_child_images(self) -> list[ImageFile]:
        # Collect images embedded via EXIF SubIFDs and the IFD1 thumbnail.
        child_images = []
        exif = self.getexif()
        ifds = []
        if ExifTags.Base.SubIFDs in exif:
            subifd_offsets = exif[ExifTags.Base.SubIFDs]
            if subifd_offsets:
                if not isinstance(subifd_offsets, tuple):
                    subifd_offsets = (subifd_offsets,)
                for subifd_offset in subifd_offsets:
                    ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
        ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
        if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset):
            assert exif._info is not None
            ifds.append((ifd1, exif._info.next))

        offset = None
        for ifd, ifd_offset in ifds:
            assert self.fp is not None
            current_offset = self.fp.tell()
            if offset is None:
                # remember the original position so it can be restored below
                offset = current_offset

            fp = self.fp
            if ifd is not None:
                thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset)
                if thumbnail_offset is not None:
                    thumbnail_offset += getattr(self, "_exif_offset", 0)
                    self.fp.seek(thumbnail_offset)
                    length = ifd.get(ExifTags.Base.JpegIFByteCount)
                    assert isinstance(length, int)
                    data = self.fp.read(length)
                    # read the embedded JPEG into its own buffer
                    fp = io.BytesIO(data)

            with Image.open(fp) as im:
                from . import TiffImagePlugin

                if thumbnail_offset is None and isinstance(
                    im, TiffImagePlugin.TiffImageFile
                ):
                    im._frame_pos = [ifd_offset]
                    im._seek(0)
                im.load()
                child_images.append(im)

        if offset is not None:
            assert self.fp is not None
            self.fp.seek(offset)
        return child_images

    def get_format_mimetype(self) -> str | None:
        # Prefer an explicitly set mimetype over the format-derived one.
        if self.custom_mimetype:
            return self.custom_mimetype
        if self.format is not None:
            return Image.MIME.get(self.format.upper())
        return None

    def __getstate__(self) -> list[Any]:
        # Extend the base pickle state with the filename.
        return super().__getstate__() + [self.filename]

    def __setstate__(self, state: list[Any]) -> None:
        self.tile = []
        if len(state) > 5:
            self.filename = state[5]
        super().__setstate__(state)

    def verify(self) -> None:
        """Check file integrity"""

        # raise exception if something's wrong. must be called
        # directly after open, and closes file when finished.
        if self._exclusive_fp:
            self.fp.close()
        self.fp = None

    def load(self) -> Image.core.PixelAccess | None:
        """Load image data based on tile list"""

        if not self.tile and self._im is None:
            msg = "cannot load this image"
            raise OSError(msg)

        pixel = Image.Image.load(self)
        if not self.tile:
            # nothing left to decode
            return pixel

        self.map: mmap.mmap | None = None
        # mmap is only attempted for single-tile images read from a real file
        use_mmap = self.filename and len(self.tile) == 1
        readonly = 0

        # look for read/seek overrides
        if hasattr(self, "load_read"):
            read = self.load_read
            # don't use mmap if there are custom read/seek functions
            use_mmap = False
        else:
            read = self.fp.read

        if hasattr(self, "load_seek"):
            seek = self.load_seek
            use_mmap = False
        else:
            seek = self.fp.seek

        if use_mmap:
            # try memory mapping
            decoder_name, extents, offset, args = self.tile[0]
            if isinstance(args, str):
                args = (args, 0, 1)
            # only raw data in a directly mappable mode can be memory-mapped
            if (
                decoder_name == "raw"
                and isinstance(args, tuple)
                and len(args) >= 3
                and args[0] == self.mode
                and args[0] in Image._MAPMODES
            ):
                try:
                    # use mmap, if possible
                    import mmap

                    with open(self.filename) as fp:
                        self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
                    if offset + self.size[1] * args[1] > self.map.size():
                        msg = "buffer is not large enough"
                        raise OSError(msg)
                    self.im = Image.core.map_buffer(
                        self.map, self.size, decoder_name, offset, args
                    )
                    readonly = 1
                    # After trashing self.im,
                    # we might need to reload the palette data.
                    if self.palette:
                        self.palette.dirty = 1
                except (AttributeError, OSError, ImportError):
                    # fall back to the regular decoder path below
                    self.map = None

        self.load_prepare()
        err_code = -3  # initialize to unknown error
        if not self.map:
            # sort tiles in file order
            self.tile.sort(key=_tilesort)

            # FIXME: This is a hack to handle TIFF's JpegTables tag.
            prefix = getattr(self, "tile_prefix", b"")

            # Remove consecutive duplicates that only differ by their offset
            self.tile = [
                list(tiles)[-1]
                for _, tiles in itertools.groupby(
                    self.tile, lambda tile: (tile[0], tile[1], tile[3])
                )
            ]
            for i, (decoder_name, extents, offset, args) in enumerate(self.tile):
                seek(offset)
                decoder = Image._getdecoder(
                    self.mode, decoder_name, args, self.decoderconfig
                )
                try:
                    decoder.setimage(self.im, extents)
                    if decoder.pulls_fd:
                        # decoder reads directly from the file object
                        decoder.setfd(self.fp)
                        err_code = decoder.decode(b"")[1]
                    else:
                        # feed the decoder in chunks until it is done
                        b = prefix
                        while True:
                            read_bytes = self.decodermaxblock
                            if i + 1 < len(self.tile):
                                # don't read past the start of the next tile
                                next_offset = self.tile[i + 1].offset
                                if next_offset > offset:
                                    read_bytes = next_offset - offset
                            try:
                                s = read(read_bytes)
                            except (IndexError, struct.error) as e:
                                # truncated png/gif
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    msg = "image file is truncated"
                                    raise OSError(msg) from e

                            if not s:  # truncated jpeg
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    msg = (
                                        "image file is truncated "
                                        f"({len(b)} bytes not processed)"
                                    )
                                    raise OSError(msg)

                            b = b + s
                            n, err_code = decoder.decode(b)
                            if n < 0:
                                break
                            # keep only unconsumed bytes for the next round
                            b = b[n:]
                finally:
                    # Need to cleanup here to prevent leaks
                    decoder.cleanup()

        self.tile = []
        self.readonly = readonly

        self.load_end()

        if self._exclusive_fp and self._close_exclusive_fp_after_loading:
            self.fp.close()
        self.fp = None

        if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
            # still raised if decoder fails to return anything
            raise _get_oserror(err_code, encoder=False)

        return Image.Image.load(self)

    def load_prepare(self) -> None:
        # create image memory if necessary
        if self._im is None:
            self.im = Image.core.new(self.mode, self.size)
        # create palette (optional)
        if self.mode == "P":
            Image.Image.load(self)

    def load_end(self) -> None:
        # may be overridden
        pass

    # may be defined for contained formats
    # def load_seek(self, pos: int) -> None:
    #     pass

    # may be defined for blocked formats (e.g. PNG)
    # def load_read(self, read_bytes: int) -> bytes:
    #     pass

    def _seek_check(self, frame: int) -> bool:
        # Validate a seek target; returns True if an actual seek is needed
        # (i.e. the target differs from the current frame).
        if (
            frame < self._min_frame
            # Only check upper limit on frames if additional seek operations
            # are not required to do so
            or (
                not (hasattr(self, "_n_frames") and self._n_frames is None)
                and frame >= getattr(self, "n_frames") + self._min_frame
            )
        ):
            msg = "attempt to seek outside sequence"
            raise EOFError(msg)

        return self.tell() != frame
class StubHandler(abc.ABC):
    # Interface implemented by external code that actually loads images
    # identified by a StubImageFile subclass.
    def open(self, im: StubImageFile) -> None:
        # Optional hook; the default does nothing.
        pass

    @abc.abstractmethod
    def load(self, im: StubImageFile) -> Image.Image:
        # Must return the fully loaded image.
        pass
class StubImageFile(ImageFile, metaclass=abc.ABCMeta):
    """
    Base class for stub image loaders.

    A stub loader is an image loader that can identify files of a
    certain format, but relies on external code to load the file.
    """

    @abc.abstractmethod
    def _open(self) -> None:
        pass

    def load(self) -> Image.core.PixelAccess | None:
        loader = self._load()
        if loader is None:
            msg = f"cannot find loader for this {self.format} file"
            raise OSError(msg)
        image = loader.load(self)
        assert image is not None
        # become the other object (!)
        # The stub instance is rebound to the class and state of the image
        # returned by the external loader.
        self.__class__ = image.__class__  # type: ignore[assignment]
        self.__dict__ = image.__dict__
        return image.load()

    @abc.abstractmethod
    def _load(self) -> StubHandler | None:
        """(Hook) Find actual image loader."""
        pass
class Parser:
    """
    Incremental image parser. This class implements the standard
    feed/close consumer interface.
    """

    incremental = None
    image: Image.Image | None = None
    data: bytes | None = None
    decoder: Image.core.ImagingDecoder | PyDecoder | None = None
    offset = 0
    finished = 0

    def reset(self) -> None:
        """
        (Consumer) Reset the parser. Note that you can only call this
        method immediately after you've created a parser; parser
        instances cannot be reused.
        """
        assert self.data is None, "cannot reuse parsers"

    def feed(self, data: bytes) -> None:
        """
        (Consumer) Feed data to the parser.

        :param data: A string buffer.
        :exception OSError: If the parser failed to parse the image file.
        """
        # collect data
        if self.finished:
            return
        if self.data is None:
            self.data = data
        else:
            self.data = self.data + data

        # parse what we have
        if self.decoder:
            if self.offset > 0:
                # skip header
                skip = min(len(self.data), self.offset)
                self.data = self.data[skip:]
                self.offset = self.offset - skip
                if self.offset > 0 or not self.data:
                    # header not fully skipped yet, or no payload left
                    return

            n, e = self.decoder.decode(self.data)

            if n < 0:
                # end of stream
                self.data = None
                self.finished = 1
                if e < 0:
                    # decoding error
                    self.image = None
                    raise _get_oserror(e, encoder=False)
                else:
                    # end of image
                    return
            # keep only unconsumed bytes for the next feed()
            self.data = self.data[n:]

        elif self.image:
            # if we end up here with no decoder, this file cannot
            # be incrementally parsed. wait until we've gotten all
            # available data
            pass

        else:
            # attempt to open this file
            try:
                with io.BytesIO(self.data) as fp:
                    im = Image.open(fp)
            except OSError:
                pass  # not enough data
            else:
                flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
                if flag or len(im.tile) != 1:
                    # custom load code, or multiple tiles
                    # NOTE(review): this sets `self.decode`, not `self.decoder`
                    # — looks like a long-standing sentinel/typo; confirm
                    # against upstream before changing.
                    self.decode = None
                else:
                    # initialize decoder
                    im.load_prepare()
                    d, e, o, a = im.tile[0]
                    im.tile = []
                    self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig)
                    self.decoder.setimage(im.im, e)

                    # calculate decoder offset
                    self.offset = o
                    if self.offset <= len(self.data):
                        self.data = self.data[self.offset :]
                        self.offset = 0

                self.image = im

    def __enter__(self) -> Parser:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def close(self) -> Image.Image:
        """
        (Consumer) Close the stream.

        :returns: An image object.
        :exception OSError: If the parser failed to parse the image file either
                            because it cannot be identified or cannot be
                            decoded.
        """
        # finish decoding
        if self.decoder:
            # get rid of what's left in the buffers
            self.feed(b"")
            self.data = self.decoder = None
            if not self.finished:
                msg = "image was incomplete"
                raise OSError(msg)
        if not self.image:
            msg = "cannot parse this image"
            raise OSError(msg)
        if self.data:
            # incremental parsing not possible; reopen the file
            # now that we have all data
            with io.BytesIO(self.data) as fp:
                try:
                    self.image = Image.open(fp)
                finally:
                    self.image.load()
        return self.image
# --------------------------------------------------------------------
def _save(im: Image.Image, fp: IO[bytes], tile: list[_Tile], bufsize: int = 0) -> None:
    """Helper to save image based on tile list

    :param im: Image object.
    :param fp: File object.
    :param tile: Tile list.
    :param bufsize: Optional buffer size
    """

    im.load()
    if not hasattr(im, "encoderconfig"):
        im.encoderconfig = ()
    # write tiles in file order
    tile.sort(key=_tilesort)
    # FIXME: make MAXBLOCK a configuration parameter
    # It would be great if we could have the encoder specify what it needs
    # But, it would need at least the image size in most cases. RawEncode is
    # a tricky case.
    bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4)  # see RawEncode.c
    try:
        # fast path: encode straight to a real OS file descriptor
        fh = fp.fileno()
        fp.flush()
        _encode_tile(im, fp, tile, bufsize, fh)
    except (AttributeError, io.UnsupportedOperation) as exc:
        # fp is not a real file; write through the Python object instead,
        # chaining `exc` onto any encoder failure for context
        _encode_tile(im, fp, tile, bufsize, None, exc)
    if hasattr(fp, "flush"):
        fp.flush()
def _encode_tile(
    im: Image.Image,
    fp: IO[bytes],
    tile: list[_Tile],
    bufsize: int,
    fh: int | None,
    exc: BaseException | None = None,
) -> None:
    # Encode each tile in turn. `fh` is a real OS file descriptor when one is
    # available; `exc` is the error that made `fh` unavailable (if any).
    for encoder_name, extents, offset, args in tile:
        if offset > 0:
            fp.seek(offset)
        encoder = Image._getencoder(im.mode, encoder_name, args, im.encoderconfig)
        try:
            encoder.setimage(im.im, extents)
            if encoder.pushes_fd:
                # encoder writes directly to the Python file-like object
                encoder.setfd(fp)
                errcode = encoder.encode_to_pyfd()[1]
            else:
                if exc:
                    # compress to Python file-compatible object
                    while True:
                        errcode, data = encoder.encode(bufsize)[1:]
                        fp.write(data)
                        if errcode:
                            break
                else:
                    # slight speedup: compress to real file object
                    assert fh is not None
                    errcode = encoder.encode_to_file(fh, bufsize)
            if errcode < 0:
                raise _get_oserror(errcode, encoder=True) from exc
        finally:
            # Need to cleanup here to prevent leaks
            encoder.cleanup()
def _safe_read(fp: IO[bytes], size: int) -> bytes:
    """
    Reads large blocks in a safe way. Unlike fp.read(n), this function
    doesn't trust the user. If the requested size is larger than
    SAFEBLOCK, the file is read block by block.

    :param fp: File handle. Must implement a read method.
    :param size: Number of bytes to read.
    :returns: A string containing size bytes of data.

    Raises an OSError if the file is truncated and the read cannot be completed
    """
    if size <= 0:
        return b""
    # Small request: a single read, verified for completeness.
    if size <= SAFEBLOCK:
        data = fp.read(size)
        if len(data) < size:
            msg = "Truncated File Read"
            raise OSError(msg)
        return data
    # Large request: read in SAFEBLOCK-sized chunks so an attacker-supplied
    # size cannot force one huge allocation up front.
    chunks: list[bytes] = []
    outstanding = size
    while outstanding > 0:
        chunk = fp.read(min(outstanding, SAFEBLOCK))
        if not chunk:
            break  # EOF before the requested size was reached
        chunks.append(chunk)
        outstanding -= len(chunk)
    if sum(len(chunk) for chunk in chunks) < size:
        msg = "Truncated File Read"
        raise OSError(msg)
    return b"".join(chunks)
class PyCodecState:
    """Mutable tile geometry shared by Python-implemented codecs.

    Tracks the size (``xsize``, ``ysize``) and offset (``xoff``, ``yoff``)
    of the tile currently being decoded or encoded.
    """

    def __init__(self) -> None:
        # Start with an empty tile anchored at the origin; PyCodec.setimage
        # fills these in from the tile extents.
        self.xsize = 0
        self.ysize = 0
        self.xoff = 0
        self.yoff = 0

    def extents(self) -> tuple[int, int, int, int]:
        """Return the tile rectangle as ``(x0, y0, x1, y1)``."""
        x0, y0 = self.xoff, self.yoff
        return x0, y0, x0 + self.xsize, y0 + self.ysize
class PyCodec:
    # Python file-like object supplying/receiving codec data; set via setfd().
    fd: IO[bytes] | None

    def __init__(self, mode: str, *args: Any) -> None:
        # Core image is attached later via setimage().
        self.im: Image.core.ImagingCore | None = None
        self.state = PyCodecState()
        self.fd = None
        self.mode = mode
        self.init(args)

    def init(self, args: tuple[Any, ...]) -> None:
        """
        Override to perform codec specific initialization

        :param args: Tuple of arg items from the tile entry
        :returns: None
        """
        self.args = args

    def cleanup(self) -> None:
        """
        Override to perform codec specific cleanup

        :returns: None
        """
        pass

    def setfd(self, fd: IO[bytes]) -> None:
        """
        Called from ImageFile to set the Python file-like object

        :param fd: A Python file-like object
        :returns: None
        """
        self.fd = fd

    def setimage(
        self,
        im: Image.core.ImagingCore,
        extents: tuple[int, int, int, int] | None = None,
    ) -> None:
        """
        Called from ImageFile to set the core output image for the codec

        :param im: A core image object
        :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
            for this tile
        :raises ValueError: If the tile is empty or extends outside the image.
        :returns: None
        """
        # following c code
        self.im = im

        if extents:
            (x0, y0, x1, y1) = extents
        else:
            (x0, y0, x1, y1) = (0, 0, 0, 0)

        if x0 == 0 and x1 == 0:
            # degenerate extents mean "the whole image"
            self.state.xsize, self.state.ysize = self.im.size
        else:
            self.state.xoff = x0
            self.state.yoff = y0
            self.state.xsize = x1 - x0
            self.state.ysize = y1 - y0

        if self.state.xsize <= 0 or self.state.ysize <= 0:
            msg = "Size cannot be negative"
            raise ValueError(msg)

        if (
            self.state.xsize + self.state.xoff > self.im.size[0]
            or self.state.ysize + self.state.yoff > self.im.size[1]
        ):
            msg = "Tile cannot extend outside image"
            raise ValueError(msg)
class PyDecoder(PyCodec):
    """
    Python implementation of a format decoder. Override this class and
    add the decoding logic in the :meth:`decode` method.

    See :ref:`Writing Your Own File Codec in Python`
    """

    # When True, the decoder reads directly from self.fd instead of
    # being fed buffers through decode().
    _pulls_fd = False

    @property
    def pulls_fd(self) -> bool:
        return self._pulls_fd

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        """
        Override to perform the decoding process.

        :param buffer: A bytes object with the data to be decoded.
        :returns: A tuple of ``(bytes consumed, errcode)``.
            If finished with decoding return -1 for the bytes consumed.
            Err codes are from :data:`.ImageFile.ERRORS`.
        """
        msg = "unavailable in base decoder"
        raise NotImplementedError(msg)

    def set_as_raw(
        self, data: bytes, rawmode: str | None = None, extra: tuple[Any, ...] = ()
    ) -> None:
        """
        Convenience method to set the internal image from a stream of raw data

        :param data: Bytes to be set
        :param rawmode: The rawmode to be used for the decoder.
            If not specified, it will default to the mode of the image
        :param extra: Extra arguments for the decoder.
        :returns: None
        """
        if not rawmode:
            rawmode = self.mode
        # Delegate to the built-in "raw" decoder, targeting our tile.
        d = Image._getdecoder(self.mode, "raw", rawmode, extra)
        assert self.im is not None
        d.setimage(self.im, self.state.extents())
        s = d.decode(data)

        # A negative byte count from the raw decoder means it consumed
        # everything it needed; a non-negative count means short data.
        if s[0] >= 0:
            msg = "not enough image data"
            raise ValueError(msg)
        if s[1] != 0:
            msg = "cannot decode image data"
            raise ValueError(msg)
class PyEncoder(PyCodec):
    """
    Python implementation of a format encoder. Override this class and
    add the decoding logic in the :meth:`encode` method.

    See :ref:`Writing Your Own File Codec in Python`
    """

    # When True, the encoder writes directly to self.fd through
    # encode_to_pyfd() instead of returning buffers from encode().
    _pushes_fd = False

    @property
    def pushes_fd(self) -> bool:
        return self._pushes_fd

    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
        """
        Override to perform the encoding process.

        :param bufsize: Buffer size.
        :returns: A tuple of ``(bytes encoded, errcode, bytes)``.
            If finished with encoding return 1 for the error code.
            Err codes are from :data:`.ImageFile.ERRORS`.
        """
        msg = "unavailable in base encoder"
        raise NotImplementedError(msg)

    def encode_to_pyfd(self) -> tuple[int, int]:
        """
        If ``pushes_fd`` is ``True``, then this method will be used,
        and ``encode()`` will only be called once.

        :returns: A tuple of ``(bytes consumed, errcode)``.
            Err codes are from :data:`.ImageFile.ERRORS`.
        """
        if not self.pushes_fd:
            return 0, -8  # bad configuration
        bytes_consumed, errcode, data = self.encode(0)
        if data:
            assert self.fd is not None
            self.fd.write(data)
        return bytes_consumed, errcode

    def encode_to_file(self, fh: int, bufsize: int) -> int:
        """
        :param fh: File handle.
        :param bufsize: Buffer size.

        :returns: If finished successfully, return 0.
            Otherwise, return an error code. Err codes are from
            :data:`.ImageFile.ERRORS`.
        """
        errcode = 0
        # Pull encoded chunks until encode() reports a non-zero errcode
        # (per the encode() contract, 1 signals a successful finish).
        while errcode == 0:
            status, errcode, buf = self.encode(bufsize)
            if status > 0:
                # NOTE(review): buf[status:] writes the tail *after* the
                # reported byte count, which looks inverted relative to
                # "bytes encoded" — confirm against the codec contract
                # (buf[:status] would be the expected slice).
                os.write(fh, buf[status:])
        return errcode
# venv\Lib\site-packages\PIL\ImageFilter.py
#
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import abc
import functools
from collections.abc import Sequence
from types import ModuleType
from typing import Any, Callable, cast
TYPE_CHECKING = False
if TYPE_CHECKING:
from . import _imaging
from ._typing import NumpyArray
class Filter(abc.ABC):
    """Abstract base class for all image filters.

    Subclasses implement :meth:`filter`, which takes a core image object
    and returns a new, filtered core image object.
    """

    @abc.abstractmethod
    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        pass
class MultibandFilter(Filter):
    """Marker base class for filters that can be applied to multiband images."""

    pass
class BuiltinFilter(MultibandFilter):
    # Argument tuple forwarded to ImagingCore.filter(); subclasses set it
    # to (size, scale, offset, kernel) — see Kernel and the constants below.
    filterargs: tuple[Any, ...]

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Palette ("P") images hold indices, not intensities, so applying
        # a convolution kernel to them is not meaningful.
        if image.mode == "P":
            msg = "cannot filter palette images"
            raise ValueError(msg)
        return image.filter(*self.filterargs)
class Kernel(BuiltinFilter):
    """
    Create a convolution kernel. This only supports 3x3 and 5x5 integer and floating
    point kernels.

    Kernels can only be applied to "L" and "RGB" images.

    :param size: Kernel size, given as (width, height). This must be (3,3) or (5,5).
    :param kernel: A sequence containing kernel weights. The kernel will be flipped
        vertically before being applied to the image.
    :param scale: Scale factor. If given, the result for each pixel is divided by this
        value. The default is the sum of the kernel weights.
    :param offset: Offset. If given, this value is added to the result, after it has
        been divided by the scale factor.
    :raises ValueError: If the kernel length does not match ``size``.
    """

    name = "Kernel"

    def __init__(
        self,
        size: tuple[int, int],
        kernel: Sequence[float],
        scale: float | None = None,
        offset: float = 0,
    ) -> None:
        # Validate the coefficient count first, so a mismatched (including
        # empty) kernel reports the intended ValueError rather than failing
        # while computing the default scale.
        if size[0] * size[1] != len(kernel):
            msg = "not enough coefficients in kernel"
            raise ValueError(msg)
        if scale is None:
            # default scale is sum of kernel
            scale = sum(kernel)
        self.filterargs = size, scale, offset, kernel
class RankFilter(Filter):
    """
    Create a rank filter. The rank filter sorts all pixels in
    a window of the given size, and returns the ``rank``'th value.

    :param size: The kernel size, in pixels.
    :param rank: What pixel value to pick. Use 0 for a min filter,
        ``size * size / 2`` for a median filter, ``size * size - 1``
        for a max filter, etc.
    """

    name = "Rank"

    def __init__(self, size: int, rank: int) -> None:
        self.size = size
        self.rank = rank

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Palette images cannot be rank-filtered: indices are not ordered
        # intensities.
        if image.mode == "P":
            msg = "cannot filter palette images"
            raise ValueError(msg)
        # Pad the image so the window is defined at the borders, then
        # apply the core rank filter.
        padding = self.size // 2
        padded = image.expand(padding, padding)
        return padded.rankfilter(self.size, self.rank)
class MedianFilter(RankFilter):
    """
    Create a median filter. Picks the median pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """

    name = "Median"

    def __init__(self, size: int = 3) -> None:
        # The median is the middle element of the sorted size*size window.
        super().__init__(size, size * size // 2)
class MinFilter(RankFilter):
    """
    Create a min filter. Picks the lowest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """

    name = "Min"

    def __init__(self, size: int = 3) -> None:
        # Rank 0 selects the smallest value in the window.
        super().__init__(size, 0)
class MaxFilter(RankFilter):
    """
    Create a max filter. Picks the largest pixel value in a window with the
    given size.

    :param size: The kernel size, in pixels.
    """

    name = "Max"

    def __init__(self, size: int = 3) -> None:
        # The last rank selects the largest value in the window.
        super().__init__(size, size * size - 1)
class ModeFilter(Filter):
    """
    Create a mode filter. Picks the most frequent pixel value in a box with the
    given size. Pixel values that occur only once or twice are ignored; if no
    pixel value occurs more than twice, the original pixel value is preserved.

    :param size: The kernel size, in pixels.
    """

    name = "Mode"

    def __init__(self, size: int = 3) -> None:
        self.size = size

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Delegates directly to the C-level mode filter.
        return image.modefilter(self.size)
class GaussianBlur(MultibandFilter):
    """Blurs the image with a sequence of extended box filters, which
    approximates a Gaussian kernel.

    :param radius: Standard deviation of the Gaussian kernel. Either a sequence of two
        numbers for x and y, or a single number for both.
    """

    name = "GaussianBlur"

    def __init__(self, radius: float | Sequence[float] = 2) -> None:
        self.radius = radius

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Promote a scalar radius to an (x, y) pair.
        radii = (
            (self.radius, self.radius)
            if isinstance(self.radius, (int, float))
            else self.radius
        )
        # A (0, 0) radius is a no-op; just return a copy.
        if radii == (0, 0):
            return image.copy()
        return image.gaussian_blur(radii)
class BoxBlur(MultibandFilter):
    """Blurs the image by setting each pixel to the average value of the pixels
    in a square box extending radius pixels in each direction.
    Supports float radius of arbitrary size. Uses an optimized implementation
    which runs in linear time relative to the size of the image
    for any radius value.

    :param radius: Size of the box in a direction. Either a sequence of two numbers for
        x and y, or a single number for both.

        Radius 0 does not blur, returns an identical image.
        Radius 1 takes 1 pixel in each direction, i.e. 9 pixels in total.
    :raises ValueError: If either radius is negative.
    """

    name = "BoxBlur"

    def __init__(self, radius: float | Sequence[float]) -> None:
        # Normalize to an (x, y) pair just for validation; the original
        # value is stored untouched.
        if isinstance(radius, (tuple, list)):
            rx, ry = radius[0], radius[1]
        else:
            rx = ry = radius
        if rx < 0 or ry < 0:
            msg = "radius must be >= 0"
            raise ValueError(msg)
        self.radius = radius

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Promote a scalar radius to an (x, y) pair.
        radii = (
            (self.radius, self.radius)
            if isinstance(self.radius, (int, float))
            else self.radius
        )
        # A (0, 0) radius is a no-op; just return a copy.
        if radii == (0, 0):
            return image.copy()
        return image.box_blur(radii)
class UnsharpMask(MultibandFilter):
    """Unsharp mask filter.

    See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
    the parameters.

    :param radius: Blur Radius
    :param percent: Unsharp strength, in percent
    :param threshold: Threshold controls the minimum brightness change that
        will be sharpened

    .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
    """

    name = "UnsharpMask"

    def __init__(
        self, radius: float = 2, percent: int = 150, threshold: int = 3
    ) -> None:
        self.radius = radius
        self.percent = percent
        self.threshold = threshold

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Delegates directly to the C-level unsharp-mask implementation.
        return image.unsharp_mask(self.radius, self.percent, self.threshold)
class BLUR(BuiltinFilter):
    # 5x5 kernel averaging the outer ring of pixels (scale 16, offset 0).
    name = "Blur"
    # fmt: off
    filterargs = (5, 5), 16, 0, (
        1, 1, 1, 1, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 1, 1, 1, 1,
    )
    # fmt: on
class CONTOUR(BuiltinFilter):
    # Laplacian edge kernel with a 255 offset: inverts edges onto white.
    name = "Contour"
    # fmt: off
    filterargs = (3, 3), 1, 255, (
        -1, -1, -1,
        -1,  8, -1,
        -1, -1, -1,
    )
    # fmt: on
class DETAIL(BuiltinFilter):
    # Mild sharpening kernel (center 10, cross neighbors -1, scale 6).
    name = "Detail"
    # fmt: off
    filterargs = (3, 3), 6, 0, (
        0, -1,  0,
        -1, 10, -1,
        0, -1,  0,
    )
    # fmt: on
class EDGE_ENHANCE(BuiltinFilter):
    # Edge enhancement: center 10 against all -1 neighbors, scale 2.
    name = "Edge-enhance"
    # fmt: off
    filterargs = (3, 3), 2, 0, (
        -1, -1, -1,
        -1, 10, -1,
        -1, -1, -1,
    )
    # fmt: on
class EDGE_ENHANCE_MORE(BuiltinFilter):
    # Stronger edge enhancement: unit-scale kernel summing to 1.
    name = "Edge-enhance More"
    # fmt: off
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1,  9, -1,
        -1, -1, -1,
    )
    # fmt: on
class EMBOSS(BuiltinFilter):
    # Diagonal difference kernel with a 128 offset for the gray midtone.
    name = "Emboss"
    # fmt: off
    filterargs = (3, 3), 1, 128, (
        -1, 0, 0,
        0,  1, 0,
        0,  0, 0,
    )
    # fmt: on
class FIND_EDGES(BuiltinFilter):
    # Laplacian edge detector: zero-sum kernel, edges on black background.
    name = "Find Edges"
    # fmt: off
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1,  8, -1,
        -1, -1, -1,
    )
    # fmt: on
class SHARPEN(BuiltinFilter):
    # Sharpening kernel: strong center (32) vs -2 neighbors, scale 16.
    name = "Sharpen"
    # fmt: off
    filterargs = (3, 3), 16, 0, (
        -2, -2, -2,
        -2, 32, -2,
        -2, -2, -2,
    )
    # fmt: on
class SMOOTH(BuiltinFilter):
    # Gentle smoothing: weighted average keeping most of the center pixel.
    name = "Smooth"
    # fmt: off
    filterargs = (3, 3), 13, 0, (
        1, 1, 1,
        1, 5, 1,
        1, 1, 1,
    )
    # fmt: on
class SMOOTH_MORE(BuiltinFilter):
    # Heavier 5x5 smoothing with a graduated weight pyramid (scale 100).
    name = "Smooth More"
    # fmt: off
    filterargs = (5, 5), 100, 0, (
        1, 1,  1, 1, 1,
        1, 5,  5, 5, 1,
        1, 5, 44, 5, 1,
        1, 5,  5, 5, 1,
        1, 1,  1, 1, 1,
    )
    # fmt: on
class Color3DLUT(MultibandFilter):
    """Three-dimensional color lookup table.

    Transforms 3-channel pixels using the values of the channels as coordinates
    in the 3D lookup table and interpolating the nearest elements.

    This method allows you to apply almost any color transformation
    in constant time by using pre-calculated decimated tables.

    .. versionadded:: 5.2.0

    :param size: Size of the table. One int or tuple of (int, int, int).
        Minimal size in any dimension is 2, maximum is 65.
    :param table: Flat lookup table. A list of ``channels * size**3``
        float elements or a list of ``size**3`` channels-sized
        tuples with floats. Channels are changed first,
        then first dimension, then second, then third.
        Value 0.0 corresponds lowest value of output, 1.0 highest.
    :param channels: Number of channels in the table. Could be 3 or 4.
        Default is 3.
    :param target_mode: A mode for the result image. Should have not less
        than ``channels`` channels. Default is ``None``,
        which means that mode wouldn't be changed.
    """

    name = "Color 3D LUT"

    def __init__(
        self,
        size: int | tuple[int, int, int],
        table: Sequence[float] | Sequence[Sequence[int]] | NumpyArray,
        channels: int = 3,
        target_mode: str | None = None,
        **kwargs: bool,
    ) -> None:
        if channels not in (3, 4):
            msg = "Only 3 or 4 output channels are supported"
            raise ValueError(msg)
        self.size = size = self._check_size(size)
        self.channels = channels
        self.mode = target_mode

        # Hidden flag `_copy_table=False` could be used to avoid extra copying
        # of the table if the table is specially made for the constructor.
        copy_table = kwargs.get("_copy_table", True)
        items = size[0] * size[1] * size[2]
        wrong_size = False

        # numpy is optional; only probed when the table looks array-like
        # (has a `shape` attribute).
        numpy: ModuleType | None = None
        if hasattr(table, "shape"):
            try:
                import numpy
            except ImportError:
                pass

        if numpy and isinstance(table, numpy.ndarray):
            numpy_table: NumpyArray = table
            if copy_table:
                numpy_table = numpy_table.copy()

            # Accept flat, (items, channels), or full 3-D layouts, and
            # normalize to a flat array.
            if numpy_table.shape in [
                (items * channels,),
                (items, channels),
                (size[2], size[1], size[0], channels),
            ]:
                table = numpy_table.reshape(items * channels)
            else:
                wrong_size = True

        else:
            if copy_table:
                table = list(table)

            # Convert to a flat list
            if table and isinstance(table[0], (list, tuple)):
                raw_table = cast(Sequence[Sequence[int]], table)
                flat_table: list[int] = []
                for pixel in raw_table:
                    if len(pixel) != channels:
                        msg = (
                            "The elements of the table should "
                            f"have a length of {channels}."
                        )
                        raise ValueError(msg)
                    flat_table.extend(pixel)
                table = flat_table

        if wrong_size or len(table) != items * channels:
            msg = (
                "The table should have either channels * size**3 float items "
                "or size**3 items of channels-sized tuples with floats. "
                f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. "
                f"Actual length: {len(table)}"
            )
            raise ValueError(msg)
        self.table = table

    @staticmethod
    def _check_size(size: Any) -> tuple[int, int, int]:
        """Normalize *size* to a validated (int, int, int) tuple."""
        # TypeError from the unpack means a scalar was passed: use it for
        # all three dimensions. ValueError means a wrong-length iterable.
        try:
            _, _, _ = size
        except ValueError as e:
            msg = "Size should be either an integer or a tuple of three integers."
            raise ValueError(msg) from e
        except TypeError:
            size = (size, size, size)
        size = tuple(int(x) for x in size)
        for size_1d in size:
            if not 2 <= size_1d <= 65:
                msg = "Size should be in [2, 65] range."
                raise ValueError(msg)
        return size

    @classmethod
    def generate(
        cls,
        size: int | tuple[int, int, int],
        callback: Callable[[float, float, float], tuple[float, ...]],
        channels: int = 3,
        target_mode: str | None = None,
    ) -> Color3DLUT:
        """Generates new LUT using provided callback.

        :param size: Size of the table. Passed to the constructor.
        :param callback: Function with three parameters which correspond
            three color channels. Will be called ``size**3``
            times with values from 0.0 to 1.0 and should return
            a tuple with ``channels`` elements.
        :param channels: The number of channels which should return callback.
        :param target_mode: Passed to the constructor of the resulting
            lookup table.
        """
        size_1d, size_2d, size_3d = cls._check_size(size)
        if channels not in (3, 4):
            msg = "Only 3 or 4 output channels are supported"
            raise ValueError(msg)

        # First dimension (r) varies fastest, matching the table layout
        # documented on the class.
        table: list[float] = [0] * (size_1d * size_2d * size_3d * channels)
        idx_out = 0
        for b in range(size_3d):
            for g in range(size_2d):
                for r in range(size_1d):
                    table[idx_out : idx_out + channels] = callback(
                        r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
                    )
                    idx_out += channels

        return cls(
            (size_1d, size_2d, size_3d),
            table,
            channels=channels,
            target_mode=target_mode,
            _copy_table=False,
        )

    def transform(
        self,
        callback: Callable[..., tuple[float, ...]],
        with_normals: bool = False,
        channels: int | None = None,
        target_mode: str | None = None,
    ) -> Color3DLUT:
        """Transforms the table values using provided callback and returns
        a new LUT with altered values.

        :param callback: A function which takes old lookup table values
            and returns a new set of values. The number
            of arguments which function should take is
            ``self.channels`` or ``3 + self.channels``
            if ``with_normals`` flag is set.
            Should return a tuple of ``self.channels`` or
            ``channels`` elements if it is set.
        :param with_normals: If true, ``callback`` will be called with
            coordinates in the color cube as the first
            three arguments. Otherwise, ``callback``
            will be called only with actual color values.
        :param channels: The number of channels in the resulting lookup table.
        :param target_mode: Passed to the constructor of the resulting
            lookup table.
        """
        if channels not in (None, 3, 4):
            msg = "Only 3 or 4 output channels are supported"
            raise ValueError(msg)
        ch_in = self.channels
        ch_out = channels or ch_in
        size_1d, size_2d, size_3d = self.size

        # Walk the input table in layout order, feeding each entry (and
        # optionally its normalized cube coordinates) to the callback.
        table: list[float] = [0] * (size_1d * size_2d * size_3d * ch_out)
        idx_in = 0
        idx_out = 0
        for b in range(size_3d):
            for g in range(size_2d):
                for r in range(size_1d):
                    values = self.table[idx_in : idx_in + ch_in]
                    if with_normals:
                        values = callback(
                            r / (size_1d - 1),
                            g / (size_2d - 1),
                            b / (size_3d - 1),
                            *values,
                        )
                    else:
                        values = callback(*values)
                    table[idx_out : idx_out + ch_out] = values
                    idx_in += ch_in
                    idx_out += ch_out

        return type(self)(
            self.size,
            table,
            channels=ch_out,
            target_mode=target_mode or self.mode,
            _copy_table=False,
        )

    def __repr__(self) -> str:
        r = [
            f"{self.__class__.__name__} from {self.table.__class__.__name__}",
            "size={:d}x{:d}x{:d}".format(*self.size),
            f"channels={self.channels:d}",
        ]
        if self.mode:
            r.append(f"target_mode={self.mode}")
        return "<{}>".format(" ".join(r))

    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
        # Imported here rather than at module level — presumably to avoid
        # a circular import; confirm before moving.
        from . import Image

        return image.color_lut_3d(
            self.mode or image.mode,
            Image.Resampling.BILINEAR,
            self.channels,
            self.size,
            self.table,
        )
# venv\Lib\site-packages\PIL\ImageFont.py
#
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import base64
import os
import sys
import warnings
from enum import IntEnum
from io import BytesIO
from types import ModuleType
from typing import IO, Any, BinaryIO, TypedDict, cast
from . import Image, features
from ._typing import StrOrBytesPath
from ._util import DeferredError, is_path
TYPE_CHECKING = False
if TYPE_CHECKING:
from . import ImageFile
from ._imaging import ImagingFont
from ._imagingft import Font
class Axis(TypedDict):
    """Range and name metadata for one font variation axis; fields may be None."""

    minimum: int | None
    default: int | None
    maximum: int | None
    name: bytes | None
class Layout(IntEnum):
    """Text layout engine selector: built-in BASIC, or RAQM (requires libraqm)."""

    BASIC = 0
    RAQM = 1
# Upper bound on text length accepted by the rendering helpers; enforced
# in _string_length_check().
MAX_STRING_LENGTH = 1_000_000


core: ModuleType | DeferredError
try:
    from . import _imagingft as core
except ImportError as ex:
    # FreeType support is optional; defer the ImportError until the
    # module is actually used.
    core = DeferredError.new(ex)
def _string_length_check(text: str | bytes | bytearray) -> None:
    """Raise ValueError if *text* is longer than MAX_STRING_LENGTH."""
    # A None limit disables the check entirely.
    if MAX_STRING_LENGTH is None:
        return
    if len(text) > MAX_STRING_LENGTH:
        msg = "too many characters in string"
        raise ValueError(msg)
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
class ImageFont:
    """PIL font wrapper"""

    font: ImagingFont

    def _load_pilfont(self, filename: str) -> None:
        """Load PILfont metrics from *filename* plus its companion glyph image."""
        with open(filename, "rb") as fp:
            image: ImageFile.ImageFile | None = None
            root = os.path.splitext(filename)[0]

            # Probe for the glyph bitmap next to the metrics file; the
            # for/else runs only when no usable ("1" or "L") image opens.
            for ext in (".png", ".gif", ".pbm"):
                if image:
                    image.close()
                try:
                    fullname = root + ext
                    image = Image.open(fullname)
                except Exception:
                    pass
                else:
                    if image and image.mode in ("1", "L"):
                        break
            else:
                if image:
                    image.close()
                msg = f"cannot find glyph data file {root}.{{gif|pbm|png}}"
                raise OSError(msg)

            self.file = fullname

            self._load_pilfont_data(fp, image)
            image.close()

    def _load_pilfont_data(self, file: IO[bytes], image: Image.Image) -> None:
        """Parse the PILfont header/metrics from *file* and bind *image*'s glyphs."""
        # read PILfont header
        if file.readline() != b"PILfont\n":
            msg = "Not a PILfont file"
            raise SyntaxError(msg)
        file.readline().split(b";")
        self.info = []  # FIXME: should be a dictionary
        # Collect key=value lines until the DATA marker (or EOF).
        while True:
            s = file.readline()
            if not s or s == b"DATA\n":
                break
            self.info.append(s)

        # read PILfont metrics
        data = file.read(256 * 20)

        # check image
        if image.mode not in ("1", "L"):
            msg = "invalid font image mode"
            raise TypeError(msg)

        image.load()

        self.font = Image.core.font(image.im, data)

    def getmask(
        self, text: str | bytes, mode: str = "", *args: Any, **kwargs: Any
    ) -> Image.core.ImagingCore:
        """
        Create a bitmap for the text.

        If the font uses antialiasing, the bitmap should have mode ``L`` and use a
        maximum value of 255. Otherwise, it should have mode ``1``.

        :param text: Text to render.
        :param mode: Used by some graphics drivers to indicate what mode the
            driver prefers; if empty, the renderer may return either
            mode. Note that the mode is always a string, to simplify
            C-level implementations.

            .. versionadded:: 1.1.5

        :return: An internal PIL storage memory instance as defined by the
            :py:mod:`PIL.Image.core` interface module.
        """
        _string_length_check(text)
        # Guard against oversized renders before allocating the bitmap.
        Image._decompression_bomb_check(self.font.getsize(text))
        return self.font.getmask(text, mode)

    def getbbox(
        self, text: str | bytes | bytearray, *args: Any, **kwargs: Any
    ) -> tuple[int, int, int, int]:
        """
        Returns bounding box (in pixels) of given text.

        .. versionadded:: 9.2.0

        :param text: Text to render.

        :return: ``(left, top, right, bottom)`` bounding box
        """
        _string_length_check(text)
        width, height = self.font.getsize(text)
        return 0, 0, width, height

    def getlength(
        self, text: str | bytes | bytearray, *args: Any, **kwargs: Any
    ) -> int:
        """
        Returns length (in pixels) of given text.
        This is the amount by which following text should be offset.

        .. versionadded:: 9.2.0
        """
        _string_length_check(text)
        width, height = self.font.getsize(text)
        return width
##
# Wrapper for FreeType fonts. Application code should use the
# truetype factory function to create font objects.
class FreeTypeFont:
"""FreeType font wrapper (requires _imagingft service)"""
font: Font
font_bytes: bytes
def __init__(
    self,
    font: StrOrBytesPath | BinaryIO,
    size: float = 10,
    index: int = 0,
    encoding: str = "",
    layout_engine: Layout | None = None,
) -> None:
    # FIXME: use service provider instead

    # _imagingft failed to import; surface the original import error now.
    if isinstance(core, DeferredError):
        raise core.ex

    if size <= 0:
        msg = f"font size must be greater than 0, not {size}"
        raise ValueError(msg)

    self.path = font
    self.size = size
    self.index = index
    self.encoding = encoding

    try:
        from packaging.version import parse as parse_version
    except ImportError:
        pass
    else:
        # Warn about deprecated FreeType < 2.9.1 (pre CVE-2020-15999 fix).
        if freetype_version := features.version_module("freetype2"):
            if parse_version(freetype_version) < parse_version("2.9.1"):
                warnings.warn(
                    "Support for FreeType 2.9.0 is deprecated and will be removed "
                    "in Pillow 12 (2025-10-15). Please upgrade to FreeType 2.9.1 "
                    "or newer, preferably FreeType 2.10.4 which fixes "
                    "CVE-2020-15999.",
                    DeprecationWarning,
                )

    # Choose the layout engine: default to Raqm when available, and fall
    # back to basic layout (with a warning) when Raqm was requested but
    # is not compiled in.
    if layout_engine not in (Layout.BASIC, Layout.RAQM):
        layout_engine = Layout.BASIC
        if core.HAVE_RAQM:
            layout_engine = Layout.RAQM
    elif layout_engine == Layout.RAQM and not core.HAVE_RAQM:
        warnings.warn(
            "Raqm layout was requested, but Raqm is not available. "
            "Falling back to basic layout."
        )
        layout_engine = Layout.BASIC
    self.layout_engine = layout_engine

    def load_from_bytes(f: IO[bytes]) -> None:
        # Read the whole font into memory and let FreeType open it from
        # the in-memory buffer.
        self.font_bytes = f.read()
        self.font = core.getfont(
            "", size, index, encoding, self.font_bytes, layout_engine
        )

    if is_path(font):
        font = os.fspath(font)
        if sys.platform == "win32":
            font_bytes_path = font if isinstance(font, bytes) else font.encode()
            try:
                font_bytes_path.decode("ascii")
            except UnicodeDecodeError:
                # FreeType cannot load fonts with non-ASCII characters on Windows
                # So load it into memory first
                with open(font, "rb") as f:
                    load_from_bytes(f)
                return
        self.font = core.getfont(
            font, size, index, encoding, layout_engine=layout_engine
        )
    else:
        load_from_bytes(cast(IO[bytes], font))
def __getstate__(self) -> list[Any]:
    """Return pickling state: [path, size, index, encoding, layout_engine]."""
    attrs = ("path", "size", "index", "encoding", "layout_engine")
    return [getattr(self, name) for name in attrs]
def __setstate__(self, state: list[Any]) -> None:
    # Rebuild the font by re-running the constructor with the unpickled
    # arguments (see __getstate__ for the state layout).
    path, size, index, encoding, layout_engine = state
    FreeTypeFont.__init__(self, path, size, index, encoding, layout_engine)
def getname(self) -> tuple[str | None, str | None]:
    """
    :return: A tuple of the font family (e.g. Helvetica) and the font style
        (e.g. Bold)
    """
    family = self.font.family
    style = self.font.style
    return family, style
def getmetrics(self) -> tuple[int, int]:
    """
    :return: A tuple of the font ascent (the distance from the baseline to
        the highest outline point) and descent (the distance from the
        baseline to the lowest outline point, a negative value)
    """
    ascent = self.font.ascent
    descent = self.font.descent
    return ascent, descent
def getlength(
    self,
    text: str | bytes,
    mode: str = "",
    direction: str | None = None,
    features: list[str] | None = None,
    language: str | None = None,
) -> float:
    """
    Returns length (in pixels with 1/64 precision) of given text when rendered
    in font with provided direction, features, and language.

    This is the amount by which following text should be offset.
    Text bounding box may extend past the length in some fonts,
    e.g. when using italics or accents.

    The result is returned as a float; it is a whole number if using basic layout.

    Note that the sum of two lengths may not equal the length of a concatenated
    string due to kerning. If you need to adjust for kerning, include the following
    character and subtract its length.

    For example, instead of ::

        hello = font.getlength("Hello")
        world = font.getlength("World")
        hello_world = hello + world  # not adjusted for kerning
        assert hello_world == font.getlength("HelloWorld")  # may fail

    use ::

        hello = font.getlength("HelloW") - font.getlength("W")  # adjusted for kerning
        world = font.getlength("World")
        hello_world = hello + world  # adjusted for kerning
        assert hello_world == font.getlength("HelloWorld")  # True

    or disable kerning with (requires libraqm) ::

        hello = draw.textlength("Hello", font, features=["-kern"])
        world = draw.textlength("World", font, features=["-kern"])
        hello_world = hello + world  # kerning is disabled, no need to adjust
        assert hello_world == draw.textlength("HelloWorld", font, features=["-kern"])

    .. versionadded:: 8.0.0

    :param text: Text to measure.
    :param mode: Used by some graphics drivers to indicate what mode the
        driver prefers; if empty, the renderer may return either
        mode. Note that the mode is always a string, to simplify
        C-level implementations.
    :param direction: Direction of the text. It can be 'rtl' (right to
        left), 'ltr' (left to right) or 'ttb' (top to bottom).
        Requires libraqm.
    :param features: A list of OpenType font features to be used during text
        layout. This is usually used to turn on optional
        font features that are not enabled by default,
        for example 'dlig' or 'ss01', but can be also
        used to turn off default font features for
        example '-liga' to disable ligatures or '-kern'
        to disable kerning. To get all supported
        features, see
        https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
        Requires libraqm.
    :param language: Language of the text. Different languages may use
        different glyph shapes or ligatures. This parameter tells
        the font which language the text is in, and to apply the
        correct substitutions as appropriate, if available.
        It should be a BCP 47 language code.
        Requires libraqm.
    :return: Either width for horizontal text, or height for vertical text.
    """
    _string_length_check(text)
    # The core returns the length in 1/64ths of a pixel.
    return self.font.getlength(text, mode, direction, features, language) / 64
def getbbox(
    self,
    text: str | bytes,
    mode: str = "",
    direction: str | None = None,
    features: list[str] | None = None,
    language: str | None = None,
    stroke_width: float = 0,
    anchor: str | None = None,
) -> tuple[float, float, float, float]:
    """
    Returns bounding box (in pixels) of given text relative to given anchor
    when rendered in font with provided direction, features, and language.

    Use :py:meth:`getlength()` to get the offset of following text with
    1/64 pixel precision. The bounding box includes extra margins for
    some fonts, e.g. italics or accents.

    .. versionadded:: 8.0.0

    :param text: Text to render.
    :param mode: Used by some graphics drivers to indicate what mode the
        driver prefers; if empty, the renderer may return either
        mode. Note that the mode is always a string, to simplify
        C-level implementations.
    :param direction: Direction of the text. It can be 'rtl' (right to
        left), 'ltr' (left to right) or 'ttb' (top to bottom).
        Requires libraqm.
    :param features: A list of OpenType font features to be used during text
        layout. This is usually used to turn on optional
        font features that are not enabled by default,
        for example 'dlig' or 'ss01', but can be also
        used to turn off default font features for
        example '-liga' to disable ligatures or '-kern'
        to disable kerning. To get all supported
        features, see
        https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
        Requires libraqm.
    :param language: Language of the text. Different languages may use
        different glyph shapes or ligatures. This parameter tells
        the font which language the text is in, and to apply the
        correct substitutions as appropriate, if available.
        It should be a BCP 47 language code.
        Requires libraqm.
    :param stroke_width: The width of the text stroke.
    :param anchor: The text anchor alignment. Determines the relative location of
        the anchor to the text. The default alignment is top left,
        specifically ``la`` for horizontal text and ``lt`` for
        vertical text. See :ref:`text-anchors` for details.
    :return: ``(left, top, right, bottom)`` bounding box
    """
    _string_length_check(text)
    size, offset = self.font.getsize(
        text, mode, direction, features, language, anchor
    )
    # Grow the box by the stroke width on every side.
    left, top = offset[0] - stroke_width, offset[1] - stroke_width
    width, height = size[0] + 2 * stroke_width, size[1] + 2 * stroke_width
    return left, top, left + width, top + height
def getmask(
    self,
    text: str | bytes,
    mode: str = "",
    direction: str | None = None,
    features: list[str] | None = None,
    language: str | None = None,
    stroke_width: float = 0,
    anchor: str | None = None,
    ink: int = 0,
    start: tuple[float, float] | None = None,
) -> Image.core.ImagingCore:
    """
    Create a bitmap for the text.

    If the font uses antialiasing, the bitmap should have mode ``L`` and use a
    maximum value of 255. If the font has embedded color data, the bitmap
    should have mode ``RGBA``. Otherwise, it should have mode ``1``.

    :param text: Text to render.
    :param mode: Used by some graphics drivers to indicate what mode the
        driver prefers; if empty, the renderer may return either
        mode. Note that the mode is always a string, to simplify
        C-level implementations.

        .. versionadded:: 1.1.5

    :param direction: Direction of the text. It can be 'rtl' (right to
        left), 'ltr' (left to right) or 'ttb' (top to bottom).
        Requires libraqm.

        .. versionadded:: 4.2.0

    :param features: A list of OpenType font features to be used during text
        layout. This is usually used to turn on optional
        font features that are not enabled by default,
        for example 'dlig' or 'ss01', but can be also
        used to turn off default font features for
        example '-liga' to disable ligatures or '-kern'
        to disable kerning. To get all supported
        features, see
        https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
        Requires libraqm.

        .. versionadded:: 4.2.0

    :param language: Language of the text. Different languages may use
        different glyph shapes or ligatures. This parameter tells
        the font which language the text is in, and to apply the
        correct substitutions as appropriate, if available.
        It should be a BCP 47 language code.
        Requires libraqm.

        .. versionadded:: 6.0.0

    :param stroke_width: The width of the text stroke.

        .. versionadded:: 6.2.0

    :param anchor: The text anchor alignment. Determines the relative location of
        the anchor to the text. The default alignment is top left,
        specifically ``la`` for horizontal text and ``lt`` for
        vertical text. See :ref:`text-anchors` for details.

        .. versionadded:: 8.0.0

    :param ink: Foreground ink for rendering in RGBA mode.

        .. versionadded:: 8.0.0

    :param start: Tuple of horizontal and vertical offset, as text may render
        differently when starting at fractional coordinates.

        .. versionadded:: 9.4.0

    :return: An internal PIL storage memory instance as defined by the
        :py:mod:`PIL.Image.core` interface module.
    """
    # Thin wrapper: getmask2() returns (bitmap, offset); only the bitmap
    # is needed here.
    return self.getmask2(
        text,
        mode,
        direction=direction,
        features=features,
        language=language,
        stroke_width=stroke_width,
        anchor=anchor,
        ink=ink,
        start=start,
    )[0]
def getmask2(
self,
text: str | bytes,
mode: str = "",
direction: str | None = None,
features: list[str] | None = None,
language: str | None = None,
stroke_width: float = 0,
anchor: str | None = None,
ink: int = 0,
start: tuple[float, float] | None = None,
*args: Any,
**kwargs: Any,
) -> tuple[Image.core.ImagingCore, tuple[int, int]]:
"""
Create a bitmap for the text.
If the font uses antialiasing, the bitmap should have mode ``L`` and use a
maximum value of 255. If the font has embedded color data, the bitmap
should have mode ``RGBA``. Otherwise, it should have mode ``1``.
:param text: Text to render.
:param mode: Used by some graphics drivers to indicate what mode the
driver prefers; if empty, the renderer may return either
mode. Note that the mode is always a string, to simplify
C-level implementations.
.. versionadded:: 1.1.5
:param direction: Direction of the text. It can be 'rtl' (right to
left), 'ltr' (left to right) or 'ttb' (top to bottom).
Requires libraqm.
.. versionadded:: 4.2.0
:param features: A list of OpenType font features to be used during text
layout. This is usually used to turn on optional
font features that are not enabled by default,
for example 'dlig' or 'ss01', but can be also
used to turn off default font features for
example '-liga' to disable ligatures or '-kern'
to disable kerning. To get all supported
features, see
https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist
Requires libraqm.
.. versionadded:: 4.2.0
:param language: Language of the text. Different languages may use
different glyph shapes or ligatures. This parameter tells
the font which language the text is in, and to apply the
correct substitutions as appropriate, if available.
It should be a `BCP 47 language code
`_
Requires libraqm.
.. versionadded:: 6.0.0
:param stroke_width: The width of the text stroke.
.. versionadded:: 6.2.0
:param anchor: The text anchor alignment. Determines the relative location of
the anchor to the text. The default alignment is top left,
specifically ``la`` for horizontal text and ``lt`` for
vertical text. See :ref:`text-anchors` for details.
.. versionadded:: 8.0.0
:param ink: Foreground ink for rendering in RGBA mode.
.. versionadded:: 8.0.0
:param start: Tuple of horizontal and vertical offset, as text may render
differently when starting at fractional coordinates.
.. versionadded:: 9.4.0
:return: A tuple of an internal PIL storage memory instance as defined by the
:py:mod:`PIL.Image.core` interface module, and the text offset, the
gap between the starting coordinate and the first marking
"""
_string_length_check(text)
if start is None:
start = (0, 0)
def fill(width: int, height: int) -> Image.core.ImagingCore:
size = (width, height)
Image._decompression_bomb_check(size)
return Image.core.fill("RGBA" if mode == "RGBA" else "L", size)
return self.font.render(
text,
fill,
mode,
direction,
features,
language,
stroke_width,
kwargs.get("stroke_filled", False),
anchor,
ink,
start,
)
def font_variant(
self,
font: StrOrBytesPath | BinaryIO | None = None,
size: float | None = None,
index: int | None = None,
encoding: str | None = None,
layout_engine: Layout | None = None,
) -> FreeTypeFont:
"""
Create a copy of this FreeTypeFont object,
using any specified arguments to override the settings.
Parameters are identical to the parameters used to initialize this
object.
:return: A FreeTypeFont object.
"""
if font is None:
try:
font = BytesIO(self.font_bytes)
except AttributeError:
font = self.path
return FreeTypeFont(
font=font,
size=self.size if size is None else size,
index=self.index if index is None else index,
encoding=self.encoding if encoding is None else encoding,
layout_engine=layout_engine or self.layout_engine,
)
def get_variation_names(self) -> list[bytes]:
"""
:returns: A list of the named styles in a variation font.
:exception OSError: If the font is not a variation font.
"""
try:
names = self.font.getvarnames()
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
return [name.replace(b"\x00", b"") for name in names]
def set_variation_by_name(self, name: str | bytes) -> None:
"""
:param name: The name of the style.
:exception OSError: If the font is not a variation font.
"""
names = self.get_variation_names()
if not isinstance(name, bytes):
name = name.encode()
index = names.index(name) + 1
if index == getattr(self, "_last_variation_index", None):
# When the same name is set twice in a row,
# there is an 'unknown freetype error'
# https://savannah.nongnu.org/bugs/?56186
return
self._last_variation_index = index
self.font.setvarname(index)
def get_variation_axes(self) -> list[Axis]:
"""
:returns: A list of the axes in a variation font.
:exception OSError: If the font is not a variation font.
"""
try:
axes = self.font.getvaraxes()
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
for axis in axes:
if axis["name"]:
axis["name"] = axis["name"].replace(b"\x00", b"")
return axes
def set_variation_by_axes(self, axes: list[float]) -> None:
"""
:param axes: A list of values for each axis.
:exception OSError: If the font is not a variation font.
"""
try:
self.font.setvaraxes(axes)
except AttributeError as e:
msg = "FreeType 2.9.1 or greater is required"
raise NotImplementedError(msg) from e
class TransposedFont:
    """Wraps another font object so its text renders rotated or mirrored."""

    def __init__(
        self, font: ImageFont | FreeTypeFont, orientation: Image.Transpose | None = None
    ):
        """
        Wrapper that creates a transposed font from any existing font
        object.

        :param font: A font object.
        :param orientation: An optional orientation. If given, this should
            be one of Image.Transpose.FLIP_LEFT_RIGHT,
            Image.Transpose.FLIP_TOP_BOTTOM, Image.Transpose.ROTATE_90,
            Image.Transpose.ROTATE_180, or Image.Transpose.ROTATE_270.
        """
        self.font = font
        self.orientation = orientation  # any 'transpose' argument, or None

    def getmask(
        self, text: str | bytes, mode: str = "", *args: Any, **kwargs: Any
    ) -> Image.core.ImagingCore:
        # Render with the wrapped font, then apply the transposition (if any).
        mask = self.font.getmask(text, mode, *args, **kwargs)
        if self.orientation is None:
            return mask
        return mask.transpose(self.orientation)

    def getbbox(
        self, text: str | bytes, *args: Any, **kwargs: Any
    ) -> tuple[int, int, float, float]:
        # TransposedFont doesn't support getmask2, move top-left point to (0, 0)
        # this has no effect on ImageFont and simulates anchor="lt" for FreeTypeFont
        left, top, right, bottom = self.font.getbbox(text, *args, **kwargs)
        width = right - left
        height = bottom - top
        quarter_turned = self.orientation in (
            Image.Transpose.ROTATE_90,
            Image.Transpose.ROTATE_270,
        )
        # A quarter turn swaps the box's width and height.
        return (0, 0, height, width) if quarter_turned else (0, 0, width, height)

    def getlength(self, text: str | bytes, *args: Any, **kwargs: Any) -> float:
        if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270):
            raise ValueError(
                "text length is undefined for text rotated by 90 or 270 degrees"
            )
        return self.font.getlength(text, *args, **kwargs)
def load(filename: str) -> ImageFont:
    """
    Load a font file. This function loads a font object from the given
    bitmap font file, and returns the corresponding font object. For loading
    TrueType or OpenType fonts instead, see :py:func:`~PIL.ImageFont.truetype`.

    :param filename: Name of font file.
    :return: A font object.
    :exception OSError: If the file could not be read.
    """
    bitmap_font = ImageFont()
    bitmap_font._load_pilfont(filename)
    return bitmap_font
def truetype(
    font: StrOrBytesPath | BinaryIO,
    size: float = 10,
    index: int = 0,
    encoding: str = "",
    layout_engine: Layout | None = None,
) -> FreeTypeFont:
    """
    Load a TrueType or OpenType font from a file or file-like object,
    and create a font object. This function loads a font object from the given
    file or file-like object, and creates a font object for a font of the given
    size. For loading bitmap fonts instead, see :py:func:`~PIL.ImageFont.load`
    and :py:func:`~PIL.ImageFont.load_path`.

    Pillow uses FreeType to open font files. On Windows, be aware that FreeType
    will keep the file open as long as the FreeTypeFont object exists. Windows
    limits the number of files that can be open in C at once to 512, so if many
    fonts are opened simultaneously and that limit is approached, an
    ``OSError`` may be thrown, reporting that FreeType "cannot open resource".
    A workaround would be to copy the file(s) into memory, and open that
    instead.

    This function requires the _imagingft service.

    :param font: A filename or file-like object containing a TrueType font.
                 If the file is not found in this filename, the loader may also
                 search in other directories, such as:

                 * The :file:`fonts/` directory on Windows,
                 * :file:`/Library/Fonts/`, :file:`/System/Library/Fonts/`
                   and :file:`~/Library/Fonts/` on macOS.
                 * :file:`~/.local/share/fonts`, :file:`/usr/local/share/fonts`,
                   and :file:`/usr/share/fonts` on Linux; or those specified by
                   the ``XDG_DATA_HOME`` and ``XDG_DATA_DIRS`` environment
                   variables for user-installed and system-wide fonts,
                   respectively.
    :param size: The requested size, in pixels.
    :param index: Which font face to load (default is first available face).
    :param encoding: Which font encoding to use (default is Unicode). Possible
                     encodings include (see the FreeType documentation for more
                     information):

                     * "unic" (Unicode)
                     * "symb" (Microsoft Symbol)
                     * "ADOB" (Adobe Standard)
                     * "ADBE" (Adobe Expert)
                     * "ADBC" (Adobe Custom)
                     * "armn" (Apple Roman)
                     * "sjis" (Shift JIS)
                     * "gb  " (PRC)
                     * "big5"
                     * "wans" (Extended Wansung)
                     * "joha" (Johab)
                     * "lat1" (Latin-1)

                     This specifies the character set to use. It does not alter
                     the encoding of any text provided in subsequent
                     operations.
    :param layout_engine: Which layout engine to use, if available:
                     :attr:`.ImageFont.Layout.BASIC` or
                     :attr:`.ImageFont.Layout.RAQM`.
                     If it is available, Raqm layout will be used by default.
                     Otherwise, basic layout will be used.

                     Raqm layout is recommended for all non-English text. If
                     Raqm layout is not required, basic layout will have better
                     performance.

                     You can check support for Raqm layout using
                     :py:func:`PIL.features.check_feature` with
                     ``feature="raqm"``.

                     .. versionadded:: 4.2.0
    :return: A font object.
    :exception OSError: If the file could not be read.
    :exception ValueError: If the font size is not greater than zero.
    """

    def freetype(font: StrOrBytesPath | BinaryIO) -> FreeTypeFont:
        # Closure over size/index/encoding/layout_engine so the fallback
        # search below can retry with alternative paths.
        return FreeTypeFont(font, size, index, encoding, layout_engine)

    try:
        return freetype(font)
    except OSError:
        # Only retry with a directory search when the caller passed a path
        # (not a file-like object).
        if not is_path(font):
            raise
        ttf_filename = os.path.basename(font)

        # Build the list of platform-specific font directories to search.
        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "fonts"))
        elif sys.platform in ("linux", "linux2"):
            data_home = os.environ.get("XDG_DATA_HOME")
            if not data_home:
                # The freedesktop spec defines the following default directory for
                # when XDG_DATA_HOME is unset or empty. This user-level directory
                # takes precedence over system-level directories.
                data_home = os.path.expanduser("~/.local/share")
            xdg_dirs = [data_home]

            data_dirs = os.environ.get("XDG_DATA_DIRS")
            if not data_dirs:
                # Similarly, defaults are defined for the system-level directories
                data_dirs = "/usr/local/share:/usr/share"
            xdg_dirs += data_dirs.split(":")

            dirs += [os.path.join(xdg_dir, "fonts") for xdg_dir in xdg_dirs]
        elif sys.platform == "darwin":
            dirs += [
                "/Library/Fonts",
                "/System/Library/Fonts",
                os.path.expanduser("~/Library/Fonts"),
            ]

        # Walk the directories. With an extension, only an exact filename
        # match counts. Without one, a ".ttf" match wins immediately, while
        # the first match with any other extension is kept as a last resort.
        ext = os.path.splitext(ttf_filename)[1]
        first_font_with_a_different_extension = None
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    if ext and walkfilename == ttf_filename:
                        return freetype(os.path.join(walkroot, walkfilename))
                    elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename:
                        fontpath = os.path.join(walkroot, walkfilename)
                        if os.path.splitext(fontpath)[1] == ".ttf":
                            return freetype(fontpath)
                        if not ext and first_font_with_a_different_extension is None:
                            first_font_with_a_different_extension = fontpath
        if first_font_with_a_different_extension:
            return freetype(first_font_with_a_different_extension)
        # Nothing found anywhere: re-raise the original OSError.
        raise
def load_path(filename: str | bytes) -> ImageFont:
    """
    Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
    bitmap font along the Python path.

    :param filename: Name of font file.
    :return: A font object.
    :exception OSError: If the file could not be read.
    """
    if not isinstance(filename, str):
        filename = filename.decode("utf-8")
    # Try the filename relative to each sys.path entry in turn.
    for directory in sys.path:
        try:
            return load(os.path.join(directory, filename))
        except OSError:
            pass
    # Bug fix: these f-strings previously contained the literal text
    # "(unknown)" instead of the {filename} placeholder, so the error
    # never named the font that could not be found.
    msg = f'cannot find font file "{filename}" in sys.path'
    if os.path.exists(filename):
        # The file exists as given, so the caller probably wanted load().
        msg += f', did you mean ImageFont.load("{filename}") instead?'
    raise OSError(msg)
def load_default_imagefont() -> ImageFont:
    # Build the built-in fallback bitmap font from data embedded below, so
    # text can always be rendered even with no font files on disk. The first
    # base64 blob is the PIL font metrics file for courB08; the second is the
    # PNG image holding its glyph bitmaps.
    f = ImageFont()
    f._load_pilfont_data(
        # courB08
        BytesIO(
            base64.b64decode(
                b"""
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
"""
            )
        ),
        Image.open(
            BytesIO(
                base64.b64decode(
                    b"""
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
"""
                )
            )
        ),
    )
    return f
def load_default(size: float | None = None) -> FreeTypeFont | ImageFont:
    """If FreeType support is available, load a version of Aileron Regular,
    https://dotcolon.net/fonts/aileron, with a more limited character set.

    Otherwise, load a "better than nothing" font.

    .. versionadded:: 1.1.4

    :param size: The font size of Aileron Regular.

        .. versionadded:: 10.1.0

    :return: A font object.
    """
    # NOTE(review): "core" is presumably the compiled FreeType extension
    # module when Pillow was built with FreeType support, and a placeholder
    # object otherwise -- confirm against the module-level import. A real
    # module (or an explicit size request) selects the embedded TrueType
    # font; requesting a size without FreeType will fail inside truetype().
    if isinstance(core, ModuleType) or size is not None:
        # The Aileron Regular subset ships as a base64-encoded TrueType
        # blob; it is decoded into an in-memory file object on every call.
        return truetype(
            BytesIO(
                base64.b64decode(
                    b"""
AAEAAAAPAIAAAwBwRkZUTYwDlUAAADFoAAAAHEdERUYAqADnAAAo8AAAACRHUE9ThhmITwAAKfgAA
AduR1NVQnHxefoAACkUAAAA4k9TLzJovoHLAAABeAAAAGBjbWFw5lFQMQAAA6gAAAGqZ2FzcP//AA
MAACjoAAAACGdseWYmRXoPAAAGQAAAHfhoZWFkE18ayQAAAPwAAAA2aGhlYQboArEAAAE0AAAAJGh
tdHjjERZ8AAAB2AAAAdBsb2NhuOexrgAABVQAAADqbWF4cAC7AEYAAAFYAAAAIG5hbWUr+h5lAAAk
OAAAA6Jwb3N0D3oPTQAAJ9wAAAEKAAEAAAABGhxJDqIhXw889QALA+gAAAAA0Bqf2QAAAADhCh2h/
2r/LgOxAyAAAAAIAAIAAAAAAAAAAQAAA8r/GgAAA7j/av9qA7EAAQAAAAAAAAAAAAAAAAAAAHQAAQ
AAAHQAQwAFAAAAAAACAAAAAQABAAAAQAAAAAAAAAADAfoBkAAFAAgCigJYAAAASwKKAlgAAAFeADI
BPgAAAAAFAAAAAAAAAAAAAAcAAAAAAAAAAAAAAABVS1dOAEAAIPsCAwL/GgDIA8oA5iAAAJMAAAAA
AhICsgAAACAAAwH0AAAAAAAAAU0AAADYAAAA8gA5AVMAVgJEAEYCRAA1AuQAKQKOAEAAsAArATsAZ
AE7AB4CMABVAkQAUADc/+EBEgAgANwAJQEv//sCRAApAkQAggJEADwCRAAtAkQAIQJEADkCRAArAk
QAMgJEACwCRAAxANwAJQDc/+ECRABnAkQAUAJEAEQB8wAjA1QANgJ/AB0CcwBkArsALwLFAGQCSwB
kAjcAZALGAC8C2gBkAQgAZAIgADcCYQBkAj8AZANiAGQCzgBkAuEALwJWAGQC3QAvAmsAZAJJADQC
ZAAiAqoAXgJuACADuAAaAnEAGQJFABMCTwAuATMAYgEv//sBJwAiAkQAUAH0ADIBLAApAhMAJAJjA
EoCEQAeAmcAHgIlAB4BIgAVAmcAHgJRAEoA7gA+AOn/8wIKAEoA9wBGA1cASgJRAEoCSgAeAmMASg
JnAB4BSgBKAcsAGAE5ABQCUABCAgIAAQMRAAEB4v/6AgEAAQHOABQBLwBAAPoAYAEvACECRABNA0Y
AJAItAHgBKgAcAkQAUAEsAHQAygAgAi0AOQD3ADYA9wAWAaEANgGhABYCbAAlAYMAeAGDADkA6/9q
AhsAFAIKABUB/QAVAAAAAwAAAAMAAAAcAAEAAAAAAKQAAwABAAAAHAAEAIgAAAAeABAAAwAOAH4Aq
QCrALEAtAC3ALsgGSAdICYgOiBEISL7Av//AAAAIACpAKsAsAC0ALcAuyAYIBwgJiA5IEQhIvsB//
//4/+5/7j/tP+y/7D/reBR4E/gR+A14CzfTwVxAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAEGAAABAAAAAAAAAAECAAAAAgAAAAAAAAAAAAAAAAAAAAEAAAMEBQYHCAkKCwwNDg8QERIT
FBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMT
U5PUFFSU1RVVldYWVpbXF1eX2BhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQAAA
AAAAAAYnFmAAAAAABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAY2htAAAAAAAAAABrbGlqAAAAAHAAbm9
ycwBnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmACYAJgAmAD4AUgCCAMoBCgFO
AVwBcgGIAaYBvAHKAdYB6AH2AgwCIAJKAogCpgLWAw4DIgNkA5wDugPUA+gD/AQQBEYEogS8BPoFJ
gVSBWoFgAWwBcoF1gX6BhQGJAZMBmgGiga0BuIHGgdUB2YHkAeiB8AH3AfyCAoIHAgqCDoITghcCG
oIogjSCPoJKglYCXwJwgnqCgIKKApACl4Klgq8CtwLDAs8C1YLjAuyC9oL7gwMDCYMSAxgDKAMrAz
qDQoNTA1mDYQNoA2uDcAN2g3oDfYODA4iDkoOXA5sDnoOnA7EDvwAAAAFAAAAAAH0ArwAAwAGAAkA
DAAPAAAxESERAxMhExcRASELARETAfT6qv6syKr+jgFUqsiqArz9RAGLAP/+1P8B/v3VAP8BLP4CA
P8AAgA5//IAuQKyAAMACwAANyMDMwIyFhQGIiY0oE4MZk84JCQ4JLQB/v3AJDgkJDgAAgBWAeUBPA
LfAAMABwAAEyMnMxcjJzOmRgpagkYKWgHl+vr6AAAAAAIARgAAAf4CsgAbAB8AAAEHMxUjByM3Iwc
jNyM1MzcjNTM3MwczNzMHMxUrAQczAZgdZXEvOi9bLzovWmYdZXEvOi9bLzovWp9bHlsBn4w429vb
2ziMONvb29s4jAAAAAMANf+mAg4DDAAfACYALAAAJRQGBxUjNS4BJzMeARcRLgE0Njc1MxUeARcjJ
icVHgEBFBYXNQ4BExU+ATU0Ag5xWDpgcgRcBz41Xl9oVTpVYwpcC1ttXP6cLTQuM5szOrVRZwlOTQ
ZqVzZECAEAGlukZAlOTQdrUG8O7iNlAQgxNhDlCDj+8/YGOjReAAAAAAUAKf/yArsCvAAHAAsAFQA
dACcAABIyFhQGIiY0EyMBMwQiBhUUFjI2NTQSMhYUBiImNDYiBhUUFjI2NTR5iFBQiFCVVwHAV/5c
OiMjOiPmiFBQiFCxOiMjOiMCvFaSVlaS/ZoCsjIzMC80NC8w/uNWklZWkhozMC80NC8wAAAAAgBA/
/ICbgLAACIALgAAARUjEQYjIiY1NDY3LgE1NDYzMhcVJiMiBhUUFhcWOwE1MxUFFBYzMjc1IyIHDg
ECbmBcYYOOVkg7R4hsQjY4Q0RNRD4SLDxW/pJUXzksPCkUUk0BgUb+zBVUZ0BkDw5RO1huCkULQzp
COAMBcHDHRz0J/AIHRQAAAAEAKwHlAIUC3wADAAATIycze0YKWgHl+gAAAAABAGT/sAEXAwwACQAA
EzMGEBcjLgE0Nt06dXU6OUBAAwzG/jDGVePs4wAAAAEAHv+wANEDDAAJAAATMx4BFAYHIzYQHjo5Q
EA5OnUDDFXj7ONVxgHQAAAAAQBVAFIB2wHbAA4AAAE3FwcXBycHJzcnNxcnMwEtmxOfcTJjYzJxnx
ObCj4BKD07KYolmZkliik7PbMAAQBQAFUB9AIlAAsAAAEjFSM1IzUzNTMVMwH0tTq1tTq1AR/Kyjj
OzgAAAAAB/+H/iACMAGQABAAANwcjNzOMWlFOXVrS3AAAAQAgAP8A8gE3AAMAABMjNTPy0tIA/zgA
AQAl//IApQByAAcAADYyFhQGIiY0STgkJDgkciQ4JCQ4AAAAAf/7/+IBNALQAAMAABcjEzM5Pvs+H
gLuAAAAAAIAKf/yAhsCwAADAAcAABIgECA2IBAgKQHy/g5gATL+zgLA/TJEAkYAAAAAAQCCAAABlg
KyAAgAAAERIxEHNTc2MwGWVr6SIygCsv1OAldxW1sWAAEAPAAAAg4CwAAZAAA3IRUhNRM+ATU0JiM
iDwEjNz4BMzIWFRQGB7kBUv4x+kI2QTt+EAFWAQp8aGVtSl5GRjEA/0RVLzlLmAoKa3FsUkNxXQAA
AAEALf/yAhYCwAAqAAABHgEVFAYjIi8BMxceATMyNjU0KwE1MzI2NTQmIyIGDwEjNz4BMzIWFRQGA
YxBSZJo2RUBVgEHV0JBUaQREUBUQzc5TQcBVgEKfGhfcEMBbxJbQl1x0AoKRkZHPn9GSD80QUVCCg
pfbGBPOlgAAAACACEAAAIkArIACgAPAAAlIxUjNSE1ATMRMyMRBg8BAiRXVv6qAVZWV60dHLCurq4
rAdn+QgFLMibzAAABADn/8gIZArIAHQAAATIWFRQGIyIvATMXFjMyNjU0JiMiByMTIRUhBzc2ATNv
d5Fl1RQBVgIad0VSTkVhL1IwAYj+vh8rMAHHgGdtgcUKCoFXTU5bYgGRRvAuHQAAAAACACv/8gITA
sAAFwAjAAABMhYVFAYjIhE0NjMyFh8BIycmIyIDNzYTMjY1NCYjIgYVFBYBLmp7imr0l3RZdAgBXA
IYZ5wKJzU6QVNJSz5SUAHSgWltiQFGxcNlVQoKdv7sPiz+ZF1LTmJbU0lhAAAAAQAyAAACGgKyAAY
AAAEVASMBITUCGv6oXAFL/oECsij9dgJsRgAAAAMALP/xAhgCwAAWACAALAAAAR4BFRQGIyImNTQ2
Ny4BNTQ2MhYVFAYmIgYVFBYyNjU0AzI2NTQmIyIGFRQWAZQ5S5BmbIpPOjA7ecp5P2F8Q0J8RIVJS
0pLTEtOAW0TXTxpZ2ZqPF0SE1A3VWVlVTdQ/UU0N0RENzT9/ko+Ok1NOj1LAAIAMf/yAhkCwAAXAC
MAAAEyERQGIyImLwEzFxYzMhMHBiMiJjU0NhMyNjU0JiMiBhUUFgEl9Jd0WXQIAVwCGGecCic1SWp
7imo+UlBAQVNJAsD+usXDZVUKCnYBFD4sgWltif5kW1NJYV1LTmIAAAACACX/8gClAiAABwAPAAAS
MhYUBiImNBIyFhQGIiY0STgkJDgkJDgkJDgkAiAkOCQkOP52JDgkJDgAAAAC/+H/iAClAiAABwAMA
AASMhYUBiImNBMHIzczSTgkJDgkaFpSTl4CICQ4JCQ4/mba5gAAAQBnAB4B+AH0AAYAAAENARUlNS
UB+P6qAVb+bwGRAbCmpkbJRMkAAAIAUAC7AfQBuwADAAcAAAEhNSERITUhAfT+XAGk/lwBpAGDOP8
AOAABAEQAHgHVAfQABgAAARUFNS0BNQHV/m8BVv6qAStEyUSmpkYAAAAAAgAj//IB1ALAABgAIAAA
ATIWFRQHDgEHIz4BNz4BNTQmIyIGByM+ARIyFhQGIiY0AQRibmktIAJWBSEqNig+NTlHBFoDezQ4J
CQ4JALAZ1BjaS03JS1DMD5LLDQ/SUVgcv2yJDgkJDgAAAAAAgA2/5gDFgKYADYAQgAAAQMGFRQzMj
Y1NCYjIg4CFRQWMzI2NxcGIyImNTQ+AjMyFhUUBiMiJwcGIyImNTQ2MzIfATcHNzYmIyIGFRQzMjY
Cej8EJjJJlnBAfGQ+oHtAhjUYg5OPx0h2k06Os3xRWQsVLjY5VHtdPBwJETcJDyUoOkZEJz8B0f74
EQ8kZl6EkTFZjVOLlyknMVm1pmCiaTq4lX6CSCknTVRmmR8wPdYnQzxuSWVGAAIAHQAAAncCsgAHA
AoAACUjByMTMxMjATMDAcj+UVz4dO5d/sjPZPT0ArL9TgE6ATQAAAADAGQAAAJMArIAEAAbACcAAA
EeARUUBgcGKwERMzIXFhUUJRUzMjc2NTQnJiMTPgE1NCcmKwEVMzIBvkdHZkwiNt7LOSGq/oeFHBt
hahIlSTM+cB8Yj5UWAW8QT0VYYgwFArIEF5Fv1eMED2NfDAL93AU+N24PBP0AAAAAAQAv//ICjwLA
ABsAAAEyFh8BIycmIyIGFRQWMzI/ATMHDgEjIiY1NDYBdX+PCwFWAiKiaHx5ZaIiAlYBCpWBk6a0A
sCAagoKpqN/gaOmCgplhcicn8sAAAIAZAAAAp8CsgAMABkAAAEeARUUBgcGKwERMzITPgE1NCYnJi
sBETMyAY59lJp8IzXN0jUVWmdjWRs5d3I4Aq4QqJWUug8EArL9mQ+PeHGHDgX92gAAAAABAGQAAAI
vArIACwAAJRUhESEVIRUhFSEVAi/+NQHB/pUBTf6zRkYCskbwRvAAAAABAGQAAAIlArIACQAAExUh
FSERIxEhFboBQ/69VgHBAmzwRv7KArJGAAAAAAEAL//yAo8CwAAfAAABMxEjNQcGIyImNTQ2MzIWH
wEjJyYjIgYVFBYzMjY1IwGP90wfPnWTprSSf48LAVYCIqJofHllVG+hAU3+s3hARsicn8uAagoKpq
N/gaN1XAAAAAEAZAAAAowCsgALAAABESMRIREjETMRIRECjFb+hFZWAXwCsv1OAS7+0gKy/sQBPAA
AAAABAGQAAAC6ArIAAwAAMyMRM7pWVgKyAAABADf/8gHoArIAEwAAAREUBw4BIyImLwEzFxYzMjc2
NREB6AIFcGpgbQIBVgIHfXQKAQKy/lYxIltob2EpKYyEFD0BpwAAAAABAGQAAAJ0ArIACwAACQEjA
wcVIxEzEQEzATsBJ3ntQlZWAVVlAWH+nwEnR+ACsv6RAW8AAQBkAAACLwKyAAUAACUVIREzEQIv/j
VWRkYCsv2UAAABAGQAAAMUArIAFAAAAREjETQ3BgcDIwMmJxYVESMRMxsBAxRWAiMxemx8NxsCVo7
MywKy/U4BY7ZLco7+nAFmoFxLtP6dArL9lwJpAAAAAAEAZAAAAoACsgANAAAhIwEWFREjETMBJjUR
MwKAhP67A1aEAUUDVAJeeov+pwKy/aJ5jAFZAAAAAgAv//ICuwLAAAkAEwAAEiAWFRQGICY1NBIyN
jU0JiIGFRTbATSsrP7MrNrYenrYegLAxaKhxsahov47nIeIm5uIhwACAGQAAAJHArIADgAYAAABHg
EVFAYHBisBESMRMzITNjQnJisBETMyAZRUX2VOHzuAVtY7GlxcGDWIiDUCrgtnVlVpCgT+5gKy/rU
V1BUF/vgAAAACAC//zAK9AsAAEgAcAAAlFhcHJiMiBwYjIiY1NDYgFhUUJRQWMjY1NCYiBgI9PUMx
UDcfKh8omqysATSs/dR62Hp62HpICTg7NgkHxqGixcWitbWHnJyHiJubAAIAZAAAAlgCsgAXACMAA
CUWFyMmJyYnJisBESMRMzIXHgEVFAYHFiUzMjc+ATU0JyYrAQIqDCJfGQwNWhAhglbiOx9QXEY1Tv
6bhDATMj1lGSyMtYgtOXR0BwH+1wKyBApbU0BSESRAAgVAOGoQBAABADT/8gIoAsAAJQAAATIWFyM
uASMiBhUUFhceARUUBiMiJiczHgEzMjY1NCYnLgE1NDYBOmd2ClwGS0E6SUNRdW+HZnKKC1wPWkQ9
Uk1cZGuEAsBwXUJHNjQ3OhIbZVZZbm5kREo+NT5DFRdYUFdrAAAAAAEAIgAAAmQCsgAHAAABIxEjE
SM1IQJk9lb2AkICbP2UAmxGAAEAXv/yAmQCsgAXAAABERQHDgEiJicmNREzERQXHgEyNjc2NRECZA
IIgfCBCAJWAgZYmlgGAgKy/k0qFFxzc1wUKgGz/lUrEkRQUEQSKwGrAAAAAAEAIAAAAnoCsgAGAAA
hIwMzGwEzAYJ07l3N1FwCsv2PAnEAAAEAGgAAA7ECsgAMAAABAyMLASMDMxsBMxsBA7HAcZyicrZi
kaB0nJkCsv1OAlP9rQKy/ZsCW/2kAmYAAAEAGQAAAm8CsgALAAAhCwEjEwMzGwEzAxMCCsrEY/bkY
re+Y/D6AST+3AFcAVb+5gEa/q3+oQAAAQATAAACUQKyAAgAAAERIxEDMxsBMwFdVvRjwLphARD+8A
EQAaL+sQFPAAABAC4AAAI5ArIACQAAJRUhNQEhNSEVAQI5/fUBof57Aen+YUZGQgIqRkX92QAAAAA
BAGL/sAEFAwwABwAAARUjETMVIxEBBWlpowMMOP0UOANcAAAB//v/4gE0AtAAAwAABSMDMwE0Pvs+
HgLuAAAAAQAi/7AAxQMMAAcAABcjNTMRIzUzxaNpaaNQOALsOAABAFAA1wH0AmgABgAAJQsBIxMzE
wGwjY1GsESw1wFZ/qcBkf5vAAAAAQAy/6oBwv/iAAMAAAUhNSEBwv5wAZBWOAAAAAEAKQJEALYCsg
ADAAATIycztjhVUAJEbgAAAAACACT/8gHQAiAAHQAlAAAhJwcGIyImNTQ2OwE1NCcmIyIHIz4BMzI
XFh0BFBcnMjY9ASYVFAF6CR0wVUtgkJoiAgdgaQlaBm1Zrg4DCuQ9R+5MOSFQR1tbDiwUUXBUXowf
J8c9SjRORzYSgVwAAAAAAgBK//ICRQLfABEAHgAAATIWFRQGIyImLwEVIxEzETc2EzI2NTQmIyIGH
QEUFgFUcYCVbiNJEyNWVigySElcU01JXmECIJd4i5QTEDRJAt/+3jkq/hRuZV55ZWsdX14AAQAe//
IB9wIgABgAAAEyFhcjJiMiBhUUFjMyNjczDgEjIiY1NDYBF152DFocbEJXU0A1Rw1aE3pbaoKQAiB
oWH5qZm1tPDlaXYuLgZcAAAACAB7/8gIZAt8AEQAeAAABESM1BwYjIiY1NDYzMhYfAREDMjY9ATQm
IyIGFRQWAhlWKDJacYCVbiNJEyOnSV5hQUlcUwLf/SFVOSqXeIuUExA0ARb9VWVrHV9ebmVeeQACA
B7/8gH9AiAAFQAbAAABFAchHgEzMjY3Mw4BIyImNTQ2MzIWJyIGByEmAf0C/oAGUkA1SwlaD4FXbI
WObmt45UBVBwEqDQEYFhNjWD84W16Oh3+akU9aU60AAAEAFQAAARoC8gAWAAATBh0BMxUjESMRIzU
zNTQ3PgEzMhcVJqcDbW1WOTkDB0k8Hx5oAngVITRC/jQBzEIsJRs5PwVHEwAAAAIAHv8uAhkCIAAi
AC8AAAERFAcOASMiLwEzFx4BMzI2NzY9AQcGIyImNTQ2MzIWHwE1AzI2PQE0JiMiBhUUFgIZAQSEd
NwRAVcBBU5DTlUDASgyWnGAlW4jSRMjp0leYUFJXFMCEv5wSh1zeq8KCTI8VU0ZIQk5Kpd4i5QTED
RJ/iJlax1fXm5lXnkAAQBKAAACCgLkABcAAAEWFREjETQnLgEHDgEdASMRMxE3NjMyFgIIAlYCBDs
6RVRWViE5UVViAYUbQP7WASQxGzI7AQJyf+kC5P7TPSxUAAACAD4AAACsAsAABwALAAASMhYUBiIm
NBMjETNeLiAgLiBiVlYCwCAuICAu/WACEgAC//P/LgCnAsAABwAVAAASMhYUBiImNBcRFAcGIyInN
RY3NjURWS4gIC4gYgMLcRwNSgYCAsAgLiAgLo79wCUbZAJGBzMOHgJEAAAAAQBKAAACCALfAAsAAC
EnBxUjETMREzMHEwGTwTJWVvdu9/rgN6kC3/4oAQv6/ugAAQBG//wA3gLfAA8AABMRFBceATcVBiM
iJicmNRGcAQIcIxkkKi4CAQLf/bkhERoSBD4EJC8SNAJKAAAAAQBKAAADEAIgACQAAAEWFREjETQn
JiMiFREjETQnJiMiFREjETMVNzYzMhYXNzYzMhYDCwVWBAxedFYEDF50VlYiJko7ThAvJkpEVAGfI
jn+vAEcQyRZ1v76ARxDJFnW/voCEk08HzYtRB9HAAAAAAEASgAAAgoCIAAWAAABFhURIxE0JyYjIg
YdASMRMxU3NjMyFgIIAlYCCXBEVVZWITlRVWIBhRtA/tYBJDEbbHR/6QISWz0sVAAAAAACAB7/8gI
sAiAABwARAAASIBYUBiAmNBIyNjU0JiIGFRSlAQCHh/8Ah7ieWlqeWgIgn/Cfn/D+s3ZfYHV1YF8A
AgBK/zwCRQIgABEAHgAAATIWFRQGIyImLwERIxEzFTc2EzI2NTQmIyIGHQEUFgFUcYCVbiNJEyNWV
igySElcU01JXmECIJd4i5QTEDT+8wLWVTkq/hRuZV55ZWsdX14AAgAe/zwCGQIgABEAHgAAAREjEQ
cGIyImNTQ2MzIWHwE1AzI2PQE0JiMiBhUUFgIZVigyWnGAlW4jSRMjp0leYUFJXFMCEv0qARk5Kpd
4i5QTEDRJ/iJlax1fXm5lXnkAAQBKAAABPgIeAA0AAAEyFxUmBhURIxEzFTc2ARoWDkdXVlYwIwIe
B0EFVlf+0gISU0cYAAEAGP/yAa0CIAAjAAATMhYXIyYjIgYVFBYXHgEVFAYjIiYnMxYzMjY1NCYnL
gE1NDbkV2MJWhNdKy04PF1XbVhWbgxaE2ktOjlEUllkAiBaS2MrJCUoEBlPQkhOVFZoKCUmLhIWSE
BIUwAAAAEAFP/4ARQCiQAXAAATERQXHgE3FQYjIiYnJjURIzUzNTMVMxWxAQMmMx8qMjMEAUdHVmM
BzP7PGw4mFgY/BSwxDjQBNUJ7e0IAAAABAEL/8gICAhIAFwAAAREjNQcGIyImJyY1ETMRFBceATMy
Nj0BAgJWITlRT2EKBVYEBkA1RFECEv3uWj4qTToiOQE+/tIlJC43c4DpAAAAAAEAAQAAAfwCEgAGA
AABAyMDMxsBAfzJaclfop8CEv3uAhL+LQHTAAABAAEAAAMLAhIADAAAAQMjCwEjAzMbATMbAQMLqW
Z2dmapY3t0a3Z7AhL97gG+/kICEv5AAcD+QwG9AAAB//oAAAHWAhIACwAAARMjJwcjEwMzFzczARq
8ZIuKY763ZoWFYwEO/vLV1QEMAQbNzQAAAQAB/y4B+wISABEAAAEDDgEjIic1FjMyNj8BAzMbAQH7
2iFZQB8NDRIpNhQH02GenQIS/cFVUAJGASozEwIt/i4B0gABABQAAAGxAg4ACQAAJRUhNQEhNSEVA
QGx/mMBNP7iAYL+zkREQgGIREX+ewAAAAABAED/sAEOAwwALAAAASMiBhUUFxYVFAYHHgEVFAcGFR
QWOwEVIyImNTQ3NjU0JzU2NTQnJjU0NjsBAQ4MKiMLDS4pKS4NCyMqDAtERAwLUlILDERECwLUGBk
WTlsgKzUFBTcrIFtOFhkYOC87GFVMIkUIOAhFIkxVGDsvAAAAAAEAYP84AJoDIAADAAAXIxEzmjo6
yAPoAAEAIf+wAO8DDAAsAAATFQYVFBcWFRQGKwE1MzI2NTQnJjU0NjcuATU0NzY1NCYrATUzMhYVF
AcGFRTvUgsMREQLDCojCw0uKSkuDQsjKgwLREQMCwF6OAhFIkxVGDsvOBgZFk5bICs1BQU3KyBbTh
YZGDgvOxhVTCJFAAABAE0A3wH2AWQAEwAAATMUIyImJyYjIhUjNDMyFhcWMzIBvjhuGywtQR0xOG4
bLC1BHTEBZIURGCNMhREYIwAAAwAk/94DIgLoAAcAEQApAAAAIBYQBiAmECQgBhUUFiA2NTQlMhYX
IyYjIgYUFjMyNjczDgEjIiY1NDYBAQFE3d3+vN0CB/7wubkBELn+xVBnD1wSWDo+QTcqOQZcEmZWX
HN2Aujg/rbg4AFKpr+Mjb6+jYxbWEldV5ZZNShLVn5na34AAgB4AFIB9AGeAAUACwAAAQcXIyc3Mw
cXIyc3AUqJiUmJifOJiUmJiQGepqampqampqYAAAIAHAHSAQ4CwAAHAA8AABIyFhQGIiY0NiIGFBY
yNjRgakREakSTNCEhNCECwEJqQkJqCiM4IyM4AAAAAAIAUAAAAfQCCwALAA8AAAEzFSMVIzUjNTM1
MxMhNSEBP7W1OrW1OrX+XAGkAVs4tLQ4sP31OAAAAQB0AkQBAQKyAAMAABMjNzOsOD1QAkRuAAAAA
AEAIADsAKoBdgAHAAASMhYUBiImNEg6KCg6KAF2KDooKDoAAAIAOQBSAbUBngAFAAsAACUHIzcnMw
UHIzcnMwELiUmJiUkBM4lJiYlJ+KampqampqYAAAABADYB5QDhAt8ABAAAEzczByM2Xk1OXQHv8Po
AAQAWAeUAwQLfAAQAABMHIzczwV5NTl0C1fD6AAIANgHlAYsC3wAEAAkAABM3MwcjPwEzByM2Xk1O
XapeTU5dAe/w+grw+gAAAgAWAeUBawLfAAQACQAAEwcjNzMXByM3M8FeTU5dql5NTl0C1fD6CvD6A
AADACX/8gI1AHIABwAPABcAADYyFhQGIiY0NjIWFAYiJjQ2MhYUBiImNEk4JCQ4JOw4JCQ4JOw4JC
Q4JHIkOCQkOCQkOCQkOCQkOCQkOAAAAAEAeABSAUoBngAFAAABBxcjJzcBSomJSYmJAZ6mpqamAAA
AAAEAOQBSAQsBngAFAAAlByM3JzMBC4lJiYlJ+KampgAAAf9qAAABgQKyAAMAACsBATM/VwHAVwKy
AAAAAAIAFAHIAdwClAAHABQAABMVIxUjNSM1BRUjNwcjJxcjNTMXN9pKMkoByDICKzQqATJLKysCl
CmjoykBy46KiY3Lm5sAAQAVAAABvALyABgAAAERIxEjESMRIzUzNTQ3NjMyFxUmBgcGHQEBvFbCVj
k5AxHHHx5iVgcDAg798gHM/jQBzEIOJRuWBUcIJDAVIRYAAAABABX//AHkAvIAJQAAJR4BNxUGIyI
mJyY1ESYjIgcGHQEzFSMRIxEjNTM1NDc2MzIXERQBowIcIxkkKi4CAR4nXgwDbW1WLy8DEbNdOmYa
EQQ/BCQvEjQCFQZWFSEWQv40AcxCDiUblhP9uSEAAAAAAAAWAQ4AAQAAAAAAAAATACgAAQAAAAAAA
QAHAEwAAQAAAAAAAgAHAGQAAQAAAAAAAwAaAKIAAQAAAAAABAAHAM0AAQAAAAAABQA8AU8AAQAAAA
AABgAPAawAAQAAAAAACAALAdQAAQAAAAAACQALAfgAAQAAAAAACwAXAjQAAQAAAAAADAAXAnwAAwA
BBAkAAAAmAAAAAwABBAkAAQAOADwAAwABBAkAAgAOAFQAAwABBAkAAwA0AGwAAwABBAkABAAOAL0A
AwABBAkABQB4ANUAAwABBAkABgAeAYwAAwABBAkACAAWAbwAAwABBAkACQAWAeAAAwABBAkACwAuA
gQAAwABBAkADAAuAkwATgBvACAAUgBpAGcAaAB0AHMAIABSAGUAcwBlAHIAdgBlAGQALgAATm8gUm
lnaHRzIFJlc2VydmVkLgAAQQBpAGwAZQByAG8AbgAAQWlsZXJvbgAAUgBlAGcAdQBsAGEAcgAAUmV
ndWxhcgAAMQAuADEAMAAyADsAVQBLAFcATgA7AEEAaQBsAGUAcgBvAG4ALQBSAGUAZwB1AGwAYQBy
AAAxLjEwMjtVS1dOO0FpbGVyb24tUmVndWxhcgAAQQBpAGwAZQByAG8AbgAAQWlsZXJvbgAAVgBlA
HIAcwBpAG8AbgAgADEALgAxADAAMgA7AFAAUwAgADAAMAAxAC4AMQAwADIAOwBoAG8AdABjAG8Abg
B2ACAAMQAuADAALgA3ADAAOwBtAGEAawBlAG8AdABmAC4AbABpAGIAMgAuADUALgA1ADgAMwAyADk
AAFZlcnNpb24gMS4xMDI7UFMgMDAxLjEwMjtob3Rjb252IDEuMC43MDttYWtlb3RmLmxpYjIuNS41
ODMyOQAAQQBpAGwAZQByAG8AbgAtAFIAZQBnAHUAbABhAHIAAEFpbGVyb24tUmVndWxhcgAAUwBvA
HIAYQAgAFMAYQBnAGEAbgBvAABTb3JhIFNhZ2FubwAAUwBvAHIAYQAgAFMAYQBnAGEAbgBvAABTb3
JhIFNhZ2FubwAAaAB0AHQAcAA6AC8ALwB3AHcAdwAuAGQAbwB0AGMAbwBsAG8AbgAuAG4AZQB0AAB
odHRwOi8vd3d3LmRvdGNvbG9uLm5ldAAAaAB0AHQAcAA6AC8ALwB3AHcAdwAuAGQAbwB0AGMAbwBs
AG8AbgAuAG4AZQB0AABodHRwOi8vd3d3LmRvdGNvbG9uLm5ldAAAAAACAAAAAAAA/4MAMgAAAAAAA
AAAAAAAAAAAAAAAAAAAAHQAAAABAAIAAwAEAAUABgAHAAgACQAKAAsADAANAA4ADwAQABEAEgATAB
QAFQAWABcAGAAZABoAGwAcAB0AHgAfACAAIQAiACMAJAAlACYAJwAoACkAKgArACwALQAuAC8AMAA
xADIAMwA0ADUANgA3ADgAOQA6ADsAPAA9AD4APwBAAEEAQgBDAEQARQBGAEcASABJAEoASwBMAE0A
TgBPAFAAUQBSAFMAVABVAFYAVwBYAFkAWgBbAFwAXQBeAF8AYABhAIsAqQCDAJMAjQDDAKoAtgC3A
LQAtQCrAL4AvwC8AIwAwADBAAAAAAAB//8AAgABAAAADAAAABwAAAACAAIAAwBxAAEAcgBzAAIABA
AAAAIAAAABAAAACgBMAGYAAkRGTFQADmxhdG4AGgAEAAAAAP//AAEAAAAWAANDQVQgAB5NT0wgABZ
ST00gABYAAP//AAEAAAAA//8AAgAAAAEAAmxpZ2EADmxvY2wAFAAAAAEAAQAAAAEAAAACAAYAEAAG
AAAAAgASADQABAAAAAEATAADAAAAAgAQABYAAQAcAAAAAQABAE8AAQABAGcAAQABAE8AAwAAAAIAE
AAWAAEAHAAAAAEAAQAvAAEAAQBnAAEAAQAvAAEAGgABAAgAAgAGAAwAcwACAE8AcgACAEwAAQABAE
kAAAABAAAACgBGAGAAAkRGTFQADmxhdG4AHAAEAAAAAP//AAIAAAABABYAA0NBVCAAFk1PTCAAFlJ
PTSAAFgAA//8AAgAAAAEAAmNwc3AADmtlcm4AFAAAAAEAAAAAAAEAAQACAAYADgABAAAAAQASAAIA
AAACAB4ANgABAAoABQAFAAoAAgABACQAPQAAAAEAEgAEAAAAAQAMAAEAOP/nAAEAAQAkAAIGigAEA
AAFJAXKABoAGQAA//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAD/sv+4/+z/7v/MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAD/xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/9T/6AAAAAD/8QAA
ABD/vQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/7gAAAAAAAAAAAAAAAAAA//MAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIAAAAAAAAAAP/5AAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gAAD/4AAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//L/9AAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAA/+gAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/zAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/mAAAAAAAAAAAAAAAAAAD
/4gAA//AAAAAA//YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/+AAAAAAAAP/OAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/zv/qAAAAAP/0AAAACAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/ZAAD/egAA/1kAAAAA/5D/rgAAAAAAAAAAAA
AAAAAAAAAAAAAAAAD/9AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAD/8AAA/7b/8P+wAAD/8P/E/98AAAAA/8P/+P/0//oAAAAAAAAAAAAA//gA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/+AAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/w//C/9MAAP/SAAD/9wAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAD/yAAA/+kAAAAA//QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/9wAAAAD//QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAP/2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAP/cAAAAAAAAAAAAAAAA/7YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAP/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/6AAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAkAFAAEAAAAAQACwAAABcA
BgAAAAAAAAAIAA4AAAAAAAsAEgAAAAAAAAATABkAAwANAAAAAQAJAAAAAAAAAAAAAAAAAAAAGAAAA
AAABwAAAAAAAAAAAAAAFQAFAAAAAAAYABgAAAAUAAAACgAAAAwAAgAPABEAFgAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAEAEQBdAAYAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAcAAAAAAAAABwAAAAAACAAAAAAAAAAAAAcAAAAHAAAAEwAJ
ABUADgAPAAAACwAQAAAAAAAAAAAAAAAAAAUAGAACAAIAAgAAAAIAGAAXAAAAGAAAABYAFgACABYAA
gAWAAAAEQADAAoAFAAMAA0ABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASAAAAEgAGAAEAHgAkAC
YAJwApACoALQAuAC8AMgAzADcAOAA5ADoAPAA9AEUASABOAE8AUgBTAFUAVwBZAFoAWwBcAF0AcwA
AAAAAAQAAAADa3tfFAAAAANAan9kAAAAA4QodoQ==
"""
                )
            ),
            # Default point size is 10 when the caller did not request one.
            10 if size is None else size,
            layout_engine=Layout.BASIC,
        )
    # No FreeType available and no size requested: use the builtin
    # fixed-size bitmap "better than nothing" font.
    return load_default_imagefont()
venv\Lib\site-packages\PIL\ImageGrab.py
#
# The Python Imaging Library
# $Id$
#
# screen grabber
#
# History:
# 2001-04-26 fl created
# 2001-09-17 fl use builtin driver, if present
# 2002-11-19 fl added grabclipboard support
#
# Copyright (c) 2001-2002 by Secret Labs AB
# Copyright (c) 2001-2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import os
import shutil
import subprocess
import sys
import tempfile
from . import Image
TYPE_CHECKING = False
if TYPE_CHECKING:
from . import ImageWin
def grab(
    bbox: tuple[int, int, int, int] | None = None,
    include_layered_windows: bool = False,
    all_screens: bool = False,
    xdisplay: str | None = None,
    window: int | ImageWin.HWND | None = None,
) -> Image.Image:
    """Take a snapshot of the screen and return it as an image.

    :param bbox: Optional ``(left, top, right, bottom)`` region to capture.
    :param include_layered_windows: Windows only; forwarded to the native
        grabber.
    :param all_screens: Windows only; capture all monitors. Overridden to
        ``-1`` when a specific ``window`` is given.
    :param xdisplay: X11 display name. ``None`` selects the platform default
        (native capture on macOS/Windows, XCB or a screenshot helper tool
        elsewhere).
    :param window: Windows only; handle of a specific window to capture.
    :return: An :py:class:`~PIL.Image.Image`, cropped/resized to ``bbox``
        when one was given.
    """
    im: Image.Image
    if xdisplay is None:
        if sys.platform == "darwin":
            # macOS: shell out to the system "screencapture" tool via a
            # temporary PNG file.
            fh, filepath = tempfile.mkstemp(".png")
            os.close(fh)
            args = ["screencapture"]
            if bbox:
                left, top, right, bottom = bbox
                # -R takes x,y,width,height rather than a right/bottom edge.
                args += ["-R", f"{left},{top},{right-left},{bottom-top}"]
            subprocess.call(args + ["-x", filepath])
            im = Image.open(filepath)
            # Force the pixel data into memory so the temp file can be removed.
            im.load()
            os.unlink(filepath)
            if bbox:
                # NOTE(review): the captured region is resized (not cropped)
                # to the bbox dimensions -- presumably to compensate for
                # Retina scaling; confirm before changing.
                im_resized = im.resize((right - left, bottom - top))
                im.close()
                return im_resized
            return im
        elif sys.platform == "win32":
            if window is not None:
                # Sentinel understood by the native grabber: capture the
                # given window rather than a screen.
                all_screens = -1
            offset, size, data = Image.core.grabscreen_win32(
                include_layered_windows,
                all_screens,
                int(window) if window is not None else 0,
            )
            im = Image.frombytes(
                "RGB",
                size,
                data,
                # RGB, 32-bit line padding, origin lower left corner
                "raw",
                "BGR",
                (size[0] * 3 + 3) & -4,
                -1,
            )
            if bbox:
                # bbox is in virtual-screen coordinates; shift by the
                # grab's origin offset before cropping.
                x0, y0 = offset
                left, top, right, bottom = bbox
                im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
            return im
    # Cast to Optional[str] needed for Windows and macOS.
    display_name: str | None = xdisplay
    try:
        if not Image.core.HAVE_XCB:
            msg = "Pillow was built without XCB support"
            raise OSError(msg)
        size, data = Image.core.grabscreen_x11(display_name)
    except OSError:
        # XCB capture failed; on a default display (e.g. Wayland) fall back
        # to an external screenshot tool if one is installed.
        if display_name is None and sys.platform not in ("darwin", "win32"):
            if shutil.which("gnome-screenshot"):
                args = ["gnome-screenshot", "-f"]
            elif shutil.which("grim"):
                args = ["grim"]
            elif shutil.which("spectacle"):
                args = ["spectacle", "-n", "-b", "-f", "-o"]
            else:
                raise
            fh, filepath = tempfile.mkstemp(".png")
            os.close(fh)
            subprocess.call(args + [filepath])
            im = Image.open(filepath)
            im.load()
            os.unlink(filepath)
            if bbox:
                im_cropped = im.crop(bbox)
                im.close()
                return im_cropped
            return im
        else:
            raise
    else:
        # XCB delivers BGRX scanlines, 4 bytes per pixel, top-down.
        im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1)
        if bbox:
            im = im.crop(bbox)
        return im
def grabclipboard() -> Image.Image | list[str] | None:
    """Return the current clipboard contents.

    :return: An image, a list of file names (Windows ``CF_HDROP`` only), or
        ``None`` when the clipboard holds nothing usable.
    :raises NotImplementedError: On Linux when neither ``wl-paste`` nor
        ``xclip`` is installed.
    :raises ChildProcessError: When the Linux helper tool fails with an
        unrecognized error.
    """
    if sys.platform == "darwin":
        # Ask AppleScript for the clipboard as PNG data.
        p = subprocess.run(
            ["osascript", "-e", "get the clipboard as «class PNGf»"],
            capture_output=True,
        )
        if p.returncode != 0:
            return None

        import binascii

        # osascript prints the bytes as «data PNGf<hex>»; strip the
        # 11-byte prefix and 3-byte suffix, then un-hex the payload.
        data = io.BytesIO(binascii.unhexlify(p.stdout[11:-3]))
        return Image.open(data)
    elif sys.platform == "win32":
        fmt, data = Image.core.grabclipboard_win32()
        if fmt == "file":  # CF_HDROP
            import struct

            # DROPFILES: first DWORD is the offset to the file-name list;
            # byte 16 (fWide) selects ANSI vs UTF-16 names.
            o = struct.unpack_from("I", data)[0]
            if data[16] == 0:
                files = data[o:].decode("mbcs").split("\0")
            else:
                files = data[o:].decode("utf-16le").split("\0")
            # The list is double-NUL terminated; keep entries up to the
            # first empty string.
            return files[: files.index("")]
        if isinstance(data, bytes):
            data = io.BytesIO(data)
            if fmt == "png":
                from . import PngImagePlugin

                return PngImagePlugin.PngImageFile(data)
            elif fmt == "DIB":
                from . import BmpImagePlugin

                return BmpImagePlugin.DibImageFile(data)
        return None
    else:
        # Linux/other: pick a clipboard tool matching the session type.
        if os.getenv("WAYLAND_DISPLAY"):
            session_type = "wayland"
        elif os.getenv("DISPLAY"):
            session_type = "x11"
        else:  # Session type check failed
            session_type = None

        if shutil.which("wl-paste") and session_type in ("wayland", None):
            args = ["wl-paste", "-t", "image"]
        elif shutil.which("xclip") and session_type in ("x11", None):
            args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"]
        else:
            msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux"
            raise NotImplementedError(msg)

        p = subprocess.run(args, capture_output=True)
        if p.returncode != 0:
            err = p.stderr
            # These stderr fragments mean "no image on the clipboard",
            # which callers expect to see as None, not an exception.
            for silent_error in [
                # wl-paste, when the clipboard is empty
                b"Nothing is copied",
                # Ubuntu/Debian wl-paste, when the clipboard is empty
                b"No selection",
                # Ubuntu/Debian wl-paste, when an image isn't available
                b"No suitable type of content copied",
                # wl-paste or Ubuntu/Debian xclip, when an image isn't available
                b" not available",
                # xclip, when an image isn't available
                b"cannot convert ",
                # xclip, when the clipboard isn't initialized
                b"xclip: Error: There is no owner for the ",
            ]:
                if silent_error in err:
                    return None
            msg = f"{args[0]} error"
            if err:
                msg += f": {err.strip().decode()}"
            raise ChildProcessError(msg)

        data = io.BytesIO(p.stdout)
        im = Image.open(data)
        im.load()
        return im
venv\Lib\site-packages\PIL\ImageMath.py
#
# The Python Imaging Library
# $Id$
#
# a simple math add-on for the Python Imaging Library
#
# History:
# 1999-02-15 fl Original PIL Plus release
# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
#
# Copyright (c) 1999-2005 by Secret Labs AB
# Copyright (c) 2005 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import builtins
from types import CodeType
from typing import Any, Callable
from . import Image, _imagingmath
from ._deprecate import deprecate
class _Operand:
    """Wraps an image operand, providing standard operators.

    Every arithmetic/bitwise/comparison operator delegates to
    :py:meth:`apply`, which dispatches to the matching C function in
    ``_imagingmath`` and returns a new ``_Operand``.
    """

    def __init__(self, im: Image.Image):
        # The wrapped image; never mutated in place by the operators.
        self.im = im

    def __fixup(self, im1: _Operand | float) -> Image.Image:
        # convert image to suitable mode
        if isinstance(im1, _Operand):
            # argument was an image.
            if im1.im.mode in ("1", "L"):
                # Promote narrow integer modes to 32-bit "I".
                return im1.im.convert("I")
            elif im1.im.mode in ("I", "F"):
                return im1.im
            else:
                msg = f"unsupported mode: {im1.im.mode}"
                raise ValueError(msg)
        else:
            # argument was a constant: materialize it as a uniform image
            # matching self's size, integer if self is integer-like.
            if isinstance(im1, (int, float)) and self.im.mode in ("1", "L", "I"):
                return Image.new("I", self.im.size, im1)
            else:
                return Image.new("F", self.im.size, im1)

    def apply(
        self,
        op: str,
        im1: _Operand | float,
        im2: _Operand | float | None = None,
        mode: str | None = None,
    ) -> _Operand:
        """Apply the named _imagingmath op to one or two operands.

        :param op: Base operation name, e.g. "add"; the actual C symbol is
            ``f"{op}_{mode}"``.
        :param im1: First operand (image or scalar).
        :param im2: Optional second operand; ``None`` selects a unary op.
        :param mode: Optional output mode override.
        :raises TypeError: If no C implementation exists for the operand mode.
        """
        im_1 = self.__fixup(im1)
        if im2 is None:
            # unary operation
            out = Image.new(mode or im_1.mode, im_1.size, None)
            try:
                # NOTE: "op" is rebound from the name string to the actual
                # C function object here.
                op = getattr(_imagingmath, f"{op}_{im_1.mode}")
            except AttributeError as e:
                msg = f"bad operand type for '{op}'"
                raise TypeError(msg) from e
            _imagingmath.unop(op, out.getim(), im_1.getim())
        else:
            # binary operation
            im_2 = self.__fixup(im2)
            if im_1.mode != im_2.mode:
                # convert both arguments to floating point
                if im_1.mode != "F":
                    im_1 = im_1.convert("F")
                if im_2.mode != "F":
                    im_2 = im_2.convert("F")
            if im_1.size != im_2.size:
                # crop both arguments to a common size
                size = (
                    min(im_1.size[0], im_2.size[0]),
                    min(im_1.size[1], im_2.size[1]),
                )
                if im_1.size != size:
                    im_1 = im_1.crop((0, 0) + size)
                if im_2.size != size:
                    im_2 = im_2.crop((0, 0) + size)
            out = Image.new(mode or im_1.mode, im_1.size, None)
            try:
                # Same rebinding trick as the unary branch above.
                op = getattr(_imagingmath, f"{op}_{im_1.mode}")
            except AttributeError as e:
                msg = f"bad operand type for '{op}'"
                raise TypeError(msg) from e
            _imagingmath.binop(op, out.getim(), im_1.getim(), im_2.getim())
        return _Operand(out)

    # unary operators
    def __bool__(self) -> bool:
        # an image is "true" if it contains at least one non-zero pixel
        return self.im.getbbox() is not None

    def __abs__(self) -> _Operand:
        return self.apply("abs", self)

    def __pos__(self) -> _Operand:
        return self

    def __neg__(self) -> _Operand:
        return self.apply("neg", self)

    # binary operators
    def __add__(self, other: _Operand | float) -> _Operand:
        return self.apply("add", self, other)

    def __radd__(self, other: _Operand | float) -> _Operand:
        return self.apply("add", other, self)

    def __sub__(self, other: _Operand | float) -> _Operand:
        return self.apply("sub", self, other)

    def __rsub__(self, other: _Operand | float) -> _Operand:
        return self.apply("sub", other, self)

    def __mul__(self, other: _Operand | float) -> _Operand:
        return self.apply("mul", self, other)

    def __rmul__(self, other: _Operand | float) -> _Operand:
        return self.apply("mul", other, self)

    def __truediv__(self, other: _Operand | float) -> _Operand:
        return self.apply("div", self, other)

    def __rtruediv__(self, other: _Operand | float) -> _Operand:
        return self.apply("div", other, self)

    def __mod__(self, other: _Operand | float) -> _Operand:
        return self.apply("mod", self, other)

    def __rmod__(self, other: _Operand | float) -> _Operand:
        return self.apply("mod", other, self)

    def __pow__(self, other: _Operand | float) -> _Operand:
        return self.apply("pow", self, other)

    def __rpow__(self, other: _Operand | float) -> _Operand:
        return self.apply("pow", other, self)

    # bitwise
    def __invert__(self) -> _Operand:
        return self.apply("invert", self)

    def __and__(self, other: _Operand | float) -> _Operand:
        return self.apply("and", self, other)

    def __rand__(self, other: _Operand | float) -> _Operand:
        return self.apply("and", other, self)

    def __or__(self, other: _Operand | float) -> _Operand:
        return self.apply("or", self, other)

    def __ror__(self, other: _Operand | float) -> _Operand:
        return self.apply("or", other, self)

    def __xor__(self, other: _Operand | float) -> _Operand:
        return self.apply("xor", self, other)

    def __rxor__(self, other: _Operand | float) -> _Operand:
        return self.apply("xor", other, self)

    def __lshift__(self, other: _Operand | float) -> _Operand:
        return self.apply("lshift", self, other)

    def __rshift__(self, other: _Operand | float) -> _Operand:
        return self.apply("rshift", self, other)

    # logical
    # NOTE: __eq__/__ne__ return images, not bools, which is why the
    # type: ignore overrides are needed; the class is unhashable in effect.
    def __eq__(self, other: _Operand | float) -> _Operand:  # type: ignore[override]
        return self.apply("eq", self, other)

    def __ne__(self, other: _Operand | float) -> _Operand:  # type: ignore[override]
        return self.apply("ne", self, other)

    def __lt__(self, other: _Operand | float) -> _Operand:
        return self.apply("lt", self, other)

    def __le__(self, other: _Operand | float) -> _Operand:
        return self.apply("le", self, other)

    def __gt__(self, other: _Operand | float) -> _Operand:
        return self.apply("gt", self, other)

    def __ge__(self, other: _Operand | float) -> _Operand:
        return self.apply("ge", self, other)
# conversions
def imagemath_int(self: _Operand) -> _Operand:
    """Return *self* converted to 32-bit integer mode ("I")."""
    converted = self.im.convert("I")
    return _Operand(converted)
def imagemath_float(self: _Operand) -> _Operand:
    """Return *self* converted to 32-bit floating point mode ("F")."""
    converted = self.im.convert("F")
    return _Operand(converted)
# logical
def imagemath_equal(self: _Operand, other: _Operand | float | None) -> _Operand:
    """Pixelwise equality comparison; the result is an "I" mode image."""
    result = self.apply("eq", self, other, mode="I")
    return result
def imagemath_notequal(self: _Operand, other: _Operand | float | None) -> _Operand:
    """Pixelwise inequality comparison; the result is an "I" mode image."""
    result = self.apply("ne", self, other, mode="I")
    return result
def imagemath_min(self: _Operand, other: _Operand | float | None) -> _Operand:
    """Pixelwise minimum of *self* and *other*."""
    result = self.apply("min", self, other)
    return result
def imagemath_max(self: _Operand, other: _Operand | float | None) -> _Operand:
    """Pixelwise maximum of *self* and *other*."""
    result = self.apply("max", self, other)
    return result
def imagemath_convert(self: _Operand, mode: str) -> _Operand:
    """Return *self* converted to the given image *mode*."""
    converted = self.im.convert(mode)
    return _Operand(converted)
# Name -> helper table injected into the namespace seen by expressions
# evaluated with lambda_eval()/unsafe_eval(); each value takes an _Operand
# as its first argument.
ops = {
    "int": imagemath_int,
    "float": imagemath_float,
    "equal": imagemath_equal,
    "notequal": imagemath_notequal,
    "min": imagemath_min,
    "max": imagemath_max,
    "convert": imagemath_convert,
}
def lambda_eval(
    expression: Callable[[dict[str, Any]], Any],
    options: dict[str, Any] | None = None,
    **kw: Any,
) -> Any:
    """
    Returns the result of an image function.

    :py:mod:`~PIL.ImageMath` only supports single-layer images. To process multi-band
    images, use the :py:meth:`~PIL.Image.Image.split` method or
    :py:func:`~PIL.Image.merge` function.

    :param expression: A function that receives a dictionary.
    :param options: Values to add to the function's dictionary. Deprecated.
        You can instead use one or more keyword arguments.
    :param **kw: Values to add to the function's dictionary.
    :return: The expression result. This is usually an image object, but can
        also be an integer, a floating point value, or a pixel tuple,
        depending on the expression.
    """
    # Fixed mutable-default anti-pattern: the default used to be a shared
    # module-level dict ({}); None is now the "no options" sentinel.
    if options:
        deprecate(
            "ImageMath.lambda_eval options",
            12,
            "ImageMath.lambda_eval keyword arguments",
        )
    # Namespace passed to the expression: the imagemath helpers plus the
    # caller-supplied values, with plain images wrapped as _Operand.
    args: dict[str, Any] = ops.copy()
    args.update(options or {})
    args.update(kw)
    for k, v in args.items():
        if isinstance(v, Image.Image):
            args[k] = _Operand(v)
    out = expression(args)
    try:
        # Unwrap an _Operand result back to a plain image.
        return out.im
    except AttributeError:
        return out
def unsafe_eval(
    expression: str,
    options: dict[str, Any] | None = None,
    **kw: Any,
) -> Any:
    """
    Evaluates an image expression. This uses Python's ``eval()`` function to process
    the expression string, and carries the security risks of doing so. It is not
    recommended to process expressions without considering this.
    :py:meth:`~lambda_eval` is a more secure alternative.

    :py:mod:`~PIL.ImageMath` only supports single-layer images. To process multi-band
    images, use the :py:meth:`~PIL.Image.Image.split` method or
    :py:func:`~PIL.Image.merge` function.

    :param expression: A string containing a Python-style expression.
    :param options: Values to add to the evaluation context. Deprecated.
        You can instead use one or more keyword arguments.
    :param **kw: Values to add to the evaluation context.
    :return: The evaluated expression. This is usually an image object, but can
        also be an integer, a floating point value, or a pixel tuple,
        depending on the expression.
    """
    # Fixed mutable-default anti-pattern: None sentinel instead of a shared {}.
    if options:
        deprecate(
            "ImageMath.unsafe_eval options",
            12,
            "ImageMath.unsafe_eval keyword arguments",
        )

    # build execution namespace
    args: dict[str, Any] = ops.copy()
    options = options or {}
    # Reject names that could reach dunders or shadow builtins.
    for k in [*options, *kw]:
        if "__" in k or hasattr(builtins, k):
            msg = f"'{k}' not allowed"
            raise ValueError(msg)
    args.update(options)
    args.update(kw)
    for k, v in args.items():
        if isinstance(v, Image.Image):
            args[k] = _Operand(v)

    compiled_code = compile(expression, "", "eval")

    def scan(code: CodeType) -> None:
        # Walk nested code objects (lambdas, comprehensions) and reject any
        # name reference outside the permitted namespace.
        for const in code.co_consts:
            if type(const) is type(compiled_code):
                scan(const)
        for name in code.co_names:
            if name not in args and name != "abs":
                msg = f"'{name}' not allowed"
                raise ValueError(msg)

    scan(compiled_code)
    # BUG FIX: the sandbox key must be "__builtins__" (it was "__builtins");
    # with the wrong key CPython injects the real builtins module into the
    # globals, silently defeating the intended restriction to abs() only.
    out = builtins.eval(expression, {"__builtins__": {"abs": abs}}, args)
    try:
        # Unwrap an _Operand result back to a plain image.
        return out.im
    except AttributeError:
        return out
def eval(
    expression: str,
    _dict: dict[str, Any] | None = None,
    **kw: Any,
) -> Any:
    """
    Evaluates an image expression.

    Deprecated. Use lambda_eval() or unsafe_eval() instead.

    :param expression: A string containing a Python-style expression.
    :param _dict: Values to add to the evaluation context. You
        can either use a dictionary, or one or more keyword
        arguments.
    :return: The evaluated expression. This is usually an image object, but can
        also be an integer, a floating point value, or a pixel tuple,
        depending on the expression.

    .. deprecated:: 10.3.0
    """
    deprecate(
        "ImageMath.eval",
        12,
        "ImageMath.lambda_eval or ImageMath.unsafe_eval",
    )
    # Fixed mutable-default anti-pattern: None sentinel instead of a shared {}.
    return unsafe_eval(expression, _dict or {}, **kw)
venv\Lib\site-packages\PIL\ImageMode.py
#
# The Python Imaging Library.
# $Id$
#
# standard mode descriptors
#
# History:
# 2006-03-20 fl Added
#
# Copyright (c) 2006 by Secret Labs AB.
# Copyright (c) 2006 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import sys
from functools import lru_cache
from typing import NamedTuple
from ._deprecate import deprecate
class ModeDescriptor(NamedTuple):
    """Wrapper for mode strings."""

    # Full mode name, e.g. "RGB" or "I".
    mode: str
    # Individual band names, e.g. ("R", "G", "B").
    bands: tuple[str, ...]
    # Base colour mode this mode maps onto ("L", "RGB", "P", ...).
    basemode: str
    # Storage type of a single band ("L", "I" or "F").
    basetype: str
    # Array-interface typestr for the pixel data, e.g. "|u1" or "<i4".
    typestr: str

    def __str__(self) -> str:
        # A descriptor prints as its bare mode string.
        return self.mode
@lru_cache
def getmode(mode: str) -> ModeDescriptor:
"""Gets a mode descriptor for the given mode."""
endian = "<" if sys.byteorder == "little" else ">"
modes = {
# core modes
# Bits need to be extended to bytes
"1": ("L", "L", ("1",), "|b1"),
"L": ("L", "L", ("L",), "|u1"),
"I": ("L", "I", ("I",), f"{endian}i4"),
"F": ("L", "F", ("F",), f"{endian}f4"),
"P": ("P", "L", ("P",), "|u1"),
"RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
"RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
"RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
# UNDONE - unsigned |u1i1i1
"LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
"HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
# extra experimental modes
"RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"),
"BGR;15": ("RGB", "L", ("B", "G", "R"), "|u1"),
"BGR;16": ("RGB", "L", ("B", "G", "R"), "|u1"),
"BGR;24": ("RGB", "L", ("B", "G", "R"), "|u1"),
"LA": ("L", "L", ("L", "A"), "|u1"),
"La": ("L", "L", ("L", "a"), "|u1"),
"PA": ("RGB", "L", ("P", "A"), "|u1"),
}
if mode in modes:
if mode in ("BGR;15", "BGR;16", "BGR;24"):
deprecate(mode, 12)
base_mode, base_type, bands, type_str = modes[mode]
return ModeDescriptor(mode, bands, base_mode, base_type, type_str)
mapping_modes = {
# I;16 == I;16L, and I;32 == I;32L
"I;16": "u2",
"I;16BS": ">i2",
"I;16N": f"{endian}u2",
"I;16NS": f"{endian}i2",
"I;32": "u4",
"I;32L": "i4",
"I;32LS": "
venv\Lib\site-packages\PIL\ImageMorph.py
# A binary morphology add-on for the Python Imaging Library
#
# History:
# 2014-06-04 Initial version.
#
# Copyright (c) 2014 Dov Grobgeld
from __future__ import annotations
import re
from . import Image, _imagingmorph
# One LUT entry per 3x3 binary neighbourhood: 2**9 possible bit patterns.
LUT_SIZE = 1 << 9

# fmt: off
# Index permutation applied to a 9-character pattern string to rotate the
# 3x3 pattern by 90 degrees (used for the "4"-rotation symmetry op).
ROTATION_MATRIX = [
    6, 3, 0,
    7, 4, 1,
    8, 5, 2,
]
# Index permutation that mirrors a 3x3 pattern string (used for the "M" op).
MIRROR_MATRIX = [
    2, 1, 0,
    5, 4, 3,
    8, 7, 6,
]
# fmt: on
class LutBuilder:
    """A class for building a MorphLut from a descriptive language

    The input patterns is a list of a strings sequences like these::

        4:(...
           .1.
           111)->1

    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:

    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off

    The result of the operation is described after "->" string.

    The default is to return the current pixel value, which is
    returned if no other match is found.

    Operations:

    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring

    Example::

        lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
        lut = lb.build_lut()
    """

    def __init__(
        self, patterns: list[str] | None = None, op_name: str | None = None
    ) -> None:
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        # Compiled LUT; stays None until build_lut() runs.
        self.lut: bytearray | None = None
        if op_name is not None:
            # A named operation replaces any explicitly supplied patterns.
            known_patterns = {
                "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"],
                "dilation4": ["4:(... .0. .1.)->1"],
                "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"],
                "erosion4": ["4:(... .1. .0.)->0"],
                "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"],
                "edge": [
                    "1:(... ... ...)->0",
                    "4:(.0. .1. ...)->1",
                    "4:(01. .1. ...)->1",
                ],
            }
            if op_name not in known_patterns:
                msg = f"Unknown pattern {op_name}!"
                raise Exception(msg)
            self.patterns = known_patterns[op_name]

    def add_patterns(self, patterns: list[str]) -> None:
        # Append more pattern strings to be compiled by the next build_lut().
        self.patterns += patterns

    def build_default_lut(self) -> None:
        # Identity LUT: each neighbourhood maps to the value of its centre
        # pixel, which is bit 4 of the 9-bit LUT index.
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE))

    def get_lut(self) -> bytearray | None:
        return self.lut

    def _string_permute(self, pattern: str, permutation: list[int]) -> str:
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert len(permutation) == 9
        return "".join(pattern[p] for p in permutation)

    def _pattern_permute(
        self, basic_pattern: str, options: str, basic_result: int
    ) -> list[tuple[str, int]]:
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]

        # rotations
        if "4" in options:
            res = patterns[-1][1]
            # Four successive 90-degree rotations are appended, so the list
            # ends with a 360-degree duplicate of the original (harmless).
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res)
                )
        # mirror
        if "M" in options:
            n = len(patterns)
            for pattern, res in patterns[:n]:
                patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res))

        # negate
        if "N" in options:
            n = len(patterns)
            for pattern, res in patterns[:n]:
                # Swap 0 and 1
                pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1")
                res = 1 - int(res)
                patterns.append((pattern, res))

        return patterns

    def build_lut(self) -> bytearray:
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        assert self.lut is not None
        patterns = []

        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", ""))
            if not m:
                msg = 'Syntax error in pattern "' + p + '"'
                raise Exception(msg)
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))

            # Get rid of spaces
            pattern = pattern.replace(" ", "").replace("\n", "")

            patterns += self._pattern_permute(pattern, options, result)

        # compile the patterns into regular expressions for speed
        compiled_patterns = []
        for pattern in patterns:
            # "." and "X" are both wildcards; map both to a [01] class.
            p = pattern[0].replace(".", "X").replace("X", "[01]")
            compiled_patterns.append((re.compile(p), pattern[1]))

        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern
            bitpattern = bin(i)[2:]
            # Zero-pad to 9 bits and reverse, so that character k of the
            # string corresponds to bit k of the LUT index.
            bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1]

            for pattern, r in compiled_patterns:
                if pattern.match(bitpattern):
                    # Normalize the stored result to a 0/1 byte.
                    self.lut[i] = [0, 1][r]

        return self.lut
class MorphOp:
    """A class for binary morphological operators"""

    def __init__(
        self,
        lut: bytearray | None = None,
        op_name: str | None = None,
        patterns: list[str] | None = None,
    ) -> None:
        """Create a binary morphological operator

        The LUT may be supplied directly, or compiled from a named
        operation or a list of pattern strings (in that order of
        precedence).
        """
        self.lut: bytearray | None = lut
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()

    def _require_lut(self) -> None:
        # Shared guard: most operations need a loaded LUT.
        if self.lut is None:
            msg = "No operator loaded"
            raise Exception(msg)

    @staticmethod
    def _require_mode_l(image: Image.Image) -> None:
        # Shared guard: the C morphology routines only accept "L" images.
        if image.mode != "L":
            msg = "Image mode must be L"
            raise ValueError(msg)

    def apply(self, image: Image.Image) -> tuple[int, Image.Image]:
        """Run a single morphological operation on an image

        Returns a tuple of the number of changed pixels and the
        morphed image"""
        self._require_lut()
        self._require_mode_l(image)
        morphed = Image.new(image.mode, image.size, None)
        changed = _imagingmorph.apply(bytes(self.lut), image.getim(), morphed.getim())
        return changed, morphed

    def match(self, image: Image.Image) -> list[tuple[int, int]]:
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels. See :ref:`coordinate-system`."""
        self._require_lut()
        self._require_mode_l(image)
        return _imagingmorph.match(bytes(self.lut), image.getim())

    def get_on_pixels(self, image: Image.Image) -> list[tuple[int, int]]:
        """Get a list of all turned on pixels in a binary image

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels. See :ref:`coordinate-system`."""
        self._require_mode_l(image)
        return _imagingmorph.get_on_pixels(image.getim())

    def load_lut(self, filename: str) -> None:
        """Load an operator from an mrl file"""
        with open(filename, "rb") as fp:
            data = fp.read()
        if len(data) != LUT_SIZE:
            self.lut = None
            msg = "Wrong size operator file!"
            raise Exception(msg)
        self.lut = bytearray(data)

    def save_lut(self, filename: str) -> None:
        """Save an operator to an mrl file"""
        self._require_lut()
        with open(filename, "wb") as fp:
            fp.write(self.lut)

    def set_lut(self, lut: bytearray | None) -> None:
        """Set the lut from an external source"""
        self.lut = lut
venv\Lib\site-packages\PIL\ImageOps.py
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import functools
import operator
import re
from collections.abc import Sequence
from typing import Literal, Protocol, cast, overload
from . import ExifTags, Image, ImagePalette
#
# helpers
def _border(border: int | tuple[int, ...]) -> tuple[int, int, int, int]:
if isinstance(border, tuple):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color: str | int | tuple[int, ...], mode: str) -> int | tuple[int, ...]:
if isinstance(color, str):
from . import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image: Image.Image, lut: list[int]) -> Image.Image:
if image.mode == "P":
# FIXME: apply to lookup table, not image data
msg = "mode P support coming soon"
raise NotImplementedError(msg)
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
msg = f"not supported for mode {image.mode}"
raise OSError(msg)
#
# actions
def autocontrast(
    image: Image.Image,
    cutoff: float | tuple[float, float] = 0,
    ignore: int | Sequence[int] | None = None,
    mask: Image.Image | None = None,
    preserve_tone: bool = False,
) -> Image.Image:
    """
    Maximize (normalize) image contrast. This function calculates a
    histogram of the input image (or mask region), removes ``cutoff`` percent of the
    lightest and darkest pixels from the histogram, and remaps the image
    so that the darkest pixel becomes black (0), and the lightest
    becomes white (255).

    :param image: The image to process.
    :param cutoff: The percent to cut off from the histogram on the low and
                   high ends. Either a tuple of (low, high), or a single
                   number for both.
    :param ignore: The background pixel value (use None for no background).
    :param mask: Histogram used in contrast operation is computed using pixels
                 within the mask. If no mask is given the entire image is used
                 for histogram computation.
    :param preserve_tone: Preserve image tone in Photoshop-like style autocontrast.

                          .. versionadded:: 8.2.0

    :return: An image.
    """
    if preserve_tone:
        # Tone-preserving mode derives a single LUT from the grayscale
        # histogram rather than stretching each band independently.
        histogram = image.convert("L").histogram(mask)
    else:
        histogram = image.histogram(mask)

    lut = []
    # One 256-bin histogram slice per band.
    for layer in range(0, len(histogram), 256):
        h = histogram[layer : layer + 256]
        if ignore is not None:
            # get rid of outliers
            if isinstance(ignore, int):
                h[ignore] = 0
            else:
                for ix in ignore:
                    h[ix] = 0
        if cutoff:
            # cut off pixels from both ends of the histogram
            # NOTE: a scalar cutoff is rebound to a (low, high) tuple here on
            # the first layer; later layers then take the tuple path.
            if not isinstance(cutoff, tuple):
                cutoff = (cutoff, cutoff)
            # get number of pixels
            n = 0
            for ix in range(256):
                n = n + h[ix]
            # remove cutoff% pixels from the low end
            cut = int(n * cutoff[0] // 100)
            for lo in range(256):
                if cut > h[lo]:
                    cut = cut - h[lo]
                    h[lo] = 0
                else:
                    # This bin absorbs the remainder; the break below fires.
                    h[lo] -= cut
                    cut = 0
                if cut <= 0:
                    break
            # remove cutoff% samples from the high end
            cut = int(n * cutoff[1] // 100)
            for hi in range(255, -1, -1):
                if cut > h[hi]:
                    cut = cut - h[hi]
                    h[hi] = 0
                else:
                    h[hi] -= cut
                    cut = 0
                if cut <= 0:
                    break
        # find lowest/highest samples after preprocessing
        for lo in range(256):
            if h[lo]:
                break
        for hi in range(255, -1, -1):
            if h[hi]:
                break
        if hi <= lo:
            # don't bother
            lut.extend(list(range(256)))
        else:
            # Linearly remap [lo, hi] onto [0, 255], clamping at both ends.
            scale = 255.0 / (hi - lo)
            offset = -lo * scale
            for ix in range(256):
                ix = int(ix * scale + offset)
                if ix < 0:
                    ix = 0
                elif ix > 255:
                    ix = 255
                lut.append(ix)
    return _lut(image, lut)
def colorize(
    image: Image.Image,
    black: str | tuple[int, ...],
    white: str | tuple[int, ...],
    mid: str | int | tuple[int, ...] | None = None,
    blackpoint: int = 0,
    whitepoint: int = 255,
    midpoint: int = 127,
) -> Image.Image:
    """
    Colorize grayscale image.

    This function calculates a color wedge which maps all black pixels in
    the source image to the first color and all white pixels to the
    second color. If ``mid`` is specified, it uses three-color mapping.
    The ``black`` and ``white`` arguments should be RGB tuples or color names;
    optionally you can use three-color mapping by also specifying ``mid``.
    Mapping positions for any of the colors can be specified
    (e.g. ``blackpoint``), where these parameters are the integer
    value corresponding to where the corresponding color should be mapped.
    These parameters must have logical order, such that
    ``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified).

    :param image: The image to colorize.
    :param black: The color to use for black input pixels.
    :param white: The color to use for white input pixels.
    :param mid: The color to use for midtone input pixels.
    :param blackpoint: an int value [0, 255] for the black mapping.
    :param whitepoint: an int value [0, 255] for the white mapping.
    :param midpoint: an int value [0, 255] for the midtone mapping.
    :return: An image.
    """
    # Initial asserts
    assert image.mode == "L"
    if mid is None:
        assert 0 <= blackpoint <= whitepoint <= 255
    else:
        assert 0 <= blackpoint <= midpoint <= whitepoint <= 255

    # Define colors from arguments
    rgb_black = cast(Sequence[int], _color(black, "RGB"))
    rgb_white = cast(Sequence[int], _color(white, "RGB"))
    rgb_mid = cast(Sequence[int], _color(mid, "RGB")) if mid is not None else None

    # Empty lists for the mapping
    red = []
    green = []
    blue = []

    # Create the low-end values
    # Everything below blackpoint is clamped to the black color.
    for i in range(blackpoint):
        red.append(rgb_black[0])
        green.append(rgb_black[1])
        blue.append(rgb_black[2])

    # Create the mapping (2-color)
    if rgb_mid is None:
        # Integer linear interpolation from black to white over the span.
        range_map = range(whitepoint - blackpoint)

        for i in range_map:
            red.append(
                rgb_black[0] + i * (rgb_white[0] - rgb_black[0]) // len(range_map)
            )
            green.append(
                rgb_black[1] + i * (rgb_white[1] - rgb_black[1]) // len(range_map)
            )
            blue.append(
                rgb_black[2] + i * (rgb_white[2] - rgb_black[2]) // len(range_map)
            )

    # Create the mapping (3-color)
    else:
        # Two segments: black -> mid, then mid -> white.
        range_map1 = range(midpoint - blackpoint)
        range_map2 = range(whitepoint - midpoint)

        for i in range_map1:
            red.append(
                rgb_black[0] + i * (rgb_mid[0] - rgb_black[0]) // len(range_map1)
            )
            green.append(
                rgb_black[1] + i * (rgb_mid[1] - rgb_black[1]) // len(range_map1)
            )
            blue.append(
                rgb_black[2] + i * (rgb_mid[2] - rgb_black[2]) // len(range_map1)
            )
        for i in range_map2:
            red.append(rgb_mid[0] + i * (rgb_white[0] - rgb_mid[0]) // len(range_map2))
            green.append(
                rgb_mid[1] + i * (rgb_white[1] - rgb_mid[1]) // len(range_map2)
            )
            blue.append(rgb_mid[2] + i * (rgb_white[2] - rgb_mid[2]) // len(range_map2))

    # Create the high-end values
    # Everything above whitepoint is clamped to the white color.
    for i in range(256 - whitepoint):
        red.append(rgb_white[0])
        green.append(rgb_white[1])
        blue.append(rgb_white[2])

    # Return converted image
    image = image.convert("RGB")
    return _lut(image, red + green + blue)
def contain(
    image: Image.Image, size: tuple[int, int], method: int = Image.Resampling.BICUBIC
) -> Image.Image:
    """
    Returns a resized version of the image, set to the maximum width and height
    within the requested size, while maintaining the original aspect ratio.

    :param image: The image to resize.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: Resampling method to use. Default is
                   :py:attr:`~PIL.Image.Resampling.BICUBIC`.
                   See :ref:`concept-filters`.
    :return: An image.
    """
    width, height = size
    src_ratio = image.width / image.height
    target_ratio = width / height
    if src_ratio > target_ratio:
        # Proportionally wider than the target: width rules, shrink height.
        fitted_height = round(image.height / image.width * width)
        if fitted_height != height:
            size = (width, fitted_height)
    elif src_ratio < target_ratio:
        # Proportionally taller than the target: height rules, shrink width.
        fitted_width = round(image.width / image.height * height)
        if fitted_width != width:
            size = (fitted_width, height)
    return image.resize(size, resample=method)
def cover(
    image: Image.Image, size: tuple[int, int], method: int = Image.Resampling.BICUBIC
) -> Image.Image:
    """
    Returns a resized version of the image, so that the requested size is
    covered, while maintaining the original aspect ratio.

    :param image: The image to resize.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: Resampling method to use. Default is
                   :py:attr:`~PIL.Image.Resampling.BICUBIC`.
                   See :ref:`concept-filters`.
    :return: An image.
    """
    width, height = size
    src_ratio = image.width / image.height
    target_ratio = width / height
    if src_ratio < target_ratio:
        # Proportionally taller than the target: match width, overflow height.
        covered_height = round(image.height / image.width * width)
        if covered_height != height:
            size = (width, covered_height)
    elif src_ratio > target_ratio:
        # Proportionally wider than the target: match height, overflow width.
        covered_width = round(image.width / image.height * height)
        if covered_width != width:
            size = (covered_width, height)
    return image.resize(size, resample=method)
def pad(
    image: Image.Image,
    size: tuple[int, int],
    method: int = Image.Resampling.BICUBIC,
    color: str | int | tuple[int, ...] | None = None,
    centering: tuple[float, float] = (0.5, 0.5),
) -> Image.Image:
    """
    Returns a resized and padded version of the image, expanded to fill the
    requested aspect ratio and size.

    :param image: The image to resize and crop.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: Resampling method to use. Default is
                   :py:attr:`~PIL.Image.Resampling.BICUBIC`.
                   See :ref:`concept-filters`.
    :param color: The background color of the padded image.
    :param centering: Control the position of the original image within the
                      padded version.

                          (0.5, 0.5) will keep the image centered
                          (0, 0) will keep the image aligned to the top left
                          (1, 1) will keep the image aligned to the bottom
                          right

    :return: An image.
    """
    resized = contain(image, size, method)
    if resized.size == size:
        return resized
    out = Image.new(image.mode, size, color)
    if resized.palette:
        # Carry the source palette over to the padded canvas when available.
        source_palette = resized.getpalette()
        if source_palette is not None:
            out.putpalette(source_palette)
    if resized.width != size[0]:
        # Pad horizontally; centering fraction is clamped into [0, 1].
        frac = max(0, min(centering[0], 1))
        out.paste(resized, (round((size[0] - resized.width) * frac), 0))
    else:
        # Pad vertically.
        frac = max(0, min(centering[1], 1))
        out.paste(resized, (0, round((size[1] - resized.height) * frac)))
    return out
def crop(image: Image.Image, border: int = 0) -> Image.Image:
    """
    Remove border from image. The same amount of pixels are removed
    from all four sides. This function works on all image modes.

    .. seealso:: :py:meth:`~PIL.Image.Image.crop`

    :param image: The image to crop.
    :param border: The number of pixels to remove.
    :return: An image.
    """
    left, top, right, bottom = _border(border)
    width, height = image.size
    return image.crop((left, top, width - right, height - bottom))
def scale(
    image: Image.Image, factor: float, resample: int = Image.Resampling.BICUBIC
) -> Image.Image:
    """
    Returns a rescaled image by a specific factor given in parameter.
    A factor greater than 1 expands the image, between 0 and 1 contracts the
    image.

    :param image: The image to rescale.
    :param factor: The expansion factor, as a float.
    :param resample: Resampling method to use. Default is
                     :py:attr:`~PIL.Image.Resampling.BICUBIC`.
                     See :ref:`concept-filters`.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if factor <= 0:
        msg = "the factor must be greater than 0"
        raise ValueError(msg)
    if factor == 1:
        # Nothing to do; hand back an independent copy.
        return image.copy()
    new_size = (round(factor * image.width), round(factor * image.height))
    return image.resize(new_size, resample)
class SupportsGetMesh(Protocol):
    """
    An object that supports the ``getmesh`` method, taking an image as an
    argument, and returning a list of tuples. Each tuple contains two tuples,
    the source box as a tuple of 4 integers, and a tuple of 8 integers for the
    final quadrilateral, in order of top left, bottom left, bottom right, top
    right.
    """

    # Structural protocol: any object providing a matching getmesh() method
    # satisfies it (used by deform() below).
    def getmesh(
        self, image: Image.Image
    ) -> list[
        tuple[tuple[int, int, int, int], tuple[int, int, int, int, int, int, int, int]]
    ]: ...
def deform(
    image: Image.Image,
    deformer: SupportsGetMesh,
    resample: int = Image.Resampling.BILINEAR,
) -> Image.Image:
    """
    Deform the image.

    :param image: The image to deform.
    :param deformer: A deformer object. Any object that implements a
                     ``getmesh`` method can be used.
    :param resample: An optional resampling filter. Same values possible as
                     in the PIL.Image.transform function.
    :return: An image.
    """
    mesh = deformer.getmesh(image)
    return image.transform(image.size, Image.Transform.MESH, mesh, resample)
def equalize(image: Image.Image, mask: Image.Image | None = None) -> Image.Image:
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.

    :param image: The image to equalize.
    :param mask: An optional mask. If given, only the pixels selected by
                 the mask are included in the analysis.
    :return: An image.
    """
    if image.mode == "P":
        image = image.convert("RGB")
    h = image.histogram(mask)
    lut = []
    for b in range(0, len(h), 256):
        # Occupied (non-zero) bins only, used for the step computation.
        histo = [_f for _f in h[b : b + 256] if _f]
        if len(histo) <= 1:
            # Flat or empty band: identity mapping.
            lut.extend(list(range(256)))
        else:
            # Average step per output level, excluding the topmost occupied
            # bin; the "if not step" guard avoids dividing by a zero step.
            step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
            if not step:
                lut.extend(list(range(256)))
            else:
                # Cumulative sum walks the *full* histogram slice (h[i + b]),
                # including empty bins, offset by half a step for rounding.
                n = step // 2
                for i in range(256):
                    lut.append(n // step)
                    n = n + h[i + b]
    return _lut(image, lut)
def expand(
    image: Image.Image,
    border: int | tuple[int, ...] = 0,
    fill: str | int | tuple[int, ...] = 0,
) -> Image.Image:
    """
    Add border to the image

    :param image: The image to expand.
    :param border: Border width, in pixels.
    :param fill: Pixel fill value (a color value). Default is 0 (black).
    :return: An image.
    """
    left, top, right, bottom = _border(border)
    width = left + image.size[0] + right
    height = top + image.size[1] + bottom
    color = _color(fill, image.mode)
    if image.palette:
        # For palette images, translate an RGB(A) fill into a palette index;
        # note getcolor() may allocate a new palette entry as a side effect.
        palette = ImagePalette.ImagePalette(palette=image.getpalette())
        if isinstance(color, tuple) and (len(color) == 3 or len(color) == 4):
            color = palette.getcolor(color)
    else:
        palette = None
    out = Image.new(image.mode, (width, height), color)
    if palette:
        # Propagate the (possibly extended) palette onto the new canvas.
        out.putpalette(palette.palette)
    out.paste(image, (left, top))
    return out
def fit(
    image: Image.Image,
    size: tuple[int, int],
    method: int = Image.Resampling.BICUBIC,
    bleed: float = 0.0,
    centering: tuple[float, float] = (0.5, 0.5),
) -> Image.Image:
    """
    Returns a resized and cropped version of the image, cropped to the
    requested aspect ratio and size.

    This function was contributed by Kevin Cazabon.

    :param image: The image to resize and crop.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: Resampling method to use. Default is
                   :py:attr:`~PIL.Image.Resampling.BICUBIC`.
                   See :ref:`concept-filters`.
    :param bleed: Remove a border around the outside of the image from all
                  four edges. The value is a decimal percentage (use 0.01 for
                  one percent). The default value is 0 (no border).
                  Cannot be greater than or equal to 0.5.
    :param centering: Control the cropping position. Use (0.5, 0.5) for
                      center cropping (e.g. if cropping the width, take 50% off
                      of the left side, and therefore 50% off the right side).
                      (0.0, 0.0) will crop from the top left corner (i.e. if
                      cropping the width, take all of the crop off of the right
                      side, and if cropping the height, take all of it off the
                      bottom). (1.0, 0.0) will crop from the bottom left
                      corner, etc. (i.e. if cropping the width, take all of the
                      crop off the left side, and if cropping the height take
                      none from the top, and therefore all off the bottom).
    :return: An image.
    """
    # by Kevin Cazabon, Feb 17/2000
    # kevin@cazabon.com
    # https://www.cazabon.com

    # Out-of-range centering or bleed values are silently reset to the
    # defaults rather than raising.
    centering_x, centering_y = centering
    if not 0.0 <= centering_x <= 1.0:
        centering_x = 0.5
    if not 0.0 <= centering_y <= 1.0:
        centering_y = 0.5
    if not 0.0 <= bleed < 0.5:
        bleed = 0.0

    # calculate the area to use for resizing and cropping, subtracting
    # the 'bleed' around the edges

    # number of pixels to trim off on Top and Bottom, Left and Right
    bleed_pixels = (bleed * image.size[0], bleed * image.size[1])

    live_size = (
        image.size[0] - bleed_pixels[0] * 2,
        image.size[1] - bleed_pixels[1] * 2,
    )

    # calculate the aspect ratio of the live_size
    live_size_ratio = live_size[0] / live_size[1]

    # calculate the aspect ratio of the output image
    output_ratio = size[0] / size[1]

    # figure out if the sides or top/bottom will be cropped off
    if live_size_ratio == output_ratio:
        # live_size is already the needed ratio
        crop_width = live_size[0]
        crop_height = live_size[1]
    elif live_size_ratio >= output_ratio:
        # live_size is wider than what's needed, crop the sides
        crop_width = output_ratio * live_size[1]
        crop_height = live_size[1]
    else:
        # live_size is taller than what's needed, crop the top and bottom
        crop_width = live_size[0]
        crop_height = live_size[0] / output_ratio

    # make the crop
    crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering_x
    crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering_y

    crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)

    # resize the image and return it
    return image.resize(size, method, box=crop)
def flip(image: Image.Image) -> Image.Image:
    """
    Flip the image vertically (top to bottom).

    :param image: The image to flip.
    :return: The flipped image.
    """
    method = Image.Transpose.FLIP_TOP_BOTTOM
    return image.transpose(method)
def grayscale(image: Image.Image) -> Image.Image:
    """
    Convert the image to grayscale.

    :param image: The image to convert.
    :return: A grayscale ("L" mode) image.
    """
    target_mode = "L"
    return image.convert(target_mode)
def invert(image: Image.Image) -> Image.Image:
    """
    Invert (negate) the image.

    :param image: The image to invert.
    :return: The inverted image.
    """
    # 0 -> 255, 1 -> 254, ..., 255 -> 0
    lut = [255 - i for i in range(256)]
    if image.mode == "1":
        # Bilevel images bypass the _lut() mode check.
        return image.point(lut)
    return _lut(image, lut)
def mirror(image: Image.Image) -> Image.Image:
    """
    Flip image horizontally (left to right).

    :param image: The image to mirror.
    :return: The mirrored image.
    """
    method = Image.Transpose.FLIP_LEFT_RIGHT
    return image.transpose(method)
def posterize(image: Image.Image, bits: int) -> Image.Image:
    """
    Reduce the number of bits for each color channel.

    :param image: The image to posterize.
    :param bits: The number of bits to keep for each channel (1-8).
    :return: An image.
    :raises ValueError: If ``bits`` is outside the documented 1-8 range.
    """
    if not 1 <= bits <= 8:
        # The docstring requires 1-8; out-of-range values previously produced
        # nonsense (bits=0 maps every value to 0, bits>8 raises a TypeError
        # from a fractional power deep inside).
        msg = "bits must be between 1 and 8"
        raise ValueError(msg)
    # Zero out the low (8 - bits) bits of every channel value.
    mask = ~(2 ** (8 - bits) - 1)
    lut = [i & mask for i in range(256)]
    return _lut(image, lut)
def solarize(image: Image.Image, threshold: int = 128) -> Image.Image:
    """
    Invert all pixel values above a threshold.

    :param image: The image to solarize.
    :param threshold: All pixels above this grayscale level are inverted.
    :return: The solarized image.
    """
    # Identity below the threshold, negation at and above it.
    lut = [i if i < threshold else 255 - i for i in range(256)]
    return _lut(image, lut)
@overload
def exif_transpose(image: Image.Image, *, in_place: Literal[True]) -> None: ...
@overload
def exif_transpose(
    image: Image.Image, *, in_place: Literal[False] = False
) -> Image.Image: ...
def exif_transpose(image: Image.Image, *, in_place: bool = False) -> Image.Image | None:
    """
    If an image has an EXIF Orientation tag, other than 1, transpose the image
    accordingly, and remove the orientation data.

    :param image: The image to transpose.
    :param in_place: Boolean. Keyword-only argument.
        If ``True``, the original image is modified in-place, and ``None`` is returned.
        If ``False`` (default), a new :py:class:`~PIL.Image.Image` object is returned
        with the transposition applied. If there is no transposition, a copy of the
        image will be returned.
    """
    image.load()
    image_exif = image.getexif()
    orientation = image_exif.get(ExifTags.Base.Orientation, 1)
    # Map each EXIF orientation value (2-8) to the transpose that undoes it;
    # 1 (normal) and unrecognized values map to None.
    method = {
        2: Image.Transpose.FLIP_LEFT_RIGHT,
        3: Image.Transpose.ROTATE_180,
        4: Image.Transpose.FLIP_TOP_BOTTOM,
        5: Image.Transpose.TRANSPOSE,
        6: Image.Transpose.ROTATE_270,
        7: Image.Transpose.TRANSVERSE,
        8: Image.Transpose.ROTATE_90,
    }.get(orientation)
    if method is not None:
        if in_place:
            image.im = image.im.transpose(method)
            image._size = image.im.size
        else:
            transposed_image = image.transpose(method)
        exif_image = image if in_place else transposed_image
        exif = exif_image.getexif()
        if ExifTags.Base.Orientation in exif:
            del exif[ExifTags.Base.Orientation]
            # Re-serialize the EXIF block (now without the orientation tag)
            # into whichever info key the loader populated.
            if "exif" in exif_image.info:
                exif_image.info["exif"] = exif.tobytes()
            elif "Raw profile type exif" in exif_image.info:
                exif_image.info["Raw profile type exif"] = exif.tobytes().hex()
        # Also strip the orientation from any XMP metadata blocks.
        for key in ("XML:com.adobe.xmp", "xmp"):
            if key in exif_image.info:
                for pattern in (
                    r'tiff:Orientation="([0-9])"',
                    # NOTE(review): this bare pattern would strip *every*
                    # digit from the XMP text — it looks like an XML-element
                    # form of tiff:Orientation whose angle-bracket markup was
                    # lost when this file was extracted; verify against the
                    # original source before relying on it.
                    r"([0-9])",
                ):
                    value = exif_image.info[key]
                    if isinstance(value, str):
                        value = re.sub(pattern, "", value)
                    elif isinstance(value, tuple):
                        value = tuple(
                            re.sub(pattern.encode(), b"", v) for v in value
                        )
                    else:
                        value = re.sub(pattern.encode(), b"", value)
                    exif_image.info[key] = value
        if not in_place:
            return transposed_image
    elif not in_place:
        return image.copy()
    return None
venv\Lib\site-packages\PIL\ImagePalette.py
#
# The Python Imaging Library.
# $Id$
#
# image palette object
#
# History:
# 1996-03-11 fl Rewritten.
# 1997-01-03 fl Up and running.
# 1997-08-23 fl Added load hack
# 2001-04-16 fl Fixed randint shadow bug in random()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import array
from collections.abc import Sequence
from typing import IO
from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile
TYPE_CHECKING = False
if TYPE_CHECKING:
from . import Image
class ImagePalette:
"""
Color palette for palette mapped images
:param mode: The mode to use for the palette. See:
:ref:`concept-modes`. Defaults to "RGB"
:param palette: An optional palette. If given, it must be a bytearray,
an array or a list of ints between 0-255. The list must consist of
all channels for one color followed by the next color (e.g. RGBRGBRGB).
Defaults to an empty palette.
"""
def __init__(
self,
mode: str = "RGB",
palette: Sequence[int] | bytes | bytearray | None = None,
) -> None:
self.mode = mode
self.rawmode: str | None = None # if set, palette contains raw data
self.palette = palette or bytearray()
self.dirty: int | None = None
@property
def palette(self) -> Sequence[int] | bytes | bytearray:
return self._palette
@palette.setter
def palette(self, palette: Sequence[int] | bytes | bytearray) -> None:
self._colors: dict[tuple[int, ...], int] | None = None
self._palette = palette
@property
def colors(self) -> dict[tuple[int, ...], int]:
if self._colors is None:
mode_len = len(self.mode)
self._colors = {}
for i in range(0, len(self.palette), mode_len):
color = tuple(self.palette[i : i + mode_len])
if color in self._colors:
continue
self._colors[color] = i // mode_len
return self._colors
@colors.setter
def colors(self, colors: dict[tuple[int, ...], int]) -> None:
self._colors = colors
def copy(self) -> ImagePalette:
new = ImagePalette()
new.mode = self.mode
new.rawmode = self.rawmode
if self.palette is not None:
new.palette = self.palette[:]
new.dirty = self.dirty
return new
def getdata(self) -> tuple[str, Sequence[int] | bytes | bytearray]:
"""
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive.
.. warning:: This method is experimental.
"""
if self.rawmode:
return self.rawmode, self.palette
return self.mode, self.tobytes()
def tobytes(self) -> bytes:
"""Convert palette to bytes.
.. warning:: This method is experimental.
"""
if self.rawmode:
msg = "palette contains raw palette data"
raise ValueError(msg)
if isinstance(self.palette, bytes):
return self.palette
arr = array.array("B", self.palette)
return arr.tobytes()
# Declare tostring as an alias for tobytes
tostring = tobytes
def _new_color_index(
self, image: Image.Image | None = None, e: Exception | None = None
) -> int:
if not isinstance(self.palette, bytearray):
self._palette = bytearray(self.palette)
index = len(self.palette) // 3
special_colors: tuple[int | tuple[int, ...] | None, ...] = ()
if image:
special_colors = (
image.info.get("background"),
image.info.get("transparency"),
)
while index in special_colors:
index += 1
if index >= 256:
if image:
# Search for an unused index
for i, count in reversed(list(enumerate(image.histogram()))):
if count == 0 and i not in special_colors:
index = i
break
if index >= 256:
msg = "cannot allocate more than 256 colors"
raise ValueError(msg) from e
return index
def getcolor(
    self,
    color: tuple[int, ...],
    image: Image.Image | None = None,
) -> int:
    """Given an rgb tuple, allocate palette entry.

    Returns the existing index for a known color, otherwise allocates a
    new slot via :py:meth:`_new_color_index` and patches the raw palette.

    .. warning:: This method is experimental.
    """
    if self.rawmode:
        msg = "palette contains raw palette data"
        raise ValueError(msg)
    if isinstance(color, tuple):
        if self.mode == "RGB":
            if len(color) == 4:
                if color[3] != 255:
                    msg = "cannot add non-opaque RGBA color to RGB palette"
                    raise ValueError(msg)
                # Fully opaque RGBA is equivalent to RGB; drop the alpha.
                color = color[:3]
        elif self.mode == "RGBA":
            if len(color) == 3:
                # Promote RGB to opaque RGBA.
                color += (255,)
        try:
            return self.colors[color]
        except KeyError as e:
            # allocate new color slot
            index = self._new_color_index(image, e)
            assert isinstance(self._palette, bytearray)
            self.colors[color] = index
            if index * 3 < len(self.palette):
                # Reusing a reclaimed slot: overwrite the 3 bytes in place.
                self._palette = (
                    self._palette[: index * 3]
                    + bytes(color)
                    + self._palette[index * 3 + 3 :]
                )
            else:
                # Brand-new slot: append at the end.
                self._palette += bytes(color)
            self.dirty = 1
            return index
    else:
        msg = f"unknown color specifier: {repr(color)}"  # type: ignore[unreachable]
        raise ValueError(msg)
def save(self, fp: str | IO[str]) -> None:
    """Save palette to text file.

    :param fp: Target filename or writable text stream. The stream is
        always closed on return (matching the historical behavior of
        closing even a caller-provided stream).
    :raises ValueError: if the palette contains raw palette data.

    .. warning:: This method is experimental.
    """
    if self.rawmode:
        msg = "palette contains raw palette data"
        raise ValueError(msg)
    if isinstance(fp, str):
        fp = open(fp, "w")
    try:
        fp.write("# Palette\n")
        fp.write(f"# Mode: {self.mode}\n")
        for i in range(256):
            fp.write(f"{i}")
            # One value per channel; entries past the stored data are
            # padded with 0 so all 256 rows are present.
            for j in range(i * len(self.mode), (i + 1) * len(self.mode)):
                try:
                    fp.write(f" {self.palette[j]}")
                except IndexError:
                    fp.write(" 0")
            fp.write("\n")
    finally:
        # Close even if a write fails so a file opened here never leaks.
        fp.close()
# --------------------------------------------------------------------
# Internal
def raw(rawmode: str, data: Sequence[int] | bytes | bytearray) -> ImagePalette:
    """Wrap already-encoded palette data in a dirty :class:`ImagePalette`."""
    wrapper = ImagePalette()
    wrapper.rawmode = rawmode
    wrapper.palette = data
    wrapper.dirty = 1
    return wrapper
# --------------------------------------------------------------------
# Factories
def make_linear_lut(black: int, white: float) -> list[int]:
    """Build a 256-entry LUT that scales 0..255 linearly up to ``white``.

    :raises NotImplementedError: for any non-zero ``black`` level.
    """
    if black != 0:
        msg = "unavailable when black is non-zero"
        raise NotImplementedError(msg)  # FIXME
    return [int(white * level // 255) for level in range(256)]
def make_gamma_lut(exp: float) -> list[int]:
    """Build a 256-entry gamma-correction LUT for exponent ``exp``."""
    lut = []
    for level in range(256):
        # Normalize to 0..1, apply the power curve, rescale and round.
        lut.append(int(((level / 255.0) ** exp) * 255.0 + 0.5))
    return lut
def negative(mode: str = "RGB") -> ImagePalette:
    """Create a palette mapping each level to its inverse (255 -> 0)."""
    channels = len(mode)
    values = [i // channels for i in reversed(range(256 * channels))]
    return ImagePalette(mode, values)
def random(mode: str = "RGB") -> ImagePalette:
    """Create a palette filled with uniformly random channel values."""
    from random import randint

    values = [randint(0, 255) for _ in range(256 * len(mode))]
    return ImagePalette(mode, values)
def sepia(white: str = "#fff0c0") -> ImagePalette:
    """Create a sepia-toned RGB palette ramping from black up to ``white``."""
    bands = [make_linear_lut(0, channel) for channel in ImageColor.getrgb(white)]
    # Interleave the three per-channel ramps into RGB triplets.
    values = [bands[i % 3][i // 3] for i in range(256 * 3)]
    return ImagePalette("RGB", values)
def wedge(mode: str = "RGB") -> ImagePalette:
    """Create a grayscale wedge palette (0..255 ramp in every band)."""
    channels = len(mode)
    return ImagePalette(mode, [i // channels for i in range(256 * channels)])
def load(filename: str) -> tuple[bytes, str]:
    """Load a palette from a file, probing each known handler in turn.

    :returns: ``(data, rawmode)`` as produced by the matching handler.
    :raises OSError: if no handler recognises the file.
    """
    # FIXME: supports GIMP gradients only
    with open(filename, "rb") as fp:
        paletteHandlers: list[
            type[
                GimpPaletteFile.GimpPaletteFile
                | GimpGradientFile.GimpGradientFile
                | PaletteFile.PaletteFile
            ]
        ] = [
            GimpPaletteFile.GimpPaletteFile,
            GimpGradientFile.GimpGradientFile,
            PaletteFile.PaletteFile,
        ]
        for paletteHandler in paletteHandlers:
            try:
                # Rewind so every handler sees the file from the start.
                fp.seek(0)
                lut = paletteHandler(fp).getpalette()
                if lut:
                    break
            except (SyntaxError, ValueError):
                # Not this format; try the next handler.
                pass
        else:
            msg = "cannot load palette"
            raise OSError(msg)

    return lut  # data, rawmode
venv\Lib\site-packages\PIL\ImagePath.py
#
# The Python Imaging Library
# $Id$
#
# path interface
#
# History:
# 1996-11-04 fl Created
# 2002-04-14 fl Added documentation stub class
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image
# Re-export the C extension's path factory as ImagePath.Path.
Path = Image.core.path
venv\Lib\site-packages\PIL\ImageQt.py
#
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com)
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import sys
from io import BytesIO
from typing import Any, Callable, Union
from . import Image
from ._util import is_path
TYPE_CHECKING = False
if TYPE_CHECKING:
import PyQt6
import PySide6
from . import ImageFile
QBuffer: type
QByteArray = Union[PyQt6.QtCore.QByteArray, PySide6.QtCore.QByteArray]
QIODevice = Union[PyQt6.QtCore.QIODevice, PySide6.QtCore.QIODevice]
QImage = Union[PyQt6.QtGui.QImage, PySide6.QtGui.QImage]
QPixmap = Union[PyQt6.QtGui.QPixmap, PySide6.QtGui.QPixmap]
# Resolved binding tag ("6" for PyQt6, "side6" for PySide6), or None.
qt_version: str | None
qt_versions = [
    ["6", "PyQt6"],
    ["side6", "PySide6"],
]

# If a version has already been imported, attempt it first
qt_versions.sort(key=lambda version: version[1] in sys.modules, reverse=True)
for version, qt_module in qt_versions:
    try:
        qRgba: Callable[[int, int, int, int], int]
        if qt_module == "PyQt6":
            from PyQt6.QtCore import QBuffer, QIODevice
            from PyQt6.QtGui import QImage, QPixmap, qRgba
        elif qt_module == "PySide6":
            from PySide6.QtCore import QBuffer, QIODevice
            from PySide6.QtGui import QImage, QPixmap, qRgba
    except (ImportError, RuntimeError):
        # Binding missing or unusable; try the next candidate.
        continue
    qt_is_installed = True
    qt_version = version
    break
else:
    # No Qt binding could be imported at all.
    qt_is_installed = False
    qt_version = None
def rgb(r: int, g: int, b: int, a: int = 255) -> int:
    """(Internal) Turns an RGB color into a Qt compatible color integer."""
    # qRgba packs the channels; masking keeps the unsigned 32-bit
    # bit pattern of the resulting value.
    packed = qRgba(r, g, b, a)
    return packed & 0xFFFFFFFF
def fromqimage(im: QImage | QPixmap) -> ImageFile.ImageFile:
    """
    :param im: QImage or PIL ImageQt object
    """
    buffer = QBuffer()
    qt_openmode: object
    if qt_version == "6":
        try:
            qt_openmode = getattr(QIODevice, "OpenModeFlag")
        except AttributeError:
            # Fall back for bindings that expose the enum under "OpenMode".
            qt_openmode = getattr(QIODevice, "OpenMode")
    else:
        qt_openmode = QIODevice
    buffer.open(getattr(qt_openmode, "ReadWrite"))
    # preserve alpha channel with png
    # otherwise ppm is more friendly with Image.open
    if im.hasAlphaChannel():
        im.save(buffer, "png")
    else:
        im.save(buffer, "ppm")

    # Copy the Qt buffer into a plain BytesIO for Image.open.
    b = BytesIO()
    b.write(buffer.data())
    buffer.close()
    b.seek(0)

    return Image.open(b)
def fromqpixmap(im: QPixmap) -> ImageFile.ImageFile:
    """Convert a ``QPixmap`` to a PIL image via :py:func:`fromqimage`."""
    return fromqimage(im)
def align8to32(bytes: bytes, width: int, mode: str) -> bytes:
    """
    converts each scanline of data from 8 bit to 32 bit aligned

    Zero bytes are appended to every row so that each scanline starts on
    a 4-byte boundary; data that is already aligned is returned as-is.
    """
    bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode]

    # Row size in bytes, rounding a partial trailing byte up.
    bits_per_line = bits_per_pixel * width
    whole, leftover = divmod(bits_per_line, 8)
    bytes_per_line = whole + (1 if leftover else 0)

    padding = -bytes_per_line % 4
    if not padding:
        # already 32 bit aligned by luck
        return bytes

    pad = b"\x00" * padding
    rows = len(bytes) // bytes_per_line
    return b"".join(
        bytes[row * bytes_per_line : (row + 1) * bytes_per_line] + pad
        for row in range(rows)
    )
def _toqclass_helper(im: Image.Image | str | QByteArray) -> dict[str, Any]:
    """Convert *im* into the pieces a QImage constructor needs.

    :returns: dict with ``data``, ``size``, ``format`` and ``colortable``.
    :raises ValueError: for image modes with no matching Qt format.
    """
    data = None
    colortable = None
    exclusive_fp = False

    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        im = str(im.toUtf8(), "utf-8")
    if is_path(im):
        im = Image.open(im)
        exclusive_fp = True
    assert isinstance(im, Image.Image)

    # Qt 6 exposes the format enum under QImage.Format; otherwise on QImage.
    qt_format = getattr(QImage, "Format") if qt_version == "6" else QImage
    if im.mode == "1":
        format = getattr(qt_format, "Format_Mono")
    elif im.mode == "L":
        format = getattr(qt_format, "Format_Indexed8")
        # Grayscale ramp as the indexed color table.
        colortable = [rgb(i, i, i) for i in range(256)]
    elif im.mode == "P":
        format = getattr(qt_format, "Format_Indexed8")
        palette = im.getpalette()
        assert palette is not None
        colortable = [rgb(*palette[i : i + 3]) for i in range(0, len(palette), 3)]
    elif im.mode == "RGB":
        # Populate the 4th channel with 255
        im = im.convert("RGBA")
        data = im.tobytes("raw", "BGRA")
        format = getattr(qt_format, "Format_RGB32")
    elif im.mode == "RGBA":
        data = im.tobytes("raw", "BGRA")
        format = getattr(qt_format, "Format_ARGB32")
    elif im.mode == "I;16":
        # NOTE(review): scales values by 256, presumably to fill the
        # 16-bit grayscale range — confirm against Format_Grayscale16.
        im = im.point(lambda i: i * 256)
        format = getattr(qt_format, "Format_Grayscale16")
    else:
        if exclusive_fp:
            im.close()
        msg = f"unsupported image mode {repr(im.mode)}"
        raise ValueError(msg)

    size = im.size
    __data = data or align8to32(im.tobytes(), size[0], im.mode)
    if exclusive_fp:
        # We opened the file above, so we are responsible for closing it.
        im.close()
    return {"data": __data, "size": size, "format": format, "colortable": colortable}
if qt_is_installed:

    class ImageQt(QImage):  # type: ignore[misc]
        def __init__(self, im: Image.Image | str | QByteArray) -> None:
            """
            An PIL image wrapper for Qt.  This is a subclass of PyQt's QImage
            class.

            :param im: A PIL Image object, or a file name (given either as
                Python string or a PyQt string object).
            """
            im_data = _toqclass_helper(im)
            # must keep a reference, or Qt will crash!
            # All QImage constructors that take data operate on an existing
            # buffer, so this buffer has to hang on for the life of the image.
            # Fixes https://github.com/python-pillow/Pillow/issues/1370
            self.__data = im_data["data"]
            super().__init__(
                self.__data,
                im_data["size"][0],
                im_data["size"][1],
                im_data["format"],
            )
            if im_data["colortable"]:
                # Indexed formats need an explicit color table.
                self.setColorTable(im_data["colortable"])
def toqimage(im: Image.Image | str | QByteArray) -> ImageQt:
    """Convert a PIL image (or filename/QByteArray) to an :class:`ImageQt`."""
    return ImageQt(im)
def toqpixmap(im: Image.Image | str | QByteArray) -> QPixmap:
    """Convert a PIL image (or filename/QByteArray) to a ``QPixmap``."""
    qimage = toqimage(im)
    pixmap = getattr(QPixmap, "fromImage")(qimage)
    if qt_version == "6":
        # NOTE(review): detach() forces a deep copy on Qt 6 — presumably so
        # the pixmap does not share the ImageQt buffer; confirm.
        pixmap.detach()
    return pixmap
venv\Lib\site-packages\PIL\ImageSequence.py
#
# The Python Imaging Library.
# $Id$
#
# sequence support classes
#
# history:
# 1997-02-20 fl Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
##
from __future__ import annotations
from typing import Callable
from . import Image
class Iterator:
    """
    Iterator over the frames of an image sequence.

    Supports both the iterator protocol and ``[]`` indexing; indexing a
    nonexistent frame raises an :py:exc:`IndexError`.

    :param im: An image object with a ``seek`` method.
    """

    def __init__(self, im: Image.Image) -> None:
        if not hasattr(im, "seek"):
            msg = "im must have seek method"
            raise AttributeError(msg)
        self.im = im
        # Some plugins expose a minimum frame number; default to 0.
        self.position = getattr(self.im, "_min_frame", 0)

    def __getitem__(self, ix: int) -> Image.Image:
        try:
            self.im.seek(ix)
        except EOFError as e:
            msg = "end of sequence"
            raise IndexError(msg) from e
        return self.im

    def __iter__(self) -> Iterator:
        return self

    def __next__(self) -> Image.Image:
        try:
            self.im.seek(self.position)
        except EOFError as e:
            msg = "end of sequence"
            raise StopIteration(msg) from e
        self.position += 1
        return self.im
def all_frames(
    im: Image.Image | list[Image.Image],
    func: Callable[[Image.Image], Image.Image] | None = None,
) -> list[Image.Image]:
    """
    Applies a given function to all frames in an image or a list of images.
    The frames are returned as a list of separate images.

    :param im: An image, or a list of images.
    :param func: The function to apply to all of the image frames.
    :returns: A list of images.
    """
    sources = im if isinstance(im, list) else [im]
    frames = []
    for source in sources:
        # Remember the active frame so iteration does not disturb callers.
        saved = source.tell()
        frames.extend(frame.copy() for frame in Iterator(source))
        source.seek(saved)
    return [func(frame) for frame in frames] if func else frames
venv\Lib\site-packages\PIL\ImageShow.py
#
# The Python Imaging Library.
# $Id$
#
# im.show() drivers
#
# History:
# 2008-04-06 fl Created
#
# Copyright (c) Secret Labs AB 2008.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import abc
import os
import shutil
import subprocess
import sys
from shlex import quote
from typing import Any
from . import Image
_viewers = []
def register(viewer: type[Viewer] | Viewer, order: int = 1) -> None:
    """
    The :py:func:`register` function is used to register additional viewers::

        from PIL import ImageShow

        ImageShow.register(MyViewer())  # MyViewer will be used as a last resort
        ImageShow.register(MySecondViewer(), 0)  # MySecondViewer will be prioritised
        ImageShow.register(ImageShow.XVViewer(), 0)  # XVViewer will be prioritised

    :param viewer: The viewer to be registered.
    :param order:
        Zero or a negative integer to prepend this viewer to the list,
        a positive integer to append it.
    """
    # Accept a Viewer subclass and instantiate it on the caller's behalf.
    if isinstance(viewer, type) and issubclass(viewer, Viewer):
        viewer = viewer()
    position = len(_viewers) if order > 0 else 0
    _viewers.insert(position, viewer)
def show(image: Image.Image, title: str | None = None, **options: Any) -> bool:
    r"""
    Display a given image.

    :param image: An image object.
    :param title: Optional title. Not all viewers can display the title.
    :param \**options: Additional viewer options.
    :returns: ``True`` if a suitable viewer was found, ``False`` otherwise.
    """
    # any() stops at the first viewer that reports success.
    return any(viewer.show(image, title=title, **options) for viewer in _viewers)
class Viewer:
    """Base class for viewers."""

    # main api

    def show(self, image: Image.Image, **options: Any) -> int:
        """
        The main function for displaying an image.
        Converts the given image to the target format and displays it.
        """
        if not (
            image.mode in ("1", "RGBA")
            or (self.format == "PNG" and image.mode in ("I;16", "LA"))
        ):
            # Convert to the mode's base (e.g. "P" -> "RGB") so the saved
            # temporary file uses a broadly supported mode.
            base = Image.getmodebase(image.mode)
            if image.mode != base:
                image = image.convert(base)

        return self.show_image(image, **options)

    # hook methods

    format: str | None = None
    """The format to convert the image into."""
    options: dict[str, Any] = {}
    """Additional options used to convert the image."""

    def get_format(self, image: Image.Image) -> str | None:
        """Return format name, or ``None`` to save as PGM/PPM."""
        return self.format

    def get_command(self, file: str, **options: Any) -> str:
        """
        Returns the command used to display the file.
        Not implemented in the base class.
        """
        msg = "unavailable in base viewer"
        raise NotImplementedError(msg)

    def save_image(self, image: Image.Image) -> str:
        """Save to temporary file and return filename."""
        return image._dump(format=self.get_format(image), **self.options)

    def show_image(self, image: Image.Image, **options: Any) -> int:
        """Display the given image."""
        return self.show_file(self.save_image(image), **options)

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        :raises FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        os.system(self.get_command(path, **options))  # nosec
        return 1
# --------------------------------------------------------------------
class WindowsViewer(Viewer):
    """The default viewer on Windows is the default system application for PNG files."""

    format = "PNG"
    options = {"compress_level": 1, "save_all": True}

    def get_command(self, file: str, **options: Any) -> str:
        # "start /WAIT" opens the default PNG handler, the ping acts as a
        # short delay, then the temporary file is deleted.
        return (
            f'start "Pillow" /WAIT "{file}" '
            "&& ping -n 4 127.0.0.1 >NUL "
            f'&& del /f "{file}"'
        )

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        :raises FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        subprocess.Popen(
            self.get_command(path, **options),
            shell=True,
            creationflags=getattr(subprocess, "CREATE_NO_WINDOW"),
        )  # nosec
        return 1


if sys.platform == "win32":
    register(WindowsViewer)
class MacViewer(Viewer):
    """The default viewer on macOS using ``Preview.app``."""

    format = "PNG"
    options = {"compress_level": 1, "save_all": True}

    def get_command(self, file: str, **options: Any) -> str:
        # on darwin open returns immediately resulting in the temp
        # file removal while app is opening
        command = "open -a Preview.app"
        command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&"
        return command

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        :raises FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        subprocess.call(["open", "-a", "Preview.app", path])
        # Spawn a helper Python process that deletes the temp file after
        # 20 seconds, since "open" returns before Preview reads the file.
        pyinstaller = getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS")
        executable = (not pyinstaller and sys.executable) or shutil.which("python3")
        if executable:
            subprocess.Popen(
                [
                    executable,
                    "-c",
                    "import os, sys, time; time.sleep(20); os.remove(sys.argv[1])",
                    path,
                ]
            )
        return 1


if sys.platform == "darwin":
    register(MacViewer)
class UnixViewer(abc.ABC, Viewer):
    """Abstract base for Unix command-line viewers."""

    format = "PNG"
    options = {"compress_level": 1, "save_all": True}

    @abc.abstractmethod
    def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]:
        # Subclasses return (command with flags, executable name).
        pass

    def get_command(self, file: str, **options: Any) -> str:
        command = self.get_command_ex(file, **options)[0]
        return f"{command} {quote(file)}"
class XDGViewer(UnixViewer):
    """
    The freedesktop.org ``xdg-open`` command.
    """

    def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]:
        executable = "xdg-open"
        return executable, executable

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        subprocess.Popen(["xdg-open", path])
        return 1
class DisplayViewer(UnixViewer):
    """
    The ImageMagick ``display`` command.
    This viewer supports the ``title`` parameter.
    """

    def get_command_ex(
        self, file: str, title: str | None = None, **options: Any
    ) -> tuple[str, str]:
        command = executable = "display"
        if title:
            command += f" -title {quote(title)}"
        return command, executable

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        :raises FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        args = ["display"]
        # Forward the optional window title to the display command.
        title = options.get("title")
        if title:
            args += ["-title", title]
        args.append(path)

        subprocess.Popen(args)
        return 1
class GmDisplayViewer(UnixViewer):
    """The GraphicsMagick ``gm display`` command."""

    def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]:
        return "gm display", "gm"

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        subprocess.Popen(["gm", "display", path])
        return 1
class EogViewer(UnixViewer):
    """The GNOME Image Viewer ``eog`` command."""

    def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]:
        return "eog -n", "eog"

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        subprocess.Popen(["eog", "-n", path])
        return 1
class XVViewer(UnixViewer):
    """
    The X Viewer ``xv`` command.
    This viewer supports the ``title`` parameter.
    """

    def get_command_ex(
        self, file: str, title: str | None = None, **options: Any
    ) -> tuple[str, str]:
        # note: xv is pretty outdated. most modern systems have
        # imagemagick's display command instead.
        command = executable = "xv"
        if title:
            command += f" -name {quote(title)}"
        return command, executable

    def show_file(self, path: str, **options: Any) -> int:
        """
        Display given file.

        :raises FileNotFoundError: if ``path`` does not exist.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        args = ["xv"]
        # xv uses -name (not -title) for the window title.
        title = options.get("title")
        if title:
            args += ["-name", title]
        args.append(path)

        subprocess.Popen(args)
        return 1
if sys.platform not in ("win32", "darwin"):  # unixoids
    # Register every viewer whose executable is on PATH, most preferred first.
    if shutil.which("xdg-open"):
        register(XDGViewer)
    if shutil.which("display"):
        register(DisplayViewer)
    if shutil.which("gm"):
        register(GmDisplayViewer)
    if shutil.which("eog"):
        register(EogViewer)
    if shutil.which("xv"):
        register(XVViewer)
class IPythonViewer(Viewer):
    """The viewer for IPython frontends."""

    def show_image(self, image: Image.Image, **options: Any) -> int:
        # Hand the image to IPython's rich display machinery.
        ipython_display(image)
        return 1


try:
    from IPython.display import display as ipython_display
except ImportError:
    # IPython not available; the viewer is simply not registered.
    pass
else:
    register(IPythonViewer)
if __name__ == "__main__":
    # CLI usage: python3 ImageShow.py imagefile [title]
    if len(sys.argv) < 2:
        print("Syntax: python3 ImageShow.py imagefile [title]")
        sys.exit()

    with Image.open(sys.argv[1]) as im:
        print(show(im, *sys.argv[2:]))
venv\Lib\site-packages\PIL\ImageStat.py
#
# The Python Imaging Library.
# $Id$
#
# global image statistics
#
# History:
# 1996-04-05 fl Created
# 1997-05-21 fl Added mask; added rms, var, stddev attributes
# 1997-08-05 fl Added median
# 1998-07-05 hk Fixed integer overflow error
#
# Notes:
# This class shows how to implement delayed evaluation of attributes.
# To get a certain value, simply access the corresponding attribute.
# The __getattr__ dispatcher takes care of the rest.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import math
from functools import cached_property
from . import Image
class Stat:
    """Lazily-computed per-band statistics over a 256-bin histogram."""

    def __init__(
        self, image_or_list: Image.Image | list[int], mask: Image.Image | None = None
    ) -> None:
        """
        Calculate statistics for the given image. If a mask is included,
        only the regions covered by that mask are included in the
        statistics. You can also pass in a previously calculated histogram.

        :param image: A PIL image, or a precalculated histogram.

            .. note::

                For a PIL image, calculations rely on the
                :py:meth:`~PIL.Image.Image.histogram` method. The pixel counts
                are grouped into 256 bins, even if the image has more than 8
                bits per channel. So ``I`` and ``F`` mode images have a maximum
                ``mean``, ``median`` and ``rms`` of 255, and cannot have an
                ``extrema`` maximum of more than 255.

        :param mask: An optional mask.
        """
        if isinstance(image_or_list, Image.Image):
            self.h = image_or_list.histogram(mask)
        elif isinstance(image_or_list, list):
            self.h = image_or_list
        else:
            msg = "first argument must be image or list"  # type: ignore[unreachable]
            raise TypeError(msg)
        # One index per 256-bin band in the histogram.
        self.bands = list(range(len(self.h) // 256))

    @cached_property
    def extrema(self) -> list[tuple[int, int]]:
        """
        Min/max values for each band in the image.

        .. note::

            This relies on the :py:meth:`~PIL.Image.Image.histogram` method, and
            simply returns the low and high bins used. This is correct for
            images with 8 bits per channel, but fails for other modes such as
            ``I`` or ``F``. Instead, use :py:meth:`~PIL.Image.Image.getextrema` to
            return per-band extrema for the image. This is more correct and
            efficient because, for non-8-bit modes, the histogram method uses
            :py:meth:`~PIL.Image.Image.getextrema` to determine the bins used.
        """

        def band_minmax(counts: list[int]) -> tuple[int, int]:
            lo, hi = 255, 0
            for value in range(256):
                if counts[value]:
                    lo = value
                    break
            for value in range(255, -1, -1):
                if counts[value]:
                    hi = value
                    break
            return lo, hi

        return [band_minmax(self.h[i : i + 256]) for i in range(0, len(self.h), 256)]

    @cached_property
    def count(self) -> list[int]:
        """Total number of pixels for each band in the image."""
        return [sum(self.h[start : start + 256]) for start in range(0, len(self.h), 256)]

    @cached_property
    def sum(self) -> list[float]:
        """Sum of all pixels for each band in the image."""
        totals = []
        for start in range(0, len(self.h), 256):
            acc = 0.0
            for value in range(256):
                acc += value * self.h[start + value]
            totals.append(acc)
        return totals

    @cached_property
    def sum2(self) -> list[float]:
        """Squared sum of all pixels for each band in the image."""
        totals = []
        for start in range(0, len(self.h), 256):
            acc = 0.0
            for value in range(256):
                acc += (value**2) * float(self.h[start + value])
            totals.append(acc)
        return totals

    @cached_property
    def mean(self) -> list[float]:
        """Average (arithmetic mean) pixel level for each band in the image."""
        return [self.sum[band] / self.count[band] for band in self.bands]

    @cached_property
    def median(self) -> list[int]:
        """Median pixel level for each band in the image."""
        medians = []
        for band in self.bands:
            half = self.count[band] // 2
            base = band * 256
            cumulative = 0
            level = 0
            # First level whose cumulative count exceeds half the pixels.
            for level in range(256):
                cumulative += self.h[base + level]
                if cumulative > half:
                    break
            medians.append(level)
        return medians

    @cached_property
    def rms(self) -> list[float]:
        """RMS (root-mean-square) for each band in the image."""
        return [math.sqrt(self.sum2[band] / self.count[band]) for band in self.bands]

    @cached_property
    def var(self) -> list[float]:
        """Variance for each band in the image."""
        return [
            (self.sum2[band] - (self.sum[band] ** 2.0) / self.count[band])
            / self.count[band]
            for band in self.bands
        ]

    @cached_property
    def stddev(self) -> list[float]:
        """Standard deviation for each band in the image."""
        return [math.sqrt(self.var[band]) for band in self.bands]


Global = Stat  # compatibility
venv\Lib\site-packages\PIL\ImageTk.py
#
# The Python Imaging Library.
# $Id$
#
# a Tk display interface
#
# History:
# 96-04-08 fl Created
# 96-09-06 fl Added getimage method
# 96-11-01 fl Rewritten, removed image attribute and crop method
# 97-05-09 fl Use PyImagingPaste method instead of image type
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
# 97-05-17 fl Support the "pilbitmap" booster patch
# 97-06-05 fl Added file= and data= argument to image constructors
# 98-03-09 fl Added width and height methods to Image classes
# 98-07-02 fl Use default mode for "P" images without palette attribute
# 98-07-02 fl Explicitly destroy Tkinter image objects
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
# 99-07-26 fl Automatically hook into Tkinter (if possible)
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
#
# Copyright (c) 1997-1999 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import tkinter
from io import BytesIO
from typing import Any
from . import Image, ImageFile
TYPE_CHECKING = False
if TYPE_CHECKING:
from ._typing import CapsuleType
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
def _get_image_from_kw(kw: dict[str, Any]) -> ImageFile.ImageFile | None:
source = None
if "file" in kw:
source = kw.pop("file")
elif "data" in kw:
source = BytesIO(kw.pop("data"))
if not source:
return None
return Image.open(source)
def _pyimagingtkcall(
    command: str, photo: PhotoImage | tkinter.PhotoImage, ptr: CapsuleType
) -> None:
    """Invoke a Tk command from the _imagingtk extension, loading it on demand."""
    tk = photo.tk
    try:
        tk.call(command, photo, repr(ptr))
    except tkinter.TclError:
        # activate Tkinter hook
        # may raise an error if it cannot attach to Tkinter
        from . import _imagingtk

        _imagingtk.tkinit(tk.interpaddr())
        # Retry once now that the extension is attached.
        tk.call(command, photo, repr(ptr))
# --------------------------------------------------------------------
# PhotoImage
class PhotoImage:
    """
    A Tkinter-compatible photo image. This can be used
    everywhere Tkinter expects an image object. If the image is an RGBA
    image, pixels having alpha 0 are treated as transparent.

    The constructor takes either a PIL image, or a mode and a size.
    Alternatively, you can use the ``file`` or ``data`` options to initialize
    the photo image object.

    :param image: Either a PIL image, or a mode string. If a mode string is
                  used, a size must also be given.
    :param size: If the first argument is a mode string, this defines the size
                 of the image.
    :keyword file: A filename to load the image from (using
                   ``Image.open(file)``).
    :keyword data: An 8-bit string containing image data (as loaded from an
                   image file).
    """

    def __init__(
        self,
        image: Image.Image | str | None = None,
        size: tuple[int, int] | None = None,
        **kw: Any,
    ) -> None:
        # Tk compatibility: file or data
        if image is None:
            image = _get_image_from_kw(kw)

        if image is None:
            msg = "Image is required"
            raise ValueError(msg)
        elif isinstance(image, str):
            mode = image
            image = None

            if size is None:
                msg = "If first argument is mode, size is required"
                raise ValueError(msg)
        else:
            # got an image instead of a mode
            mode = image.mode
            if mode == "P":
                # palette mapped data
                image.apply_transparency()
                image.load()
                mode = image.palette.mode if image.palette else "RGB"
            size = image.size
            kw["width"], kw["height"] = size

        if mode not in ["1", "L", "RGB", "RGBA"]:
            # Fall back to the closest base mode Tk can display.
            mode = Image.getmodebase(mode)

        self.__mode = mode
        self.__size = size
        self.__photo = tkinter.PhotoImage(**kw)
        self.tk = self.__photo.tk
        if image:
            self.paste(image)

    def __del__(self) -> None:
        # Explicitly free the Tk image; ignore errors during teardown.
        try:
            name = self.__photo.name
        except AttributeError:
            return
        self.__photo.name = None
        try:
            self.__photo.tk.call("image", "delete", name)
        except Exception:
            pass  # ignore internal errors

    def __str__(self) -> str:
        """
        Get the Tkinter photo image identifier. This method is automatically
        called by Tkinter whenever a PhotoImage object is passed to a Tkinter
        method.

        :return: A Tkinter photo image identifier (a string).
        """
        return str(self.__photo)

    def width(self) -> int:
        """
        Get the width of the image.

        :return: The width, in pixels.
        """
        return self.__size[0]

    def height(self) -> int:
        """
        Get the height of the image.

        :return: The height, in pixels.
        """
        return self.__size[1]

    def paste(self, im: Image.Image) -> None:
        """
        Paste a PIL image into the photo image. Note that this can
        be very slow if the photo image is displayed.

        :param im: A PIL image. The size must match the target region. If the
                   mode does not match, the image is converted to the mode of
                   the bitmap image.
        """
        # convert to blittable
        ptr = im.getim()
        image = im.im
        if not image.isblock() or im.mode != self.__mode:
            # Copy into a contiguous block in the photo's mode first.
            block = Image.core.new_block(self.__mode, im.size)
            image.convert2(block, image)  # convert directly between buffers

            ptr = block.ptr

        _pyimagingtkcall("PyImagingPhoto", self.__photo, ptr)
# --------------------------------------------------------------------
# BitmapImage
class BitmapImage:
    """
    A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
    expects an image object.

    The given image must have mode "1". Pixels having value 0 are treated as
    transparent. Options, if any, are passed on to Tkinter. The most commonly
    used option is ``foreground``, which is used to specify the color for the
    non-transparent parts. See the Tkinter documentation for information on
    how to specify colours.

    :param image: A PIL image.
    """

    def __init__(self, image: Image.Image | None = None, **kw: Any) -> None:
        # Tk compatibility: file or data
        if image is None:
            image = _get_image_from_kw(kw)

        if image is None:
            msg = "Image is required"
            raise ValueError(msg)
        self.__mode = image.mode
        self.__size = image.size

        # Encode via XBM (tobitmap) and let Tk parse the data directly.
        self.__photo = tkinter.BitmapImage(data=image.tobitmap(), **kw)

    def __del__(self) -> None:
        # Explicitly free the Tk image; ignore errors during teardown.
        try:
            name = self.__photo.name
        except AttributeError:
            return
        self.__photo.name = None
        try:
            self.__photo.tk.call("image", "delete", name)
        except Exception:
            pass  # ignore internal errors

    def width(self) -> int:
        """
        Get the width of the image.

        :return: The width, in pixels.
        """
        return self.__size[0]

    def height(self) -> int:
        """
        Get the height of the image.

        :return: The height, in pixels.
        """
        return self.__size[1]

    def __str__(self) -> str:
        """
        Get the Tkinter bitmap image identifier. This method is automatically
        called by Tkinter whenever a BitmapImage object is passed to a Tkinter
        method.

        :return: A Tkinter bitmap image identifier (a string).
        """
        return str(self.__photo)
def getimage(photo: PhotoImage) -> Image.Image:
    """Copies the contents of a PhotoImage to a PIL image memory."""
    im = Image.new("RGBA", (photo.width(), photo.height()))
    # The Tk hook fills the RGBA buffer in place.
    _pyimagingtkcall("PyImagingPhotoGet", photo, im.getim())

    return im
venv\Lib\site-packages\PIL\ImageTransform.py
#
# The Python Imaging Library.
# $Id$
#
# transform wrappers
#
# History:
# 2002-04-08 fl Created
#
# Copyright (c) 2002 by Secret Labs AB
# Copyright (c) 2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from collections.abc import Sequence
from typing import Any
from . import Image
class Transform(Image.ImageTransformHandler):
    """Base class for other transforms defined in :py:mod:`~PIL.ImageTransform`."""

    # Each subclass sets this to the matching Image.Transform identifier.
    method: Image.Transform

    def __init__(self, data: Sequence[Any]) -> None:
        self.data = data

    def getdata(self) -> tuple[Image.Transform, Sequence[int]]:
        """Return the (method, coefficient-data) pair for the engine."""
        return self.method, self.data

    def transform(
        self,
        size: tuple[int, int],
        image: Image.Image,
        **options: Any,
    ) -> Image.Image:
        """Perform the transform. Called from :py:meth:`.Image.transform`."""
        # can be overridden
        method, data = self.getdata()
        return image.transform(size, method, data, **options)
class AffineTransform(Transform):
    """
    Define an affine image transform.

    This transform takes a 6-tuple (a, b, c, d, e, f) which contains the first
    two rows from the inverse of an affine transform matrix. For each pixel
    (x, y) in the output image, the new value is taken from a position (a x +
    b y + c, d x + e y + f) in the input image, rounded to nearest pixel.

    This transform can be used to scale, translate, rotate, and shear the
    original image.

    See :py:meth:`.Image.transform`

    :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows
        from the inverse of an affine transform matrix.
    """

    method = Image.Transform.AFFINE
class PerspectiveTransform(Transform):
    """
    Define a perspective image transform.

    This transform takes an 8-tuple (a, b, c, d, e, f, g, h). For each pixel
    (x, y) in the output image, the new value is taken from a position
    ((a x + b y + c) / (g x + h y + 1), (d x + e y + f) / (g x + h y + 1)) in
    the input image, rounded to nearest pixel.

    This transform can be used to scale, translate, rotate, and shear the
    original image.

    See :py:meth:`.Image.transform`

    :param matrix: An 8-tuple (a, b, c, d, e, f, g, h).
    """

    method = Image.Transform.PERSPECTIVE
class ExtentTransform(Transform):
    """
    Define a transform to extract a subregion from an image.

    Maps a rectangle (defined by two corners) from the image to a rectangle of
    the given size. The resulting image will contain data sampled from between
    the corners, such that (x0, y0) in the input image will end up at (0,0) in
    the output image, and (x1, y1) at size.

    This transform can be used to crop, stretch, shrink, or mirror an arbitrary
    rectangle in the current image. It is slightly slower than crop, but about
    as fast as a corresponding resize operation.

    See :py:meth:`.Image.transform`

    :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the
        input image's coordinate system. See :ref:`coordinate-system`.
    """

    method = Image.Transform.EXTENT
class QuadTransform(Transform):
    """
    Define a quad image transform.

    Maps a quadrilateral (a region defined by four corners) from the image to a
    rectangle of the given size.

    See :py:meth:`.Image.transform`

    :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the
        upper left, lower left, lower right, and upper right corner of the
        source quadrilateral.
    """

    method = Image.Transform.QUAD
class MeshTransform(Transform):
    """
    Define a mesh image transform. A mesh transform consists of one or more
    individual quad transforms.

    See :py:meth:`.Image.transform`

    :param data: A list of (bbox, quad) tuples.
    """

    method = Image.Transform.MESH
venv\Lib\site-packages\PIL\ImageWin.py
#
# The Python Imaging Library.
# $Id$
#
# a Windows DIB display interface
#
# History:
# 1996-05-20 fl Created
# 1996-09-20 fl Fixed subregion exposure
# 1997-09-21 fl Added draw primitive (for tzPrint)
# 2003-05-21 fl Added experimental Window/ImageWindow classes
# 2003-09-05 fl Added fromstring/tostring methods
#
# Copyright (c) Secret Labs AB 1997-2003.
# Copyright (c) Fredrik Lundh 1996-2003.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image
class HDC:
    """
    Wraps an HDC integer. The resulting object can be passed to the
    :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
    methods.
    """

    def __init__(self, dc: int) -> None:
        # Raw device-context handle, kept as a plain integer.
        self.dc = dc

    def __int__(self) -> int:
        # Support int(hdc) so callers can pass this object wherever a bare
        # handle integer is expected.
        return self.dc
class HWND:
    """
    Wraps an HWND integer. The resulting object can be passed to the
    :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
    methods, instead of a DC.
    """

    def __init__(self, wnd: int) -> None:
        # Raw window handle, kept as a plain integer.
        self.wnd = wnd

    def __int__(self) -> int:
        # Support int(hwnd) so callers can pass this object wherever a bare
        # handle integer is expected.
        return self.wnd
class Dib:
    """
    A Windows bitmap with the given mode and size. The mode can be one of "1",
    "L", "P", or "RGB".

    If the display requires a palette, this constructor creates a suitable
    palette and associates it with the image. For an "L" image, 128 graylevels
    are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
    with 20 graylevels.

    To make sure that palettes work properly under Windows, you must call the
    ``palette`` method upon certain events from Windows.

    :param image: Either a PIL image, or a mode string. If a mode string is
                  used, a size must also be given. The mode can be one of "1",
                  "L", "P", or "RGB".
    :param size: If the first argument is a mode string, this
                 defines the size of the image.
    """

    def __init__(
        self, image: Image.Image | str, size: tuple[int, int] | None = None
    ) -> None:
        if isinstance(image, str):
            mode = image
            image = ""
            if size is None:
                msg = "If first argument is mode, size is required"
                raise ValueError(msg)
        else:
            mode = image.mode
            size = image.size
        if mode not in ["1", "L", "P", "RGB"]:
            # Reduce other modes to the closest displayable base mode.
            mode = Image.getmodebase(mode)
        self.image = Image.core.display(mode, size)
        self.mode = mode
        self.size = size
        if image:
            assert not isinstance(image, str)
            self.paste(image)

    def expose(self, handle: int | HDC | HWND) -> None:
        """
        Copy the bitmap contents to a device context.

        :param handle: Device context (HDC), cast to a Python integer, or an
                       HDC or HWND instance. In PythonWin, you can use
                       ``CDC.GetHandleAttrib()`` to get a suitable handle.
        """
        handle_int = int(handle)
        if isinstance(handle, HWND):
            # A window handle: borrow a DC for the window, use it, then
            # release it with (window handle, DC).
            dc = self.image.getdc(handle_int)
            try:
                self.image.expose(dc)
            finally:
                self.image.releasedc(handle_int, dc)
        else:
            self.image.expose(handle_int)

    def draw(
        self,
        handle: int | HDC | HWND,
        dst: tuple[int, int, int, int],
        src: tuple[int, int, int, int] | None = None,
    ) -> None:
        """
        Same as expose, but allows you to specify where to draw the image, and
        what part of it to draw.

        The destination and source areas are given as 4-tuple rectangles. If
        the source is omitted, the entire image is copied. If the source and
        the destination have different sizes, the image is resized as
        necessary.

        :param handle: Device context (HDC), cast to a Python integer, or an
                       HDC or HWND instance.
        :param dst: A 4-tuple destination rectangle.
        :param src: A 4-tuple source rectangle; defaults to the whole image.
        """
        if src is None:
            src = (0, 0) + self.size
        handle_int = int(handle)
        if isinstance(handle, HWND):
            dc = self.image.getdc(handle_int)
            try:
                self.image.draw(dc, dst, src)
            finally:
                self.image.releasedc(handle_int, dc)
        else:
            self.image.draw(handle_int, dst, src)

    def query_palette(self, handle: int | HDC | HWND) -> int:
        """
        Installs the palette associated with the image in the given device
        context.

        This method should be called upon **QUERYNEWPALETTE** and
        **PALETTECHANGED** events from Windows. If this method returns a
        non-zero value, one or more display palette entries were changed, and
        the image should be redrawn.

        :param handle: Device context (HDC), cast to a Python integer, or an
                       HDC or HWND instance.
        :return: The number of entries that were changed (if one or more entries,
                 this indicates that the image should be redrawn).
        """
        handle_int = int(handle)
        if isinstance(handle, HWND):
            # Fix: mirror expose()/draw(). The previous code reassigned
            # ``handle`` to the DC and then called ``releasedc(handle, handle)``,
            # passing the DC where the window handle is expected.
            dc = self.image.getdc(handle_int)
            try:
                result = self.image.query_palette(dc)
            finally:
                self.image.releasedc(handle_int, dc)
        else:
            result = self.image.query_palette(handle_int)
        return result

    def paste(
        self, im: Image.Image, box: tuple[int, int, int, int] | None = None
    ) -> None:
        """
        Paste a PIL image into the bitmap image.

        :param im: A PIL image. The size must match the target region.
                   If the mode does not match, the image is converted to the
                   mode of the bitmap image.
        :param box: A 4-tuple defining the left, upper, right, and
                    lower pixel coordinate. See :ref:`coordinate-system`. If
                    None is given instead of a tuple, all of the image is
                    assumed.
        """
        im.load()
        if self.mode != im.mode:
            im = im.convert(self.mode)
        if box:
            self.image.paste(im.im, box)
        else:
            self.image.paste(im.im)

    def frombytes(self, buffer: bytes) -> None:
        """
        Load display memory contents from byte data.

        :param buffer: A buffer containing display data (usually
                       data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`)
        """
        self.image.frombytes(buffer)

    def tobytes(self) -> bytes:
        """
        Copy display memory contents to bytes object.

        :return: A bytes object containing display data.
        """
        return self.image.tobytes()
class Window:
    """Create a Window with the given title size."""

    def __init__(
        self, title: str = "PIL", width: int | None = None, height: int | None = None
    ) -> None:
        self.hwnd = Image.core.createwindow(
            title, self.__dispatcher, width or 0, height or 0
        )

    def __dispatcher(self, action: str, *args: int) -> None:
        # Route a low-level window event to the matching ui_handle_* method.
        handler = getattr(self, "ui_handle_" + action)
        handler(*args)

    def ui_handle_clear(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None:
        pass

    def ui_handle_damage(self, x0: int, y0: int, x1: int, y1: int) -> None:
        pass

    def ui_handle_destroy(self) -> None:
        pass

    def ui_handle_repair(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None:
        pass

    def ui_handle_resize(self, width: int, height: int) -> None:
        pass

    def mainloop(self) -> None:
        Image.core.eventloop()
class ImageWindow(Window):
    """Create an image window which displays the given image."""

    def __init__(self, image: Image.Image | Dib, title: str = "PIL") -> None:
        # Wrap plain PIL images in a Dib so drawing works uniformly.
        dib = image if isinstance(image, Dib) else Dib(image)
        self.image = dib
        super().__init__(title, width=dib.size[0], height=dib.size[1])

    def ui_handle_repair(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None:
        # Redraw the damaged rectangle from the backing bitmap.
        self.image.draw(dc, (x0, y0, x1, y1))
venv\Lib\site-packages\PIL\ImImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# IFUNC IM file handling for PIL
#
# history:
# 1995-09-01 fl Created.
# 1997-01-03 fl Save palette images
# 1997-01-08 fl Added sequence support
# 1997-01-23 fl Added P and RGB save support
# 1997-05-31 fl Read floating point images
# 1997-06-22 fl Save floating point images
# 1997-08-27 fl Read and save 1-bit images
# 1998-06-25 fl Added support for RGB+LUT images
# 1998-07-02 fl Added support for YCC images
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 1998-12-29 fl Added I;16 support
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
# 2003-09-26 fl Added LA/PA support
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
import re
from typing import IO, Any
from . import Image, ImageFile, ImagePalette
from ._util import DeferredError
# --------------------------------------------------------------------
# Standard tags

# Header field names, used as keys into ImImageFile.info.
COMMENT = "Comment"
DATE = "Date"
EQUIPMENT = "Digitalization equipment"
FRAMES = "File size (no of images)"
LUT = "Lut"
NAME = "Name"
SCALE = "Scale (x,y)"
SIZE = "Image size (x*y)"
MODE = "Image type"

# Known standard tags. The values are unused; membership alone is checked
# when counting valid header lines in ImImageFile._open.
TAGS = {
    COMMENT: 0,
    DATE: 0,
    EQUIPMENT: 0,
    FRAMES: 0,
    LUT: 0,
    NAME: 0,
    SCALE: 0,
    SIZE: 0,
    MODE: 0,
}
# Maps the "Image type" header value to a (PIL mode, rawmode) pair.
OPEN = {
    # ifunc93/p3cfunc formats
    "0 1 image": ("1", "1"),
    "L 1 image": ("1", "1"),
    "Greyscale image": ("L", "L"),
    "Grayscale image": ("L", "L"),
    "RGB image": ("RGB", "RGB;L"),
    "RLB image": ("RGB", "RLB"),
    "RYB image": ("RGB", "RLB"),
    "B1 image": ("1", "1"),
    "B2 image": ("P", "P;2"),
    "B4 image": ("P", "P;4"),
    "X 24 image": ("RGB", "RGB"),
    "L 32 S image": ("I", "I;32"),
    "L 32 F image": ("F", "F;32"),
    # old p3cfunc formats
    "RGB3 image": ("RGB", "RGB;T"),
    "RYB3 image": ("RGB", "RYB;T"),
    # extensions
    "LA image": ("LA", "LA;L"),
    "PA image": ("LA", "PA;L"),
    "RGBA image": ("RGBA", "RGBA;L"),
    "RGBX image": ("RGB", "RGBX;L"),
    "CMYK image": ("CMYK", "CMYK;L"),
    "YCC image": ("YCbCr", "YCbCr;L"),
}

# ifunc95 extensions: generate the remaining integer/float variants.
for i in ["8", "8S", "16", "16S", "32", "32F"]:
    OPEN[f"L {i} image"] = ("F", f"F;{i}")
    OPEN[f"L*{i} image"] = ("F", f"F;{i}")
for i in ["16", "16L", "16B"]:
    OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}")
    OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}")
for i in ["32S"]:
    OPEN[f"L {i} image"] = ("I", f"I;{i}")
    OPEN[f"L*{i} image"] = ("I", f"I;{i}")
# Arbitrary bit depths 2..32 are decoded via the "bit" decoder as floats.
for j in range(2, 33):
    OPEN[f"L*{j} image"] = ("F", f"F;{j}")
# --------------------------------------------------------------------
# Read IM directory

# Matches a single "Key: value" header line (key starts with a letter).
split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
def number(s: Any) -> float:
    """Parse *s* as an int when possible, otherwise as a float."""
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
##
# Image plugin for the IFUNC IM file format.
class ImImageFile(ImageFile.ImageFile):
    """Image plugin for the IFUNC IM file format (text header + raw data)."""

    format = "IM"
    format_description = "IFUNC Image Memory"
    # Sequence support (seek) needs the file pointer kept open.
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # Quick rejection: if there's not an LF among the first
        # 100 bytes, this is (probably) not a text header.
        if b"\n" not in self.fp.read(100):
            msg = "not an IM file"
            raise SyntaxError(msg)
        self.fp.seek(0)

        # n counts recognised standard tags; at least one is required.
        n = 0

        # Default values
        self.info[MODE] = "L"
        self.info[SIZE] = (512, 512)
        self.info[FRAMES] = 1

        self.rawmode = "L"

        while True:
            s = self.fp.read(1)

            # Some versions of IFUNC uses \n\r instead of \r\n...
            if s == b"\r":
                continue

            # NUL or Ctrl-Z terminates the text header.
            if not s or s == b"\0" or s == b"\x1a":
                break

            # FIXME: this may read whole file if not a text file
            s = s + self.fp.readline()

            if len(s) > 100:
                msg = "not an IM file"
                raise SyntaxError(msg)

            if s.endswith(b"\r\n"):
                s = s[:-2]
            elif s.endswith(b"\n"):
                s = s[:-1]

            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an IM file"
                raise SyntaxError(msg) from e

            if m:
                k, v = m.group(1, 2)

                # Don't know if this is the correct encoding,
                # but a decent guess (I guess)
                k = k.decode("latin-1", "replace")
                v = v.decode("latin-1", "replace")

                # Convert value as appropriate
                if k in [FRAMES, SCALE, SIZE]:
                    # "x*y" and "x,y" forms both become numeric tuples.
                    v = v.replace("*", ",")
                    v = tuple(map(number, v.split(",")))
                    if len(v) == 1:
                        v = v[0]
                elif k == MODE and v in OPEN:
                    v, self.rawmode = OPEN[v]

                # Add to dictionary. Note that COMMENT tags are
                # combined into a list of strings.
                if k == COMMENT:
                    if k in self.info:
                        self.info[k].append(v)
                    else:
                        self.info[k] = [v]
                else:
                    self.info[k] = v

                if k in TAGS:
                    n += 1

            else:
                msg = f"Syntax error in IM header: {s.decode('ascii', 'replace')}"
                raise SyntaxError(msg)

        if not n:
            msg = "Not an IM file"
            raise SyntaxError(msg)

        # Basic attributes
        self._size = self.info[SIZE]
        self._mode = self.info[MODE]

        # Skip forward to start of image data (marked by Ctrl-Z).
        while s and not s.startswith(b"\x1a"):
            s = self.fp.read(1)
        if not s:
            msg = "File truncated"
            raise SyntaxError(msg)

        if LUT in self.info:
            # convert lookup table to palette or lut attribute
            palette = self.fp.read(768)
            greyscale = 1  # greyscale palette
            linear = 1  # linear greyscale palette
            for i in range(256):
                if palette[i] == palette[i + 256] == palette[i + 512]:
                    if palette[i] != i:
                        linear = 0
                else:
                    greyscale = 0
            if self.mode in ["L", "LA", "P", "PA"]:
                if greyscale:
                    if not linear:
                        self.lut = list(palette[:256])
                else:
                    # Colour palette: promote L/LA to palette modes.
                    if self.mode in ["L", "P"]:
                        self._mode = self.rawmode = "P"
                    elif self.mode in ["LA", "PA"]:
                        self._mode = "PA"
                        self.rawmode = "PA;L"
                    self.palette = ImagePalette.raw("RGB;L", palette)
            elif self.mode == "RGB":
                if not greyscale or not linear:
                    self.lut = list(palette)

        self.frame = 0

        self.__offset = offs = self.fp.tell()

        self._fp = self.fp  # FIXME: hack

        if self.rawmode.startswith("F;"):
            # ifunc95 formats
            try:
                # use bit decoder (if necessary)
                bits = int(self.rawmode[2:])
                if bits not in [8, 16, 32]:
                    self.tile = [
                        ImageFile._Tile(
                            "bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1)
                        )
                    ]
                    return
            except ValueError:
                pass

        if self.rawmode in ["RGB;T", "RYB;T"]:
            # Old LabEye/3PC files. Would be very surprised if anyone
            # ever stumbled upon such a file ;-)
            # Stored as three consecutive planes, one per band.
            size = self.size[0] * self.size[1]
            self.tile = [
                ImageFile._Tile("raw", (0, 0) + self.size, offs, ("G", 0, -1)),
                ImageFile._Tile("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)),
                ImageFile._Tile(
                    "raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)
                ),
            ]
        else:
            # LabEye/IFUNC files
            self.tile = [
                ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))
            ]

    @property
    def n_frames(self) -> int:
        # Frame count comes straight from the header's FRAMES tag.
        return self.info[FRAMES]

    @property
    def is_animated(self) -> bool:
        return self.info[FRAMES] > 1

    def seek(self, frame: int) -> None:
        """Select the given frame; frames are fixed-size raw planes."""
        if not self._seek_check(frame):
            return

        if isinstance(self._fp, DeferredError):
            raise self._fp.ex

        self.frame = frame

        if self.mode == "1":
            bits = 1
        else:
            bits = 8 * len(self.mode)

        # Bytes per frame, with rows padded to whole bytes.
        size = ((self.size[0] * bits + 7) // 8) * self.size[1]
        offs = self.__offset + frame * size

        self.fp = self._fp

        self.tile = [
            ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))
        ]

    def tell(self) -> int:
        return self.frame
#
# --------------------------------------------------------------------
# Save IM files

# Maps a PIL mode to the ("Image type" header value, rawmode) used on save.
SAVE = {
    # mode: (im type, raw mode)
    "1": ("0 1", "1"),
    "L": ("Greyscale", "L"),
    "LA": ("LA", "LA;L"),
    "P": ("Greyscale", "P"),
    "PA": ("LA", "PA;L"),
    "I": ("L 32S", "I;32S"),
    "I;16": ("L 16", "I;16"),
    "I;16L": ("L 16L", "I;16L"),
    "I;16B": ("L 16B", "I;16B"),
    "F": ("L 32F", "F;32F"),
    "RGB": ("RGB", "RGB;L"),
    "RGBA": ("RGBA", "RGBA;L"),
    "RGBX": ("RGBX", "RGBX;L"),
    "CMYK": ("CMYK", "CMYK;L"),
    "YCbCr": ("YCC", "YCbCr;L"),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as an IM file (512-byte text header + raw data).

    :param im: The image to save; its mode must appear in SAVE.
    :param fp: Writable binary file object.
    :param filename: Used only for the "Name:" header line.
    :raises ValueError: If the image mode cannot be saved as IM.
    """
    try:
        image_type, rawmode = SAVE[im.mode]
    except KeyError as e:
        msg = f"Cannot save {im.mode} images as IM"
        raise ValueError(msg) from e

    frames = im.encoderinfo.get("frames", 1)

    fp.write(f"Image type: {image_type} image\r\n".encode("ascii"))
    if filename:
        # Each line must be 100 characters or less,
        # or: SyntaxError("not an IM file")
        # 8 characters are used for "Name: " and "\r\n"
        # Keep just the filename, ditch the potentially overlong path
        if isinstance(filename, bytes):
            filename = filename.decode("ascii")
        name, ext = os.path.splitext(os.path.basename(filename))
        name = "".join([name[: 92 - len(ext)], ext])

        fp.write(f"Name: {name}\r\n".encode("ascii"))
    fp.write(f"Image size (x*y): {im.size[0]}*{im.size[1]}\r\n".encode("ascii"))
    fp.write(f"File size (no of images): {frames}\r\n".encode("ascii"))
    if im.mode in ["P", "PA"]:
        fp.write(b"Lut: 1\r\n")
    # Pad the header to 512 bytes and terminate it with Ctrl-Z (0x1a).
    fp.write(b"\000" * (511 - fp.tell()) + b"\032")
    if im.mode in ["P", "PA"]:
        # Write a 768-byte planar RGB palette, each plane padded to 256.
        im_palette = im.im.getpalette("RGB", "RGB;L")
        colors = len(im_palette) // 3
        palette = b""
        for i in range(3):
            palette += im_palette[colors * i : colors * (i + 1)]
            palette += b"\x00" * (256 - colors)
        fp.write(palette)  # 768 bytes
    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]
    )
#
# --------------------------------------------------------------------
# Registry

# Hook the IM reader, writer and file extension into PIL's plugin registry.
Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)
Image.register_extension(ImImageFile.format, ".im")
venv\Lib\site-packages\PIL\ImtImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# IM Tools support for PIL
#
# history:
# 1996-05-27 fl Created (read 8-bit images only)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import re
from . import Image, ImageFile
#
# --------------------------------------------------------------------

# Matches one "key value" pair in an IM Tools text header.
field = re.compile(rb"([a-z]*) ([^ \r\n]*)")
##
# Image plugin for IM Tools images.
class ImtImageFile(ImageFile.ImageFile):
    """Image plugin for IM Tools images (text header, 8-bit greyscale data)."""

    format = "IMT"
    format_description = "IM Tools"

    def _open(self) -> None:
        # Quick rejection: if there's not a LF among the first
        # 100 bytes, this is (probably) not a text header.
        assert self.fp is not None

        buffer = self.fp.read(100)
        if b"\n" not in buffer:
            msg = "not an IM file"
            raise SyntaxError(msg)

        xsize = ysize = 0

        while True:
            # Consume one byte at a time, draining the read-ahead buffer
            # before touching the file again.
            if buffer:
                s = buffer[:1]
                buffer = buffer[1:]
            else:
                s = self.fp.read(1)
            if not s:
                break

            if s == b"\x0c":
                # image data begins
                self.tile = [
                    ImageFile._Tile(
                        "raw",
                        (0, 0) + self.size,
                        # Data starts at the current file position minus
                        # whatever is still buffered but unconsumed.
                        self.fp.tell() - len(buffer),
                        self.mode,
                    )
                ]

                break

            else:
                # read key/value pair
                if b"\n" not in buffer:
                    buffer += self.fp.read(100)
                lines = buffer.split(b"\n")
                s += lines.pop(0)
                buffer = b"\n".join(lines)
                if len(s) == 1 or len(s) > 100:
                    break
                if s[0] == ord(b"*"):
                    continue  # comment

                m = field.match(s)
                if not m:
                    break
                k, v = m.group(1, 2)
                if k == b"width":
                    xsize = int(v)
                    self._size = xsize, ysize
                elif k == b"height":
                    ysize = int(v)
                    self._size = xsize, ysize
                elif k == b"pixel" and v == b"n8":
                    self._mode = "L"
#
# --------------------------------------------------------------------

# Register the IMT reader with PIL's plugin registry.
Image.register_open(ImtImageFile.format, ImtImageFile)

#
# no extension registered (".im" is simply too common)
venv\Lib\site-packages\PIL\IptcImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# IPTC/NAA file handling
#
# history:
# 1995-10-01 fl Created
# 1998-03-09 fl Cleaned up and added to PIL
# 2002-06-18 fl Added getiptcinfo helper
#
# Copyright (c) Secret Labs AB 1997-2002.
# Copyright (c) Fredrik Lundh 1995.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from collections.abc import Sequence
from io import BytesIO
from typing import cast
from . import Image, ImageFile
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._deprecate import deprecate
# Values of the IPTC (3, 120) compression field mapped to decoder names.
COMPRESSION = {1: "raw", 5: "jpeg"}
def __getattr__(name: str) -> bytes:
    """Module-level fallback providing the deprecated ``PAD`` constant."""
    if name != "PAD":
        msg = f"module '{__name__}' has no attribute '{name}'"
        raise AttributeError(msg)
    deprecate("IptcImagePlugin.PAD", 12)
    return b"\0\0\0\0"
#
# Helpers
def _i(c: bytes) -> int:
    # Left-pad with zeros so fields of 1-4 bytes all decode as a
    # big-endian 32-bit integer.
    padded = b"\0\0\0\0" + c
    return i32(padded[-4:])
def _i8(c: int | bytes) -> int:
    # Accept either a bare int or the first byte of a bytes value.
    if isinstance(c, int):
        return c
    return c[0]
def i(c: bytes) -> int:
    """.. deprecated:: 10.2.0"""
    # Public alias kept for backwards compatibility; internals use _i().
    deprecate("IptcImagePlugin.i", 12)
    return _i(c)
def dump(c: Sequence[int | bytes]) -> None:
    """.. deprecated:: 10.2.0

    Print the values in *c* as a line of space-separated hex bytes.
    """
    deprecate("IptcImagePlugin.dump", 12)
    # Loop variable renamed from "i": the original shadowed the module-level
    # i() helper defined above.
    for byte in c:
        print(f"{_i8(byte):02x}", end=" ")
    print()
##
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
# from TIFF and JPEG files, use the getiptcinfo function.
class IptcImageFile(ImageFile.ImageFile):
    """Image plugin for IPTC/NAA datastreams.

    To read IPTC/NAA fields embedded in TIFF and JPEG files, use the
    getiptcinfo function instead.
    """

    format = "IPTC"
    format_description = "IPTC/NAA"

    def getint(self, key: tuple[int, int]) -> int:
        """Return the info value stored under *key* as an integer."""
        return _i(self.info[key])

    def field(self) -> tuple[tuple[int, int] | None, int]:
        #
        # get a IPTC field header
        s = self.fp.read(5)
        if not s.strip(b"\x00"):
            # All-NUL header: treated as end of fields.
            return None, 0

        tag = s[1], s[2]

        # syntax: fields begin with 0x1C and a known record number.
        if s[0] != 0x1C or tag[0] not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 240]:
            msg = "invalid IPTC/NAA file"
            raise SyntaxError(msg)

        # field size
        size = s[3]
        if size > 132:
            msg = "illegal field length in IPTC/NAA file"
            raise OSError(msg)
        elif size == 128:
            # 128 marks an undefined (zero) length.
            size = 0
        elif size > 128:
            # Extended length: the low 7 bits give the byte count of the
            # length field that follows.
            size = _i(self.fp.read(size - 128))
        else:
            # Standard length: 16-bit big-endian stored in bytes 3-4.
            size = i16(s, 3)

        return tag, size

    def _open(self) -> None:
        # load descriptive fields
        while True:
            offset = self.fp.tell()
            tag, size = self.field()
            # (8, 10) marks the start of the image data.
            if not tag or tag == (8, 10):
                break
            if size:
                tagdata = self.fp.read(size)
            else:
                tagdata = None
            # Repeated tags are accumulated into a list.
            if tag in self.info:
                if isinstance(self.info[tag], list):
                    self.info[tag].append(tagdata)
                else:
                    self.info[tag] = [self.info[tag], tagdata]
            else:
                self.info[tag] = tagdata

        # mode
        layers = self.info[(3, 60)][0]
        component = self.info[(3, 60)][1]
        if (3, 65) in self.info:
            id = self.info[(3, 65)][0] - 1
        else:
            id = 0
        if layers == 1 and not component:
            self._mode = "L"
        elif layers == 3 and component:
            self._mode = "RGB"[id]
        elif layers == 4 and component:
            self._mode = "CMYK"[id]

        # size
        self._size = self.getint((3, 20)), self.getint((3, 30))

        # compression
        try:
            compression = COMPRESSION[self.getint((3, 120))]
        except KeyError as e:
            msg = "Unknown IPTC image compression"
            raise OSError(msg) from e

        # tile
        if tag == (8, 10):
            self.tile = [
                ImageFile._Tile("iptc", (0, 0) + self.size, offset, compression)
            ]

    def load(self) -> Image.core.PixelAccess | None:
        if len(self.tile) != 1 or self.tile[0][0] != "iptc":
            return ImageFile.ImageFile.load(self)

        offset, compression = self.tile[0][2:]

        self.fp.seek(offset)

        # Copy image data to temporary file
        o = BytesIO()
        if compression == "raw":
            # To simplify access to the extracted file,
            # prepend a PPM header
            o.write(b"P5\n%d %d\n255\n" % self.size)
        while True:
            type, size = self.field()
            if type != (8, 10):
                break
            # Copy the field payload in bounded chunks.
            while size > 0:
                s = self.fp.read(min(size, 8192))
                if not s:
                    break
                o.write(s)
                size -= len(s)

        with Image.open(o) as _im:
            _im.load()
            self.im = _im.im
        self.tile = []
        return Image.Image.load(self)
# Register the IPTC/NAA reader and its conventional extension.
Image.register_open(IptcImageFile.format, IptcImageFile)
Image.register_extension(IptcImageFile.format, ".iim")
def getiptcinfo(
    im: ImageFile.ImageFile,
) -> dict[tuple[int, int], bytes | list[bytes]] | None:
    """
    Get IPTC information from TIFF, JPEG, or IPTC file.

    :param im: An image containing IPTC data.
    :returns: A dictionary containing IPTC information, or None if
        no IPTC information block was found.
    """
    from . import JpegImagePlugin, TiffImagePlugin

    data = None

    info: dict[tuple[int, int], bytes | list[bytes]] = {}
    if isinstance(im, IptcImageFile):
        # return info dictionary right away
        for k, v in im.info.items():
            if isinstance(k, tuple):
                info[k] = v
        return info

    elif isinstance(im, JpegImagePlugin.JpegImageFile):
        # extract the IPTC/NAA resource
        photoshop = im.info.get("photoshop")
        if photoshop:
            data = photoshop.get(0x0404)

    elif isinstance(im, TiffImagePlugin.TiffImageFile):
        # get raw data from the IPTC/NAA tag (PhotoShop tags the data
        # as 4-byte integers, so we cannot use the get method...)
        try:
            data = im.tag_v2._tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
        except KeyError:
            pass

    if data is None:
        return None  # no properties

    # create an IptcImagePlugin object without initializing it
    class FakeImage:
        pass

    fake_im = FakeImage()
    # Swap in IptcImageFile so we can reuse its parser without running
    # ImageFile.__init__ (which would try to read from a real file).
    fake_im.__class__ = IptcImageFile  # type: ignore[assignment]
    iptc_im = cast(IptcImageFile, fake_im)

    # parse the IPTC information chunk
    iptc_im.info = {}
    iptc_im.fp = BytesIO(data)

    try:
        iptc_im._open()
    except (IndexError, KeyError):
        pass  # expected failure

    for k, v in iptc_im.info.items():
        if isinstance(k, tuple):
            info[k] = v
    return info
venv\Lib\site-packages\PIL\Jpeg2KImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# JPEG2000 file handling
#
# History:
# 2014-03-12 ajh Created
# 2021-06-30 rogermb Extract dpi information from the 'resc' header box
#
# Copyright (c) 2014 Coriolis Systems Limited
# Copyright (c) 2014 Alastair Houghton
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import os
import struct
from collections.abc import Callable
from typing import IO, cast
from . import Image, ImageFile, ImagePalette, _binary
class BoxReader:
    """
    A small helper class to read fields stored in JPEG2000 header boxes
    and to easily step into and read sub-boxes.
    """

    def __init__(self, fp: IO[bytes], length: int = -1) -> None:
        self.fp = fp
        # length < 0 means the total stream length is unknown.
        self.has_length = length >= 0
        self.length = length
        # -1 means "not currently inside a box".
        self.remaining_in_box = -1

    def _can_read(self, num_bytes: int) -> bool:
        if self.has_length and self.fp.tell() + num_bytes > self.length:
            # Outside box: ensure we don't read past the known file length
            return False
        if self.remaining_in_box >= 0:
            # Inside box contents: ensure read does not go past box boundaries
            return num_bytes <= self.remaining_in_box
        else:
            return True  # No length known, just read

    def _read_bytes(self, num_bytes: int) -> bytes:
        if not self._can_read(num_bytes):
            msg = "Not enough data in header"
            raise SyntaxError(msg)

        data = self.fp.read(num_bytes)
        if len(data) < num_bytes:
            msg = f"Expected to read {num_bytes} bytes but only got {len(data)}."
            raise OSError(msg)

        if self.remaining_in_box > 0:
            self.remaining_in_box -= num_bytes
        return data

    def read_fields(self, field_format: str) -> tuple[int | bytes, ...]:
        """Read and unpack a struct with the given format from the stream."""
        size = struct.calcsize(field_format)
        data = self._read_bytes(size)
        return struct.unpack(field_format, data)

    def read_boxes(self) -> BoxReader:
        """Return a new reader over the remaining contents of this box."""
        size = self.remaining_in_box
        data = self._read_bytes(size)
        return BoxReader(io.BytesIO(data), size)

    def has_next_box(self) -> bool:
        if self.has_length:
            return self.fp.tell() + self.remaining_in_box < self.length
        else:
            return True

    def next_box_type(self) -> bytes:
        """Advance to the next box and return its 4-byte type code."""
        # Skip the rest of the box if it has not been read
        if self.remaining_in_box > 0:
            self.fp.seek(self.remaining_in_box, os.SEEK_CUR)
        self.remaining_in_box = -1

        # Read the length and type of the next box
        lbox, tbox = cast(tuple[int, bytes], self.read_fields(">I4s"))
        if lbox == 1:
            # lbox == 1 signals an extended 64-bit length field.
            lbox = cast(int, self.read_fields(">Q")[0])
            hlen = 16
        else:
            hlen = 8

        if lbox < hlen or not self._can_read(lbox - hlen):
            msg = "Invalid header length"
            raise SyntaxError(msg)

        self.remaining_in_box = lbox - hlen
        return tbox
def _parse_codestream(fp: IO[bytes]) -> tuple[tuple[int, int], str]:
    """Parse the JPEG 2000 codestream to extract the size and component
    count from the SIZ marker segment, returning a PIL (size, mode) tuple."""

    hdr = fp.read(2)
    lsiz = _binary.i16be(hdr)
    siz = hdr + fp.read(lsiz - 2)
    lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from(
        ">HHIIIIIIIIH", siz
    )
    # Image extent is the reference grid size minus the image offset.
    size = (xsiz - xosiz, ysiz - yosiz)
    if csiz == 1:
        # Single component: bit depth (low 7 bits of Ssiz, plus 1) decides
        # between 8-bit "L" and 16-bit "I;16".
        ssiz = struct.unpack_from(">B", siz, 38)
        if (ssiz[0] & 0x7F) + 1 > 8:
            mode = "I;16"
        else:
            mode = "L"
    elif csiz == 2:
        mode = "LA"
    elif csiz == 3:
        mode = "RGB"
    elif csiz == 4:
        mode = "RGBA"
    else:
        msg = "unable to determine J2K image mode"
        raise SyntaxError(msg)

    return size, mode
def _res_to_dpi(num: int, denom: int, exp: int) -> float | None:
    """Convert JPEG2000's (numerator, denominator, exponent-base-10)
    resolution — (num / denom) * 10^exp, stored in dots per meter — to
    floating-point dots per inch, or None when the denominator is zero."""
    if denom == 0:
        return None
    # 1 inch = 0.0254 m, hence the 254/10000 scaling.
    return 254 * num * 10**exp / (10000 * denom)
def _parse_jp2_header(
    fp: IO[bytes],
) -> tuple[
    tuple[int, int],
    str,
    str | None,
    tuple[float, float] | None,
    ImagePalette.ImagePalette | None,
]:
    """Parse the JP2 header box to extract size, component count,
    color space information, and optionally DPI information,
    returning a (size, mode, mimetype, dpi, palette) tuple."""

    # Find the JP2 header box
    reader = BoxReader(fp)
    header = None
    mimetype = None
    while reader.has_next_box():
        tbox = reader.next_box_type()

        if tbox == b"jp2h":
            header = reader.read_boxes()
            break
        elif tbox == b"ftyp":
            if reader.read_fields(">4s")[0] == b"jpx ":
                mimetype = "image/jpx"
    assert header is not None

    size = None
    mode = None
    bpc = None
    nc = None
    dpi = None  # 2-tuple of DPI info, or None
    palette = None

    while header.has_next_box():
        tbox = header.next_box_type()

        if tbox == b"ihdr":
            # Image header: height, width, component count, bit depth.
            height, width, nc, bpc = header.read_fields(">IIHB")
            assert isinstance(height, int)
            assert isinstance(width, int)
            assert isinstance(bpc, int)
            size = (width, height)

            if nc == 1 and (bpc & 0x7F) > 8:
                mode = "I;16"
            elif nc == 1:
                mode = "L"
            elif nc == 2:
                mode = "LA"
            elif nc == 3:
                mode = "RGB"
            elif nc == 4:
                mode = "RGBA"
        elif tbox == b"colr" and nc == 4:
            # Colour specification: enumerated colourspace 12 is CMYK.
            meth, _, _, enumcs = header.read_fields(">BBBI")
            if meth == 1 and enumcs == 12:
                mode = "CMYK"
        elif tbox == b"pclr" and mode in ("L", "LA"):
            # Palette box: only applied when every channel fits in 8 bits.
            ne, npc = header.read_fields(">HB")
            assert isinstance(ne, int)
            assert isinstance(npc, int)
            max_bitdepth = 0
            for bitdepth in header.read_fields(">" + ("B" * npc)):
                assert isinstance(bitdepth, int)
                if bitdepth > max_bitdepth:
                    max_bitdepth = bitdepth
            if max_bitdepth <= 8:
                palette = ImagePalette.ImagePalette("RGBA" if npc == 4 else "RGB")
                for i in range(ne):
                    color: list[int] = []
                    for value in header.read_fields(">" + ("B" * npc)):
                        assert isinstance(value, int)
                        color.append(value)
                    palette.getcolor(tuple(color))
                mode = "P" if mode == "L" else "PA"
        elif tbox == b"res ":
            # Resolution super-box; look for the capture resolution sub-box.
            res = header.read_boxes()
            while res.has_next_box():
                tres = res.next_box_type()
                if tres == b"resc":
                    vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB")
                    assert isinstance(vrcn, int)
                    assert isinstance(vrcd, int)
                    assert isinstance(hrcn, int)
                    assert isinstance(hrcd, int)
                    assert isinstance(vrce, int)
                    assert isinstance(hrce, int)
                    hres = _res_to_dpi(hrcn, hrcd, hrce)
                    vres = _res_to_dpi(vrcn, vrcd, vrce)
                    if hres is not None and vres is not None:
                        dpi = (hres, vres)
                    break

    if size is None or mode is None:
        msg = "Malformed JP2 header"
        raise SyntaxError(msg)

    return size, mode, mimetype, dpi, palette
##
# Image plugin for JPEG2000 images.
class Jpeg2KImageFile(ImageFile.ImageFile):
    format = "JPEG2000"
    format_description = "JPEG 2000 (ISO 15444)"

    def _open(self) -> None:
        # Distinguish the two container flavours from the first bytes:
        # a raw codestream begins with the SOC marker, a JP2 container
        # with the 12-byte signature box.
        sig = self.fp.read(4)
        if sig == b"\xff\x4f\xff\x51":
            self.codec = "j2k"
            self._size, self._mode = _parse_codestream(self.fp)
            self._parse_comment()
        else:
            sig = sig + self.fp.read(8)
            if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a":
                self.codec = "jp2"
                header = _parse_jp2_header(self.fp)
                self._size, self._mode, self.custom_mimetype, dpi, self.palette = header
                if dpi is not None:
                    self.info["dpi"] = dpi
                if self.fp.read(12).endswith(b"jp2c\xff\x4f\xff\x51"):
                    # Codestream box found: skip its first marker segment,
                    # then scan for a comment.
                    hdr = self.fp.read(2)
                    length = _binary.i16be(hdr)
                    self.fp.seek(length - 2, os.SEEK_CUR)
                    self._parse_comment()
            else:
                msg = "not a JPEG 2000 file"
                raise SyntaxError(msg)

        self._reduce = 0
        self.layers = 0

        # Determine a file descriptor and total length for the decoder;
        # fall back from fileno()/fstat to seek-based measurement.
        fd = -1
        length = -1
        try:
            fd = self.fp.fileno()
            length = os.fstat(fd).st_size
        except Exception:
            fd = -1
            try:
                pos = self.fp.tell()
                self.fp.seek(0, io.SEEK_END)
                length = self.fp.tell()
                self.fp.seek(pos)
            except Exception:
                length = -1

        self.tile = [
            ImageFile._Tile(
                "jpeg2k",
                (0, 0) + self.size,
                0,
                (self.codec, self._reduce, self.layers, fd, length),
            )
        ]

    def _parse_comment(self) -> None:
        # Scan codestream marker segments for a comment (0xFF64) marker.
        while True:
            marker = self.fp.read(2)
            if not marker:
                break
            typ = marker[1]
            if typ in (0x90, 0xD9):
                # Start of tile or end of codestream
                break
            hdr = self.fp.read(2)
            length = _binary.i16be(hdr)
            if typ == 0x64:
                # Comment
                self.info["comment"] = self.fp.read(length - 2)[2:]
                break
            else:
                self.fp.seek(length - 2, os.SEEK_CUR)

    @property  # type: ignore[override]
    def reduce(
        self,
    ) -> (
        Callable[[int | tuple[int, int], tuple[int, int, int, int] | None], Image.Image]
        | int
    ):
        # https://github.com/python-pillow/Pillow/issues/4343 found that the
        # new Image 'reduce' method was shadowed by this plugin's 'reduce'
        # property. This attempts to allow for both scenarios
        return self._reduce or super().reduce

    @reduce.setter
    def reduce(self, value: int) -> None:
        self._reduce = value

    def load(self) -> Image.core.PixelAccess | None:
        if self.tile and self._reduce:
            # Each reduce level halves the output size (rounding to nearest).
            power = 1 << self._reduce
            adjust = power >> 1
            self._size = (
                int((self.size[0] + adjust) / power),
                int((self.size[1] + adjust) / power),
            )

            # Update the reduce and layers settings
            t = self.tile[0]
            assert isinstance(t[3], tuple)
            t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4])
            self.tile = [ImageFile._Tile(t[0], (0, 0) + self.size, t[2], t3)]

        return ImageFile.ImageFile.load(self)
def _accept(prefix: bytes) -> bool:
return prefix.startswith(
(b"\xff\x4f\xff\x51", b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a")
)
# ------------------------------------------------------------
# Save support


def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* as a JPEG 2000 codestream or JP2 container.

    Encoding options are read from ``im.encoderinfo`` and packed into
    ``im.encoderconfig`` in the exact order expected by the C encoder.
    """
    # Get the keyword arguments
    info = im.encoderinfo

    if isinstance(filename, str):
        filename = filename.encode()
    # A .j2k extension (or the explicit "no_jp2" option) selects a raw
    # codestream; everything else is wrapped in a JP2 container.
    if filename.endswith(b".j2k") or info.get("no_jp2", False):
        kind = "j2k"
    else:
        kind = "jp2"

    offset = info.get("offset", None)
    tile_offset = info.get("tile_offset", None)
    tile_size = info.get("tile_size", None)
    quality_mode = info.get("quality_mode", "rates")
    quality_layers = info.get("quality_layers", None)
    if quality_layers is not None and not (
        isinstance(quality_layers, (list, tuple))
        and all(
            isinstance(quality_layer, (int, float)) for quality_layer in quality_layers
        )
    ):
        msg = "quality_layers must be a sequence of numbers"
        raise ValueError(msg)

    num_resolutions = info.get("num_resolutions", 0)
    cblk_size = info.get("codeblock_size", None)
    precinct_size = info.get("precinct_size", None)
    irreversible = info.get("irreversible", False)
    progression = info.get("progression", "LRCP")
    cinema_mode = info.get("cinema_mode", "no")
    mct = info.get("mct", 0)
    signed = info.get("signed", False)
    comment = info.get("comment")
    if isinstance(comment, str):
        comment = comment.encode()
    plt = info.get("plt", False)

    # Pass a real file descriptor to the encoder when one is available.
    fd = -1
    if hasattr(fp, "fileno"):
        try:
            fd = fp.fileno()
        except Exception:
            fd = -1

    # NOTE: field order here is a contract with the C encoder.
    im.encoderconfig = (
        offset,
        tile_offset,
        tile_size,
        quality_mode,
        quality_layers,
        num_resolutions,
        cblk_size,
        precinct_size,
        irreversible,
        progression,
        cinema_mode,
        mct,
        signed,
        fd,
        comment,
        plt,
    )

    ImageFile._save(im, fp, [ImageFile._Tile("jpeg2k", (0, 0) + im.size, 0, kind)])
# ------------------------------------------------------------
# Registry stuff

# Hook the plugin into Pillow's open/save machinery.
Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept)
Image.register_save(Jpeg2KImageFile.format, _save)

Image.register_extensions(
    Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"]
)

Image.register_mime(Jpeg2KImageFile.format, "image/jp2")
# venv\Lib\site-packages\PIL\JpegImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continuous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import array
import io
import math
import os
import struct
import subprocess
import sys
import tempfile
import warnings
from typing import IO, Any
from . import Image, ImageFile
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import o8
from ._binary import o16be as o16
from ._deprecate import deprecate
from .JpegPresets import presets
TYPE_CHECKING = False
if TYPE_CHECKING:
from .MpoImagePlugin import MpoImageFile
#
# Parser


def Skip(self: JpegImageFile, marker: int) -> None:
    # Consume a segment we don't care about: the two stored length bytes
    # count themselves, so the remaining payload is (length - 2) bytes.
    remaining = i16(self.fp.read(2)) - 2
    ImageFile._safe_read(self.fp, remaining)
def APP(self: JpegImageFile, marker: int) -> None:
    #
    # Application marker. Store these in the APP dictionary.
    # Also look for well-known application markers.

    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)

    app = f"APP{marker & 15}"

    self.app[app] = s  # compatibility
    self.applist.append((app, s))

    if marker == 0xFFE0 and s.startswith(b"JFIF"):
        # extract JFIF information
        self.info["jfif"] = version = i16(s, 5)  # version
        self.info["jfif_version"] = divmod(version, 256)
        # extract JFIF properties
        try:
            jfif_unit = s[7]
            jfif_density = i16(s, 8), i16(s, 10)
        except Exception:
            pass
        else:
            if jfif_unit == 1:
                self.info["dpi"] = jfif_density
            elif jfif_unit == 2:  # cm
                # 1 dpcm = 2.54 dpi
                self.info["dpi"] = tuple(d * 2.54 for d in jfif_density)
            self.info["jfif_unit"] = jfif_unit
            self.info["jfif_density"] = jfif_density
    elif marker == 0xFFE1 and s.startswith(b"Exif\0\0"):
        # extract EXIF information
        # Multiple EXIF APP1 segments are concatenated.
        if "exif" in self.info:
            self.info["exif"] += s[6:]
        else:
            self.info["exif"] = s
            self._exif_offset = self.fp.tell() - n + 6
    elif marker == 0xFFE1 and s.startswith(b"http://ns.adobe.com/xap/1.0/\x00"):
        # XMP packet follows the NUL-terminated namespace URI.
        self.info["xmp"] = s.split(b"\x00", 1)[1]
    elif marker == 0xFFE2 and s.startswith(b"FPXR\0"):
        # extract FlashPix information (incomplete)
        self.info["flashpix"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s.startswith(b"ICC_PROFILE\0"):
        # Since an ICC profile can be larger than the maximum size of
        # a JPEG marker (64K), we need provisions to split it into
        # multiple markers. The format defined by the ICC specifies
        # one or more APP2 markers containing the following data:
        #   Identifying string      ASCII "ICC_PROFILE\0"  (12 bytes)
        #   Marker sequence number  1, 2, etc (1 byte)
        #   Number of markers       Total of APP2's used (1 byte)
        #   Profile data            (remainder of APP2 data)
        # Decoders should use the marker sequence numbers to
        # reassemble the profile, rather than assuming that the APP2
        # markers appear in the correct sequence.
        self.icclist.append(s)
    elif marker == 0xFFED and s.startswith(b"Photoshop 3.0\x00"):
        # parse the image resource block
        offset = 14
        photoshop = self.info.setdefault("photoshop", {})
        while s[offset : offset + 4] == b"8BIM":
            try:
                offset += 4
                # resource code
                code = i16(s, offset)
                offset += 2
                # resource name (usually empty)
                name_len = s[offset]
                # name = s[offset+1:offset+1+name_len]
                offset += 1 + name_len
                offset += offset & 1  # align
                # resource data block
                size = i32(s, offset)
                offset += 4
                data = s[offset : offset + size]
                if code == 0x03ED:  # ResolutionInfo
                    photoshop[code] = {
                        "XResolution": i32(data, 0) / 65536,
                        "DisplayedUnitsX": i16(data, 4),
                        "YResolution": i32(data, 8) / 65536,
                        "DisplayedUnitsY": i16(data, 12),
                    }
                else:
                    photoshop[code] = data
                offset += size
                offset += offset & 1  # align
            except struct.error:
                break  # insufficient data
    elif marker == 0xFFEE and s.startswith(b"Adobe"):
        self.info["adobe"] = i16(s, 5)
        # extract Adobe custom properties
        try:
            adobe_transform = s[11]
        except IndexError:
            pass
        else:
            self.info["adobe_transform"] = adobe_transform
    elif marker == 0xFFE2 and s.startswith(b"MPF\0"):
        # extract MPO information
        self.info["mp"] = s[4:]
        # offset is current location minus buffer size
        # plus constant header size
        self.info["mpoffset"] = self.fp.tell() - n + 4
def COM(self: JpegImageFile, marker: int) -> None:
    #
    # Comment marker.  Store the payload under the modern "comment" key
    # and the legacy APP dictionary / list for compatibility.

    payload_len = i16(self.fp.read(2)) - 2
    payload = ImageFile._safe_read(self.fp, payload_len)

    self.info["comment"] = payload
    self.app["COM"] = payload  # compatibility
    self.applist.append(("COM", payload))
def SOF(self: JpegImageFile, marker: int) -> None:
    #
    # Start of frame marker.  Defines the size and mode of the
    # image.  JPEG is colour blind, so we use some simple
    # heuristics to map the number of layers to an appropriate
    # mode.  Note that this could be made a bit brighter, by
    # looking for JFIF and Adobe APP markers.

    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)
    # Height precedes width in the segment; Pillow sizes are (w, h).
    self._size = i16(s, 3), i16(s, 1)

    self.bits = s[0]
    if self.bits != 8:
        msg = f"cannot handle {self.bits}-bit layers"
        raise SyntaxError(msg)

    self.layers = s[5]
    if self.layers == 1:
        self._mode = "L"
    elif self.layers == 3:
        self._mode = "RGB"
    elif self.layers == 4:
        self._mode = "CMYK"
    else:
        msg = f"cannot handle {self.layers}-layer images"
        raise SyntaxError(msg)

    # Progressive SOF markers.
    if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
        self.info["progressive"] = self.info["progression"] = 1

    if self.icclist:
        # fixup icc profile
        self.icclist.sort()  # sort by sequence number
        if self.icclist[0][13] == len(self.icclist):
            profile = [p[14:] for p in self.icclist]
            icc_profile = b"".join(profile)
        else:
            icc_profile = None  # wrong number of fragments
        self.info["icc_profile"] = icc_profile
        self.icclist = []

    # Component specifications: 3 bytes each starting at offset 6.
    for i in range(6, len(s), 3):
        t = s[i : i + 3]
        # 4-tuples: id, vsamp, hsamp, qtable
        self.layer.append((t[0], t[1] // 16, t[1] & 15, t[2]))
def DQT(self: JpegImageFile, marker: int) -> None:
    #
    # Define quantization table.  Note that there might be more
    # than one table in each marker.

    # FIXME: The quantization tables can be used to estimate the
    # compression quality.

    n = i16(self.fp.read(2)) - 2
    s = ImageFile._safe_read(self.fp, n)
    while len(s):
        v = s[0]
        # High nibble of the first byte selects 8- or 16-bit entries.
        precision = 1 if (v // 16 == 0) else 2  # in bytes
        qt_length = 1 + precision * 64
        if len(s) < qt_length:
            msg = "bad quantization table marker"
            raise SyntaxError(msg)
        data = array.array("B" if precision == 1 else "H", s[1:qt_length])
        if sys.byteorder == "little" and precision > 1:
            data.byteswap()  # the values are always big-endian
        # Store in natural (de-zigzagged) order, keyed by table id.
        self.quantization[v & 15] = [data[i] for i in zigzag_index]
        s = s[qt_length:]
#
# JPEG marker table
# Maps marker code -> (short name, description, handler or None).
MARKER = {
    0xFFC0: ("SOF0", "Baseline DCT", SOF),
    0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
    0xFFC2: ("SOF2", "Progressive DCT", SOF),
    0xFFC3: ("SOF3", "Spatial lossless", SOF),
    0xFFC4: ("DHT", "Define Huffman table", Skip),
    0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
    0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
    0xFFC7: ("SOF7", "Differential spatial", SOF),
    0xFFC8: ("JPG", "Extension", None),
    0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
    0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
    0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
    0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
    0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
    0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
    0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
    0xFFD0: ("RST0", "Restart 0", None),
    0xFFD1: ("RST1", "Restart 1", None),
    0xFFD2: ("RST2", "Restart 2", None),
    0xFFD3: ("RST3", "Restart 3", None),
    0xFFD4: ("RST4", "Restart 4", None),
    0xFFD5: ("RST5", "Restart 5", None),
    0xFFD6: ("RST6", "Restart 6", None),
    0xFFD7: ("RST7", "Restart 7", None),
    0xFFD8: ("SOI", "Start of image", None),
    0xFFD9: ("EOI", "End of image", None),
    0xFFDA: ("SOS", "Start of scan", Skip),
    0xFFDB: ("DQT", "Define quantization table", DQT),
    0xFFDC: ("DNL", "Define number of lines", Skip),
    0xFFDD: ("DRI", "Define restart interval", Skip),
    0xFFDE: ("DHP", "Define hierarchical progression", SOF),
    0xFFDF: ("EXP", "Expand reference component", Skip),
    0xFFE0: ("APP0", "Application segment 0", APP),
    0xFFE1: ("APP1", "Application segment 1", APP),
    0xFFE2: ("APP2", "Application segment 2", APP),
    0xFFE3: ("APP3", "Application segment 3", APP),
    0xFFE4: ("APP4", "Application segment 4", APP),
    0xFFE5: ("APP5", "Application segment 5", APP),
    0xFFE6: ("APP6", "Application segment 6", APP),
    0xFFE7: ("APP7", "Application segment 7", APP),
    0xFFE8: ("APP8", "Application segment 8", APP),
    0xFFE9: ("APP9", "Application segment 9", APP),
    0xFFEA: ("APP10", "Application segment 10", APP),
    0xFFEB: ("APP11", "Application segment 11", APP),
    0xFFEC: ("APP12", "Application segment 12", APP),
    0xFFED: ("APP13", "Application segment 13", APP),
    0xFFEE: ("APP14", "Application segment 14", APP),
    0xFFEF: ("APP15", "Application segment 15", APP),
    0xFFF0: ("JPG0", "Extension 0", None),
    0xFFF1: ("JPG1", "Extension 1", None),
    0xFFF2: ("JPG2", "Extension 2", None),
    0xFFF3: ("JPG3", "Extension 3", None),
    0xFFF4: ("JPG4", "Extension 4", None),
    0xFFF5: ("JPG5", "Extension 5", None),
    0xFFF6: ("JPG6", "Extension 6", None),
    0xFFF7: ("JPG7", "Extension 7", None),
    0xFFF8: ("JPG8", "Extension 8", None),
    0xFFF9: ("JPG9", "Extension 9", None),
    0xFFFA: ("JPG10", "Extension 10", None),
    0xFFFB: ("JPG11", "Extension 11", None),
    0xFFFC: ("JPG12", "Extension 12", None),
    0xFFFD: ("JPG13", "Extension 13", None),
    0xFFFE: ("COM", "Comment", COM),
}
def _accept(prefix: bytes) -> bool:
# Magic number was taken from https://en.wikipedia.org/wiki/JPEG
return prefix.startswith(b"\xff\xd8\xff")
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
    format = "JPEG"
    format_description = "JPEG (ISO 10918)"

    def _open(self) -> None:
        s = self.fp.read(3)

        if not _accept(s):
            msg = "not a JPEG file"
            raise SyntaxError(msg)
        s = b"\xff"

        # Create attributes
        self.bits = self.layers = 0
        self._exif_offset = 0

        # JPEG specifics (internal)
        self.layer: list[tuple[int, int, int, int]] = []
        self._huffman_dc: dict[Any, Any] = {}
        self._huffman_ac: dict[Any, Any] = {}
        self.quantization: dict[int, list[int]] = {}
        self.app: dict[str, bytes] = {}  # compatibility
        self.applist: list[tuple[str, bytes]] = []
        self.icclist: list[bytes] = []

        # Walk marker segments until the start-of-scan marker.
        while True:
            i = s[0]
            if i == 0xFF:
                s = s + self.fp.read(1)
                i = i16(s)
            else:
                # Skip non-0xFF junk
                s = self.fp.read(1)
                continue

            if i in MARKER:
                name, description, handler = MARKER[i]
                if handler is not None:
                    handler(self, i)
                if i == 0xFFDA:  # start of scan
                    rawmode = self.mode
                    if self.mode == "CMYK":
                        rawmode = "CMYK;I"  # assume adobe conventions
                    self.tile = [
                        ImageFile._Tile("jpeg", (0, 0) + self.size, 0, (rawmode, ""))
                    ]
                    # self.__offset = self.fp.tell()
                    break
                s = self.fp.read(1)
            elif i in {0, 0xFFFF}:
                # padded marker or junk; move on
                s = b"\xff"
            elif i == 0xFF00:  # Skip extraneous data (escaped 0xFF)
                s = self.fp.read(1)
            else:
                msg = "no marker found"
                raise SyntaxError(msg)

        self._read_dpi_from_exif()

    def __getattr__(self, name: str) -> Any:
        # Deprecated aliases for the private Huffman-table dicts.
        if name in ("huffman_ac", "huffman_dc"):
            deprecate(name, 12)
            return getattr(self, "_" + name)
        raise AttributeError(name)

    def __getstate__(self) -> list[Any]:
        # Pickle support: extend the base state with JPEG-specific fields.
        return super().__getstate__() + [self.layers, self.layer]

    def __setstate__(self, state: list[Any]) -> None:
        self.layers, self.layer = state[6:]
        super().__setstate__(state)

    def load_read(self, read_bytes: int) -> bytes:
        """
        internal: read more image data
        For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker
        so libjpeg can finish decoding
        """
        s = self.fp.read(read_bytes)

        if not s and ImageFile.LOAD_TRUNCATED_IMAGES and not hasattr(self, "_ended"):
            # Premature EOF.
            # Pretend file is finished adding EOI marker
            self._ended = True
            return b"\xff\xd9"

        return s

    def draft(
        self, mode: str | None, size: tuple[int, int] | None
    ) -> tuple[str, tuple[int, int, float, float]] | None:
        """Configure the decoder to produce a draft-quality (possibly
        downscaled / mode-converted) version of the image cheaply."""
        if len(self.tile) != 1:
            return None

        # Protect from second call
        if self.decoderconfig:
            return None

        d, e, o, a = self.tile[0]
        scale = 1
        original_size = self.size

        assert isinstance(a, tuple)
        if a[0] == "RGB" and mode in ["L", "YCbCr"]:
            self._mode = mode
            a = mode, ""

        if size:
            # Pick the largest DCT scale factor (8/4/2/1) not exceeding
            # the requested reduction.
            scale = min(self.size[0] // size[0], self.size[1] // size[1])
            for s in [8, 4, 2, 1]:
                if scale >= s:
                    break
            assert e is not None
            e = (
                e[0],
                e[1],
                (e[2] - e[0] + s - 1) // s + e[0],
                (e[3] - e[1] + s - 1) // s + e[1],
            )
            self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s)
            scale = s

        self.tile = [ImageFile._Tile(d, e, o, a)]
        self.decoderconfig = (scale, 0)

        box = (0, 0, original_size[0] / scale, original_size[1] / scale)
        return self.mode, box

    def load_djpeg(self) -> None:
        # ALTERNATIVE: handle JPEGs via the IJG command line utilities

        f, path = tempfile.mkstemp()
        os.close(f)
        if os.path.exists(self.filename):
            subprocess.check_call(["djpeg", "-outfile", path, self.filename])
        else:
            try:
                os.unlink(path)
            except OSError:
                pass

            msg = "Invalid Filename"
            raise ValueError(msg)

        try:
            with Image.open(path) as _im:
                _im.load()
                self.im = _im.im
        finally:
            try:
                os.unlink(path)
            except OSError:
                pass

        self._mode = self.im.mode
        self._size = self.im.size

        self.tile = []

    def _getexif(self) -> dict[int, Any] | None:
        return _getexif(self)

    def _read_dpi_from_exif(self) -> None:
        # If DPI isn't in JPEG header, fetch from EXIF
        if "dpi" in self.info or "exif" not in self.info:
            return
        try:
            exif = self.getexif()
            resolution_unit = exif[0x0128]
            x_resolution = exif[0x011A]
            try:
                # Resolution is usually stored as a rational pair.
                dpi = float(x_resolution[0]) / x_resolution[1]
            except TypeError:
                dpi = x_resolution
            if math.isnan(dpi):
                msg = "DPI is not a number"
                raise ValueError(msg)
            if resolution_unit == 3:  # cm
                # 1 dpcm = 2.54 dpi
                dpi *= 2.54
            self.info["dpi"] = dpi, dpi
        except (
            struct.error,  # truncated EXIF
            KeyError,  # dpi not included
            SyntaxError,  # invalid/unreadable EXIF
            TypeError,  # dpi is an invalid float
            ValueError,  # dpi is an invalid float
            ZeroDivisionError,  # invalid dpi rational value
        ):
            self.info["dpi"] = 72, 72

    def _getmp(self) -> dict[int, Any] | None:
        return _getmp(self)
def _getexif(self: JpegImageFile) -> dict[int, Any] | None:
if "exif" not in self.info:
return None
return self.getexif()._get_merged_dict()
def _getmp(self: JpegImageFile) -> dict[int, Any] | None:
    # Extract MP information.  This method was inspired by the "highly
    # experimental" _getexif version that's been in use for years now,
    # itself based on the ImageFileDirectory class in the TIFF plugin.

    # The MP record essentially consists of a TIFF file embedded in a JPEG
    # application marker.
    try:
        data = self.info["mp"]
    except KeyError:
        return None
    file_contents = io.BytesIO(data)
    head = file_contents.read(8)
    # Byte order is taken from the embedded TIFF header.
    endianness = ">" if head.startswith(b"\x4d\x4d\x00\x2a") else "<"
    # process dictionary
    from . import TiffImagePlugin

    try:
        info = TiffImagePlugin.ImageFileDirectory_v2(head)
        file_contents.seek(info.next)
        info.load(file_contents)
        mp = dict(info)
    except Exception as e:
        msg = "malformed MP Index (unreadable directory)"
        raise SyntaxError(msg) from e
    # it's an error not to have a number of images
    try:
        quant = mp[0xB001]
    except KeyError as e:
        msg = "malformed MP Index (no number of images)"
        raise SyntaxError(msg) from e
    # get MP entries
    mpentries = []
    try:
        rawmpentries = mp[0xB002]
        # Each MP entry is a packed 16-byte record.
        for entrynum in range(quant):
            unpackedentry = struct.unpack_from(
                f"{endianness}LLLHH", rawmpentries, entrynum * 16
            )
            labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2")
            mpentry = dict(zip(labels, unpackedentry))
            # Decode the bit-packed Attribute word.
            mpentryattr = {
                "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)),
                "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)),
                "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)),
                "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27,
                "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24,
                "MPType": mpentry["Attribute"] & 0x00FFFFFF,
            }
            if mpentryattr["ImageDataFormat"] == 0:
                mpentryattr["ImageDataFormat"] = "JPEG"
            else:
                msg = "unsupported picture format in MPO"
                raise SyntaxError(msg)
            mptypemap = {
                0x000000: "Undefined",
                0x010001: "Large Thumbnail (VGA Equivalent)",
                0x010002: "Large Thumbnail (Full HD Equivalent)",
                0x020001: "Multi-Frame Image (Panorama)",
                0x020002: "Multi-Frame Image: (Disparity)",
                0x020003: "Multi-Frame Image: (Multi-Angle)",
                0x030000: "Baseline MP Primary Image",
            }
            mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown")
            mpentry["Attribute"] = mpentryattr
            mpentries.append(mpentry)
        mp[0xB002] = mpentries
    except KeyError as e:
        msg = "malformed MP Index (bad MP Entry)"
        raise SyntaxError(msg) from e
    # Next we should try and parse the individual image unique ID list;
    # we don't because I've never seen this actually used in a real MPO
    # file and so can't test it.
    return mp
# --------------------------------------------------------------------
# stuff to save JPEG files

# Maps a PIL image mode to the rawmode handed to the JPEG encoder.
RAWMODE = {
    "1": "L",
    "L": "L",
    "RGB": "RGB",
    "RGBX": "RGB",
    "CMYK": "CMYK;I",  # assume adobe conventions
    "YCbCr": "YCbCr",
}
# fmt: off
# Zig-zag scan order for the 64 coefficients of an 8x8 DCT block
# (used to de-zigzag quantization tables in DQT).
zigzag_index = (
    0,  1,  5,  6,  14, 15, 27, 28,
    2,  4,  7,  13, 16, 26, 29, 42,
    3,  8,  12, 17, 25, 30, 41, 43,
    9,  11, 18, 24, 31, 40, 44, 53,
    10, 19, 23, 32, 39, 45, 52, 54,
    20, 22, 33, 38, 46, 51, 55, 60,
    21, 34, 37, 47, 50, 56, 59, 61,
    35, 36, 48, 49, 57, 58, 62, 63,
)

# Maps (h, v) sampling factors of the first three components to the
# encoder's subsampling setting (0 = 4:4:4, 1 = 4:2:2, 2 = 4:2:0).
samplings = {
    (1, 1, 1, 1, 1, 1): 0,
    (2, 1, 1, 1, 1, 1): 1,
    (2, 2, 1, 1, 1, 1): 2,
}
# fmt: on
def get_sampling(im: Image.Image) -> int:
    # There's no subsampling when images have only 1 layer
    # (grayscale images) or when they are CMYK (4 layers),
    # so set subsampling to the default value.
    #
    # NOTE: currently Pillow can't encode JPEG to YCCK format.
    # If YCCK support is added in the future, subsampling code will have
    # to be updated (here and in JpegEncode.c) to deal with 4 layers.
    if not isinstance(im, JpegImageFile) or im.layers in (1, 4):
        return -1
    layer = im.layer
    key = (
        layer[0][1], layer[0][2],
        layer[1][1], layer[1][2],
        layer[2][1], layer[2][2],
    )
    return samplings.get(key, -1)
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* as a JPEG, collecting options from ``im.encoderinfo``
    into ``im.encoderconfig`` in the order expected by the C encoder."""
    if im.width == 0 or im.height == 0:
        msg = "cannot write empty image as JPEG"
        raise ValueError(msg)

    try:
        rawmode = RAWMODE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as JPEG"
        raise OSError(msg) from e

    info = im.encoderinfo

    dpi = [round(x) for x in info.get("dpi", (0, 0))]

    quality = info.get("quality", -1)
    subsampling = info.get("subsampling", -1)
    qtables = info.get("qtables")

    # quality may be an int, the string "keep", or a preset name.
    if quality == "keep":
        quality = -1
        subsampling = "keep"
        qtables = "keep"
    elif quality in presets:
        preset = presets[quality]
        quality = -1
        subsampling = preset.get("subsampling", -1)
        qtables = preset.get("quantization")
    elif not isinstance(quality, int):
        msg = "Invalid quality setting"
        raise ValueError(msg)
    else:
        if subsampling in presets:
            subsampling = presets[subsampling].get("subsampling", -1)
        if isinstance(qtables, str) and qtables in presets:
            qtables = presets[qtables].get("quantization")

    # Translate symbolic subsampling names to encoder codes.
    if subsampling == "4:4:4":
        subsampling = 0
    elif subsampling == "4:2:2":
        subsampling = 1
    elif subsampling == "4:2:0":
        subsampling = 2
    elif subsampling == "4:1:1":
        # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0.
        # Set 4:2:0 if someone is still using that value.
        subsampling = 2
    elif subsampling == "keep":
        if im.format != "JPEG":
            msg = "Cannot use 'keep' when original image is not a JPEG"
            raise ValueError(msg)
        subsampling = get_sampling(im)

    def validate_qtables(
        qtables: (
            str | tuple[list[int], ...] | list[list[int]] | dict[int, list[int]] | None
        ),
    ) -> list[list[int]] | None:
        # Normalize user-supplied quantization tables (string, sequence,
        # or dict) into a list of up to four 64-entry lists.
        if qtables is None:
            return qtables

        if isinstance(qtables, str):
            try:
                # Parse whitespace-separated numbers; '#' starts a comment.
                lines = [
                    int(num)
                    for line in qtables.splitlines()
                    for num in line.split("#", 1)[0].split()
                ]
            except ValueError as e:
                msg = "Invalid quantization table"
                raise ValueError(msg) from e
            else:
                qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)]

        if isinstance(qtables, (tuple, list, dict)):
            if isinstance(qtables, dict):
                qtables = [
                    qtables[key] for key in range(len(qtables)) if key in qtables
                ]
            elif isinstance(qtables, tuple):
                qtables = list(qtables)
            if not (0 < len(qtables) < 5):
                msg = "None or too many quantization tables"
                raise ValueError(msg)
            for idx, table in enumerate(qtables):
                try:
                    if len(table) != 64:
                        msg = "Invalid quantization table"
                        raise TypeError(msg)
                    table_array = array.array("H", table)
                except TypeError as e:
                    msg = "Invalid quantization table"
                    raise ValueError(msg) from e
                else:
                    qtables[idx] = list(table_array)
            return qtables

    if qtables == "keep":
        if im.format != "JPEG":
            msg = "Cannot use 'keep' when original image is not a JPEG"
            raise ValueError(msg)
        qtables = getattr(im, "quantization", None)
    qtables = validate_qtables(qtables)

    extra = info.get("extra", b"")

    # Segment payload limit: 65535 minus the two length bytes.
    MAX_BYTES_IN_MARKER = 65533
    if xmp := info.get("xmp"):
        overhead_len = 29  # b"http://ns.adobe.com/xap/1.0/\x00"
        max_data_bytes_in_marker = MAX_BYTES_IN_MARKER - overhead_len
        if len(xmp) > max_data_bytes_in_marker:
            msg = "XMP data is too long"
            raise ValueError(msg)
        size = o16(2 + overhead_len + len(xmp))
        extra += b"\xff\xe1" + size + b"http://ns.adobe.com/xap/1.0/\x00" + xmp

    if icc_profile := info.get("icc_profile"):
        # ICC profiles bigger than one marker are split across multiple
        # numbered APP2 segments.
        overhead_len = 14  # b"ICC_PROFILE\0" + o8(i) + o8(len(markers))
        max_data_bytes_in_marker = MAX_BYTES_IN_MARKER - overhead_len
        markers = []
        while icc_profile:
            markers.append(icc_profile[:max_data_bytes_in_marker])
            icc_profile = icc_profile[max_data_bytes_in_marker:]
        i = 1
        for marker in markers:
            size = o16(2 + overhead_len + len(marker))
            extra += (
                b"\xff\xe2"
                + size
                + b"ICC_PROFILE\0"
                + o8(i)
                + o8(len(markers))
                + marker
            )
            i += 1

    comment = info.get("comment", im.info.get("comment"))

    # "progressive" is the official name, but older documentation
    # says "progression"
    # FIXME: issue a warning if the wrong form is used (post-1.1.7)
    progressive = info.get("progressive", False) or info.get("progression", False)

    optimize = info.get("optimize", False)

    exif = info.get("exif", b"")
    if isinstance(exif, Image.Exif):
        exif = exif.tobytes()
    if len(exif) > MAX_BYTES_IN_MARKER:
        msg = "EXIF data is too long"
        raise ValueError(msg)

    # get keyword arguments
    im.encoderconfig = (
        quality,
        progressive,
        info.get("smooth", 0),
        optimize,
        info.get("keep_rgb", False),
        info.get("streamtype", 0),
        dpi,
        subsampling,
        info.get("restart_marker_blocks", 0),
        info.get("restart_marker_rows", 0),
        qtables,
        comment,
        extra,
        exif,
    )

    # if we optimize, libjpeg needs a buffer big enough to hold the whole image
    # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is
    # channels*size, this is a value that's been used in a django patch.
    # https://github.com/matthewwithanm/django-imagekit/issues/50
    if optimize or progressive:
        # CMYK can be bigger
        if im.mode == "CMYK":
            bufsize = 4 * im.size[0] * im.size[1]
        # keep sets quality to -1, but the actual value may be high.
        elif quality >= 95 or quality == -1:
            bufsize = 2 * im.size[0] * im.size[1]
        else:
            bufsize = im.size[0] * im.size[1]
        if exif:
            bufsize += len(exif) + 5
        if extra:
            bufsize += len(extra) + 1
    else:
        # The EXIF info needs to be written as one block, + APP1, + one spare byte.
        # Ensure that our buffer is big enough. Same with the icc_profile block.
        bufsize = max(len(exif) + 5, len(extra) + 1)

    ImageFile._save(
        im, fp, [ImageFile._Tile("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize
    )
def _save_cjpeg(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* by shelling out to the IJG ``cjpeg`` command line utility.

    ALTERNATIVE code path.  The raw dump of the image is written to a
    temporary file which is always removed, even when ``cjpeg`` fails
    (the original code leaked it on a non-zero exit status).
    """
    # NOTE: renamed from "tempfile" to avoid shadowing the imported module.
    dump_path = im._dump()
    try:
        subprocess.check_call(["cjpeg", "-outfile", filename, dump_path])
    finally:
        try:
            os.unlink(dump_path)
        except OSError:
            pass
##
# Factory for making JPEG and MPO instances
def jpeg_factory(
    fp: IO[bytes], filename: str | bytes | None = None
) -> JpegImageFile | MpoImageFile:
    """Open *fp* as a JPEG, upgrading the result to an MPO when the
    embedded MP index reports more than one image."""
    im = JpegImageFile(fp, filename)
    try:
        mpheader = im._getmp()
        # Tag 45057 (0xB001) is the number of images in the MP file.
        if mpheader is not None and mpheader[45057] > 1:
            for segment, content in im.applist:
                if segment == "APP1" and b' hdrgm:Version="' in content:
                    # Ultra HDR images are not yet supported
                    return im
            # It's actually an MPO
            from .MpoImagePlugin import MpoImageFile

            # Don't reload everything, just convert it.
            im = MpoImageFile.adopt(im, mpheader)
    except (TypeError, IndexError):
        # It is really a JPEG
        pass
    except SyntaxError:
        warnings.warn(
            "Image appears to be a malformed MPO file, it will be "
            "interpreted as a base JPEG file"
        )
    return im
# ---------------------------------------------------------------------
# Registry stuff

# The factory (not the class) is registered so MPO upgrades can happen.
Image.register_open(JpegImageFile.format, jpeg_factory, _accept)
Image.register_save(JpegImageFile.format, _save)

Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"])

Image.register_mime(JpegImageFile.format, "image/jpeg")
#
# The Python Imaging Library.
# $Id$
#
# Basic McIdas support for PIL
#
# History:
# 1997-05-05 fl Created (8-bit images only)
# 2009-03-08 fl Added 16/32-bit support.
#
# Thanks to Richard Jones and Craig Swank for specs and samples.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import struct
from . import Image, ImageFile
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\x00\x00\x00\x00\x00\x00\x00\x04")
##
# Image plugin for McIdas area images.
class McIdasImageFile(ImageFile.ImageFile):
    format = "MCIDAS"
    format_description = "McIdas area file"

    def _open(self) -> None:
        # parse area file directory
        assert self.fp is not None
        s = self.fp.read(256)
        if not _accept(s) or len(s) != 256:
            msg = "not an McIdas area file"
            raise SyntaxError(msg)

        self.area_descriptor_raw = s
        # Prepend a dummy entry so directory words can be indexed 1-based.
        self.area_descriptor = w = [0, *struct.unpack("!64i", s)]

        # get mode
        # w[11] selects the sample size: 1, 2 or 4 bytes per element.
        if w[11] == 1:
            mode = rawmode = "L"
        elif w[11] == 2:
            mode = rawmode = "I;16B"
        elif w[11] == 4:
            # FIXME: add memory map support
            mode = "I"
            rawmode = "I;32B"
        else:
            msg = "unsupported McIdas format"
            raise SyntaxError(msg)

        self._mode = mode
        self._size = w[10], w[9]

        offset = w[34] + w[15]
        stride = w[15] + w[10] * w[11] * w[14]

        self.tile = [
            ImageFile._Tile("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))
        ]
# --------------------------------------------------------------------
# registry

Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)

# no default extension
# venv\Lib\site-packages\PIL\MicImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import olefile
from . import Image, TiffImagePlugin
#
# --------------------------------------------------------------------
def _accept(prefix: bytes) -> bool:
    """Check whether *prefix* begins with the OLE compound-file magic bytes."""
    magic = olefile.MAGIC
    return prefix[: len(magic)] == magic
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
    """Image plugin for Microsoft's Image Composer (MIC) file format.

    A MIC file is an OLE compound document; frames live in ".ACI"
    storages as streams named "Image" and are decoded by delegating to
    the TIFF plugin.
    """

    format = "MIC"
    format_description = "Microsoft Image Composer"
    # The OLE container owns the underlying stream; keep fp open after load.
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # read the OLE directory and see if this is a likely
        # to be a Microsoft Image Composer file
        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an MIC file; invalid OLE file"
            raise SyntaxError(msg) from e

        # find ACI subfiles with Image members (maybe not the
        # best way to identify MIC files, but what the... ;-)
        self.images = [
            path
            for path in self.ole.listdir()
            if path[1:] and path[0].endswith(".ACI") and path[1] == "Image"
        ]

        # if we didn't find any images, this is probably not
        # an MIC file.
        if not self.images:
            msg = "not an MIC file; no image entries"
            raise SyntaxError(msg)

        self.frame = -1
        self._n_frames = len(self.images)
        self.is_animated = self._n_frames > 1

        # Keep a private handle on the container stream; seek() replaces
        # self.fp with per-frame OLE substreams.
        self.__fp = self.fp
        self.seek(0)

    def seek(self, frame: int) -> None:
        """Select *frame* by reopening its OLE substream as a TIFF."""
        if not self._seek_check(frame):
            return
        filename = self.images[frame]
        self.fp = self.ole.openstream(filename)
        TiffImagePlugin.TiffImageFile._open(self)
        self.frame = frame

    def tell(self) -> int:
        """Return the index of the current frame."""
        return self.frame

    def close(self) -> None:
        # Close both the container stream and the OLE directory.
        self.__fp.close()
        self.ole.close()
        super().close()

    def __exit__(self, *args: object) -> None:
        self.__fp.close()
        self.ole.close()
        super().__exit__()
#
# --------------------------------------------------------------------
# Register the opener, the OLE magic check, and the .mic extension.
Image.register_open(MicImageFile.format, MicImageFile, _accept)
Image.register_extension(MicImageFile.format, ".mic")
venv\Lib\site-packages\PIL\MpegImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# MPEG file handling
#
# History:
# 95-09-09 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile
from ._binary import i8
from ._typing import SupportsRead
#
# Bitstream parser
class BitStream:
    """Minimal big-endian bit reader over a byte stream."""

    def __init__(self, fp: SupportsRead[bytes]) -> None:
        self.fp = fp
        self.bits = 0  # number of valid bits currently buffered
        self.bitbuffer = 0  # buffered bits, right-aligned

    def next(self) -> int:
        """Fetch the next raw byte from the stream as an int."""
        return i8(self.fp.read(1))

    def peek(self, bits: int) -> int:
        """Return the next *bits* bits without consuming them."""
        while self.bits < bits:
            self.bitbuffer = (self.bitbuffer << 8) + self.next()
            self.bits += 8
        mask = (1 << bits) - 1
        return (self.bitbuffer >> (self.bits - bits)) & mask

    def skip(self, bits: int) -> None:
        """Consume *bits* bits, refilling the buffer as needed."""
        while self.bits < bits:
            self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1))
            self.bits += 8
        self.bits -= bits

    def read(self, bits: int) -> int:
        """Consume and return the next *bits* bits."""
        value = self.peek(bits)
        self.bits -= bits
        return value
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\x00\x00\x01\xb3")
##
# Image plugin for MPEG streams. This plugin can identify a stream,
# but it cannot read it.
class MpegImageFile(ImageFile.ImageFile):
    """Identify MPEG streams (mode and size only; data is not decoded)."""

    format = "MPEG"
    format_description = "MPEG"

    def _open(self) -> None:
        assert self.fp is not None
        bitstream = BitStream(self.fp)
        # The sequence header opens with the 32-bit start code 0x000001B3.
        if bitstream.read(32) != 0x1B3:
            msg = "not an MPEG file"
            raise SyntaxError(msg)
        self._mode = "RGB"
        # Width and height follow as two 12-bit fields.
        self._size = bitstream.read(12), bitstream.read(12)
# --------------------------------------------------------------------
# Registry stuff
# Register the identifier (no decoder exists), extensions, and MIME type.
Image.register_open(MpegImageFile.format, MpegImageFile, _accept)
Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"])
Image.register_mime(MpegImageFile.format, "video/mpeg")
venv\Lib\site-packages\PIL\MpoImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# MPO file handling
#
# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
# Camera & Imaging Products Association)
#
# The multi-picture object combines multiple JPEG images (with a modified EXIF
# data format) into a single file. While it can theoretically be used much like
# a GIF animation, it is commonly used to represent 3D photographs and is (as
# of this writing) the most commonly used format by 3D cameras.
#
# History:
# 2014-03-13 Feneric Created
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
import struct
from typing import IO, Any, cast
from . import (
Image,
ImageFile,
ImageSequence,
JpegImagePlugin,
TiffImagePlugin,
)
from ._binary import o32le
from ._util import DeferredError
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # A single-frame MPO is just a JPEG; delegate directly.
    JpegImagePlugin._save(im, fp, filename)
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
append_images = im.encoderinfo.get("append_images", [])
if not append_images and not getattr(im, "is_animated", False):
_save(im, fp, filename)
return
mpf_offset = 28
offsets: list[int] = []
im_sequences = [im, *append_images]
total = sum(getattr(seq, "n_frames", 1) for seq in im_sequences)
for im_sequence in im_sequences:
for im_frame in ImageSequence.Iterator(im_sequence):
if not offsets:
# APP2 marker
ifd_length = 66 + 16 * total
im_frame.encoderinfo["extra"] = (
b"\xff\xe2"
+ struct.pack(">H", 6 + ifd_length)
+ b"MPF\0"
+ b" " * ifd_length
)
exif = im_frame.encoderinfo.get("exif")
if isinstance(exif, Image.Exif):
exif = exif.tobytes()
im_frame.encoderinfo["exif"] = exif
if exif:
mpf_offset += 4 + len(exif)
JpegImagePlugin._save(im_frame, fp, filename)
offsets.append(fp.tell())
else:
encoderinfo = im_frame._attach_default_encoderinfo(im)
im_frame.save(fp, "JPEG")
im_frame.encoderinfo = encoderinfo
offsets.append(fp.tell() - offsets[-1])
ifd = TiffImagePlugin.ImageFileDirectory_v2()
ifd[0xB000] = b"0100"
ifd[0xB001] = len(offsets)
mpentries = b""
data_offset = 0
for i, size in enumerate(offsets):
if i == 0:
mptype = 0x030000 # Baseline MP Primary Image
else:
mptype = 0x000000 # Undefined
mpentries += struct.pack(" None:
self.fp.seek(0) # prep the fp in order to pass the JPEG test
JpegImagePlugin.JpegImageFile._open(self)
self._after_jpeg_open()
def _after_jpeg_open(self, mpheader: dict[int, Any] | None = None) -> None:
    """Finish opening once the underlying JPEG has been parsed.

    Reads the MP (multi-picture) header to discover per-frame data
    offsets; *mpheader* may be supplied to avoid re-parsing it.
    Raises ValueError when no MP header is present.
    """
    self.mpinfo = mpheader if mpheader is not None else self._getmp()
    if self.mpinfo is None:
        msg = "Image appears to be a malformed MPO file"
        raise ValueError(msg)
    # Tag 0xB001: number of images; 0xB002: the MP entry list.
    self.n_frames = self.mpinfo[0xB001]
    # Absolute frame offsets: entry offset plus the JPEG "mpoffset" base.
    self.__mpoffsets = [
        mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002]
    ]
    # The first image's entry records 0; anchor it at the file start.
    self.__mpoffsets[0] = 0
    # Note that the following assertion will only be invalid if something
    # gets broken within JpegImagePlugin.
    assert self.n_frames == len(self.__mpoffsets)
    del self.info["mpoffset"]  # no longer needed
    self.is_animated = self.n_frames > 1
    self._fp = self.fp  # FIXME: hack
    self._fp.seek(self.__mpoffsets[0])  # get ready to read first frame
    self.__frame = 0
    self.offset = 0
    # for now we can only handle reading and individual frame extraction
    self.readonly = 1
def load_seek(self, pos: int) -> None:
    """Seek the frame data stream, surfacing any deferred open error."""
    fp = self._fp
    if isinstance(fp, DeferredError):
        raise fp.ex
    fp.seek(pos)
def seek(self, frame: int) -> None:
    """Seek to *frame*, re-running the JPEG parser at that frame's offset.

    Raises ValueError when the frame's data region is empty.
    """
    if not self._seek_check(frame):
        return
    if isinstance(self._fp, DeferredError):
        raise self._fp.ex
    self.fp = self._fp
    self.offset = self.__mpoffsets[frame]

    # Drop the previous frame's EXIF so the new frame's (if any) is read.
    original_exif = self.info.get("exif")
    if "exif" in self.info:
        del self.info["exif"]

    self.fp.seek(self.offset + 2)  # skip SOI marker
    if not self.fp.read(2):
        msg = "No data found for frame"
        raise ValueError(msg)
    self.fp.seek(self.offset)
    JpegImagePlugin.JpegImageFile._open(self)
    if self.info.get("exif") != original_exif:
        self._reload_exif()

    # Re-point the single JPEG tile at this frame's data offset.
    self.tile = [
        ImageFile._Tile("jpeg", (0, 0) + self.size, self.offset, self.tile[0][-1])
    ]
    self.__frame = frame
def tell(self) -> int:
    """Return the index of the current frame."""
    return self.__frame
@staticmethod
def adopt(
    jpeg_instance: JpegImagePlugin.JpegImageFile,
    mpheader: dict[int, Any] | None = None,
) -> MpoImageFile:
    """Re-class an already-opened JpegImageFile as an MpoImageFile.

    Useful when a JPEG reveals itself to be an MPO after ``_open`` has
    already run: the instance is mutated in place (its ``__class__`` is
    swapped), so the JPEG parsing is not repeated.
    """
    jpeg_instance.__class__ = MpoImageFile
    adopted = cast(MpoImageFile, jpeg_instance)
    adopted._after_jpeg_open(mpheader)
    return adopted
# ---------------------------------------------------------------------
# Registry stuff

# Note that since MPO shares a factory with JPEG, we do not need to do a
# separate registration for it here.
# Image.register_open(MpoImageFile.format,
#                     JpegImagePlugin.jpeg_factory, _accept)
Image.register_save(MpoImageFile.format, _save)
Image.register_save_all(MpoImageFile.format, _save_all)
Image.register_extension(MpoImageFile.format, ".mpo")
Image.register_mime(MpoImageFile.format, "image/mpo")
venv\Lib\site-packages\PIL\MspImagePlugin.py
#
# The Python Imaging Library.
#
# MSP file handling
#
# This is the format used by the Paint program in Windows 1 and 2.
#
# History:
# 95-09-05 fl Created
# 97-01-03 fl Read/write MSP images
# 17-02-21 es Fixed RLE interpretation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-97.
# Copyright (c) Eric Soroos 2017.
#
# See the README file for information on usage and redistribution.
#
# More info on this format: https://archive.org/details/gg243631
# Page 313:
# Figure 205. Windows Paint Version 1: "DanM" Format
# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03
#
# See also: https://www.fileformat.info/format/mspaint/egff.htm
from __future__ import annotations
import io
import struct
from typing import IO
from . import Image, ImageFile
from ._binary import i16le as i16
from ._binary import o16le as o16
#
# read MSP files
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"DanM", b"LinS"))
##
# Image plugin for Windows MSP images. This plugin supports both
# uncompressed (Windows 1.0) and RLE-compressed (Windows 2.0) images.
class MspImageFile(ImageFile.ImageFile):
    """Image plugin for Windows Paint (MSP) images."""

    format = "MSP"
    format_description = "Windows Paint"

    def _open(self) -> None:
        # 32-byte header: signature, geometry, and an XOR checksum.
        assert self.fp is not None
        header = self.fp.read(32)
        if not _accept(header):
            msg = "not an MSP file"
            raise SyntaxError(msg)

        # The sixteen 16-bit header words XOR to zero when intact.
        checksum = 0
        for offset in range(0, 32, 2):
            checksum ^= i16(header, offset)
        if checksum != 0:
            msg = "bad MSP checksum"
            raise SyntaxError(msg)

        self._mode = "1"
        self._size = i16(header, 4), i16(header, 6)

        # Version 1 ("DanM") stores raw packed bits; version 2 ("LinS")
        # uses the RLE scheme handled by the registered "MSP" decoder.
        if header.startswith(b"DanM"):
            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 32, "1")]
        else:
            self.tile = [ImageFile._Tile("MSP", (0, 0) + self.size, 32)]
class MspDecoder(ImageFile.PyDecoder):
    # The algo for the MSP decoder is from
    # https://www.fileformat.info/format/mspaint/egff.htm
    # cc-by-attribution -- That page references is taken from the
    # Encyclopedia of Graphics File Formats and is licensed by
    # O'Reilly under the Creative Common/Attribution license
    #
    # For RLE encoded files, the 32byte header is followed by a scan
    # line map, encoded as one 16bit word of encoded byte length per
    # line.
    #
    # NOTE: the encoded length of the line can be 0. This was not
    # handled in the previous version of this encoder, and there's no
    # mention of how to handle it in the documentation. From the few
    # examples I've seen, I've assumed that it is a fill of the
    # background color, in this case, white.
    #
    #
    # Pseudocode of the decoder:
    # Read a BYTE value as the RunType
    # If the RunType value is zero
    # Read next byte as the RunCount
    # Read the next byte as the RunValue
    # Write the RunValue byte RunCount times
    # If the RunType value is non-zero
    # Use this value as the RunCount
    # Read and write the next RunCount bytes literally
    #
    # e.g.:
    # 0x00 03 ff 05 00 01 02 03 04
    # would yield the bytes:
    # 0xff ff ff 00 01 02 03 04
    #
    # which are then interpreted as a bit packed mode '1' image

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        """Expand the RLE scan lines into a packed mode "1" image.

        Returns (-1, 0): all data consumed, no error.
        Raises OSError on a truncated or corrupted file.
        """
        assert self.fd is not None

        img = io.BytesIO()
        # An all-ones (white) packed row, substituted for zero-length rows.
        blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8))
        try:
            self.fd.seek(32)
            # One little-endian 16-bit encoded length per scan line.
            rowmap = struct.unpack_from(
                f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2)
            )
        except struct.error as e:
            msg = "Truncated MSP file in row map"
            raise OSError(msg) from e

        for x, rowlen in enumerate(rowmap):
            try:
                if rowlen == 0:
                    img.write(blank_line)
                    continue
                row = self.fd.read(rowlen)
                if len(row) != rowlen:
                    msg = f"Truncated MSP file, expected {rowlen} bytes on row {x}"
                    raise OSError(msg)
                idx = 0
                while idx < rowlen:
                    runtype = row[idx]
                    idx += 1
                    if runtype == 0:
                        # Run: a count byte followed by the value byte.
                        (runcount, runval) = struct.unpack_from("Bc", row, idx)
                        img.write(runval * runcount)
                        idx += 2
                    else:
                        # Literal: runtype is the number of bytes to copy.
                        runcount = runtype
                        img.write(row[idx : idx + runcount])
                        idx += runcount
            except struct.error as e:
                msg = f"Corrupted MSP file in row {x}"
                raise OSError(msg) from e

        self.set_as_raw(img.getvalue(), "1")
        return -1, 0
Image.register_decoder("MSP", MspDecoder)
#
# write MSP files (uncompressed only)
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* as an uncompressed version-1 ("DanM") MSP file.

    Raises OSError for any mode other than "1" (MSP is 1-bit only).
    """
    if im.mode != "1":
        msg = f"cannot write mode {im.mode} as MSP"
        raise OSError(msg)

    # create MSP header
    header = [0] * 16

    # i16 packs each ASCII pair into a little-endian 16-bit word.
    header[0], header[1] = i16(b"Da"), i16(b"nM")  # version 1
    header[2], header[3] = im.size
    header[4], header[5] = 1, 1
    header[6], header[7] = 1, 1
    header[8], header[9] = im.size

    # XOR checksum over the header words; field 12 is still zero here, so
    # storing the result there makes the completed header XOR to zero.
    checksum = 0
    for h in header:
        checksum = checksum ^ h
    header[12] = checksum  # FIXME: is this the right field?

    # header
    for h in header:
        fp.write(o16(h))

    # image body
    ImageFile._save(im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 32, "1")])
#
# registry
# Register open/save handlers and the .msp extension.
Image.register_open(MspImageFile.format, MspImageFile, _accept)
Image.register_save(MspImageFile.format, _save)
Image.register_extension(MspImageFile.format, ".msp")
venv\Lib\site-packages\PIL\PaletteFile.py
#
# Python Imaging Library
# $Id$
#
# stuff to read simple, teragon-style palette files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from typing import IO
from ._binary import o8
class PaletteFile:
    """File handler for Teragon-style palette files."""

    rawmode = "RGB"

    def __init__(self, fp: IO[bytes]) -> None:
        # Start from an identity grayscale ramp; file lines override entries.
        entries = [o8(n) * 3 for n in range(256)]
        while line := fp.readline():
            if line.startswith(b"#"):
                continue  # comment line
            if len(line) > 100:
                msg = "bad palette file"
                raise SyntaxError(msg)
            fields = [int(token) for token in line.split()]
            try:
                index, red, green, blue = fields
            except ValueError:
                # Two-field form: a single gray value.
                index, red = fields
                green = blue = red
            if 0 <= index <= 255:
                entries[index] = o8(red) + o8(green) + o8(blue)
        self.palette = b"".join(entries)

    def getpalette(self) -> tuple[bytes, str]:
        """Return the palette as (raw bytes, rawmode)."""
        return self.palette, self.rawmode
#
# The Python Imaging Library.
# $Id$
#
# PCD file handling
#
# History:
# 96-05-10 fl Created
# 96-05-27 fl Added draft mode (128x192, 256x384)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile
##
# Image plugin for PhotoCD images. This plugin only reads the 768x512
# image from the file; higher resolutions are encoded in a proprietary
# encoding.
class PcdImageFile(ImageFile.ImageFile):
    """Image plugin for Kodak PhotoCD images.

    Only the 768x512 base image is read; the higher resolutions use a
    proprietary encoding.
    """

    format = "PCD"
    format_description = "Kodak PhotoCD"

    def _open(self) -> None:
        # rough
        assert self.fp is not None
        self.fp.seek(2048)
        sector = self.fp.read(2048)
        if not sector.startswith(b"PCD_"):
            msg = "not a PCD file"
            raise SyntaxError(msg)

        # The low two bits of byte 1538 encode the capture orientation.
        self.tile_post_rotate = None
        rotation = sector[1538] & 3
        if rotation == 1:
            self.tile_post_rotate = 90
        elif rotation == 3:
            self.tile_post_rotate = -90

        self._mode = "RGB"
        self._size = 768, 512  # FIXME: not correct for rotated images!
        self.tile = [ImageFile._Tile("pcd", (0, 0) + self.size, 96 * 2048)]

    def load_end(self) -> None:
        """Apply any pending rotation once pixel data has been loaded."""
        if self.tile_post_rotate:
            # Handle rotated PCDs
            self.im = self.im.rotate(self.tile_post_rotate)
            self._size = self.im.size
#
# registry
# No _accept sniffer is registered: the PCD signature sits at offset 2048,
# beyond the prefix the registry inspects.
Image.register_open(PcdImageFile.format, PcdImageFile)
Image.register_extension(PcdImageFile.format, ".pcd")
venv\Lib\site-packages\PIL\PcfFontFile.py
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id$
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
from typing import BinaryIO, Callable
from . import FontFile, Image
from ._binary import i8
from ._binary import i16be as b16
from ._binary import i16le as l16
from ._binary import i32be as b32
from ._binary import i32le as l32
# --------------------------------------------------------------------
# declarations

PCF_MAGIC = 0x70636601  # "\x01fcp"

# Table-of-contents entry types (bit flags, one per PCF table).
PCF_PROPERTIES = 1 << 0
PCF_ACCELERATORS = 1 << 1
PCF_METRICS = 1 << 2
PCF_BITMAPS = 1 << 3
PCF_INK_METRICS = 1 << 4
PCF_BDF_ENCODINGS = 1 << 5
PCF_SWIDTHS = 1 << 6
PCF_GLYPH_NAMES = 1 << 7
PCF_BDF_ACCELERATORS = 1 << 8

# Row width in bytes for each of the four glyph-row padding options
# (pad to 1, 2, 4 or 8 bytes), indexed by (format & 3).
BYTES_PER_ROW: list[Callable[[int], int]] = [
    lambda bits: ((bits + 7) >> 3),
    lambda bits: ((bits + 15) >> 3) & ~1,
    lambda bits: ((bits + 31) >> 3) & ~3,
    lambda bits: ((bits + 63) >> 3) & ~7,
]
def sz(s: bytes, o: int) -> bytes:
    """Return the NUL-terminated byte string starting at offset *o* in *s*."""
    end = s.index(b"\0", o)
    return s[o:end]
class PcfFontFile(FontFile.FontFile):
    """Font file plugin for the X11 PCF format."""

    name = "name"

    def __init__(self, fp: BinaryIO, charset_encoding: str = "iso8859-1"):
        # charset_encoding maps single-byte character codes to the font's
        # encoding when building the glyph table.
        self.charset_encoding = charset_encoding

        magic = l32(fp.read(4))
        if magic != PCF_MAGIC:
            msg = "not a PCF file"
            raise SyntaxError(msg)

        super().__init__()

        # Read the table of contents: each entry is
        # type -> (format, size, offset).
        count = l32(fp.read(4))
        self.toc = {}
        for i in range(count):
            type = l32(fp.read(4))
            self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))

        self.fp = fp

        self.info = self._load_properties()

        metrics = self._load_metrics()
        bitmaps = self._load_bitmaps(metrics)
        encoding = self._load_encoding()

        #
        # create glyph structure

        for ch, ix in enumerate(encoding):
            if ix is not None:
                (
                    xsize,
                    ysize,
                    left,
                    right,
                    width,
                    ascent,
                    descent,
                    attributes,
                ) = metrics[ix]
                # glyph entry: (advance, bounding box, crop box, bitmap).
                self.glyph[ch] = (
                    (width, 0),
                    (left, descent - ysize, xsize + left, descent),
                    (0, 0, xsize, ysize),
                    bitmaps[ix],
                )

    def _getformat(
        self, tag: int
    ) -> tuple[BinaryIO, int, Callable[[bytes], int], Callable[[bytes], int]]:
        """Seek to the table *tag* and return (fp, format, i16, i32).

        The returned readers are big- or little-endian according to the
        table's format word (bit 2 set => big-endian).
        """
        format, size, offset = self.toc[tag]

        fp = self.fp
        fp.seek(offset)

        format = l32(fp.read(4))

        if format & 4:
            i16, i32 = b16, b32
        else:
            i16, i32 = l16, l32

        return fp, format, i16, i32

    def _load_properties(self) -> dict[bytes, bytes | int]:
        """Read the PCF_PROPERTIES table into a name -> value mapping."""
        #
        # font properties

        properties = {}

        fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)

        nprops = i32(fp.read(4))

        # read property description
        # Each descriptor: (name offset, is-string flag, value-or-offset).
        p = [(i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))) for _ in range(nprops)]

        if nprops & 3:
            fp.seek(4 - (nprops & 3), io.SEEK_CUR)  # pad

        data = fp.read(i32(fp.read(4)))

        for k, s, v in p:
            property_value: bytes | int = sz(data, v) if s else v
            properties[sz(data, k)] = property_value

        return properties

    def _load_metrics(self) -> list[tuple[int, int, int, int, int, int, int, int]]:
        """Read per-glyph metrics as
        (xsize, ysize, left, right, width, ascent, descent, attributes)."""
        #
        # font metrics

        metrics: list[tuple[int, int, int, int, int, int, int, int]] = []

        fp, format, i16, i32 = self._getformat(PCF_METRICS)

        append = metrics.append

        if (format & 0xFF00) == 0x100:
            # "compressed" metrics: one byte per field, biased by 128.
            for i in range(i16(fp.read(2))):
                left = i8(fp.read(1)) - 128
                right = i8(fp.read(1)) - 128
                width = i8(fp.read(1)) - 128
                ascent = i8(fp.read(1)) - 128
                descent = i8(fp.read(1)) - 128
                xsize = right - left
                ysize = ascent + descent
                append((xsize, ysize, left, right, width, ascent, descent, 0))

        else:
            # "jumbo" metrics: full 16-bit fields plus attributes.
            for i in range(i32(fp.read(4))):
                left = i16(fp.read(2))
                right = i16(fp.read(2))
                width = i16(fp.read(2))
                ascent = i16(fp.read(2))
                descent = i16(fp.read(2))
                attributes = i16(fp.read(2))
                xsize = right - left
                ysize = ascent + descent
                append((xsize, ysize, left, right, width, ascent, descent, attributes))

        return metrics

    def _load_bitmaps(
        self, metrics: list[tuple[int, int, int, int, int, int, int, int]]
    ) -> list[Image.Image]:
        """Read the glyph bitmaps as a list of mode "1" images.

        Raises OSError when the bitmap count disagrees with *metrics*.
        """
        #
        # bitmap data

        fp, format, i16, i32 = self._getformat(PCF_BITMAPS)

        nbitmaps = i32(fp.read(4))

        if nbitmaps != len(metrics):
            msg = "Wrong number of bitmaps"
            raise OSError(msg)

        offsets = [i32(fp.read(4)) for _ in range(nbitmaps)]

        bitmap_sizes = [i32(fp.read(4)) for _ in range(4)]

        # byteorder = format & 4 # non-zero => MSB
        bitorder = format & 8  # non-zero => MSB
        padindex = format & 3

        bitmapsize = bitmap_sizes[padindex]
        # Sentinel end offset so offsets[i:i+2] always yields a pair.
        offsets.append(bitmapsize)

        data = fp.read(bitmapsize)

        pad = BYTES_PER_ROW[padindex]
        mode = "1;R"
        if bitorder:
            mode = "1"

        bitmaps = []
        for i in range(nbitmaps):
            xsize, ysize = metrics[i][:2]
            b, e = offsets[i : i + 2]
            bitmaps.append(
                Image.frombytes("1", (xsize, ysize), data[b:e], "raw", mode, pad(xsize))
            )

        return bitmaps

    def _load_encoding(self) -> list[int | None]:
        """Map character codes 0..255 to bitmap indices (None = no glyph)."""
        fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)

        first_col, last_col = i16(fp.read(2)), i16(fp.read(2))
        first_row, last_row = i16(fp.read(2)), i16(fp.read(2))

        i16(fp.read(2))  # default

        nencoding = (last_col - first_col + 1) * (last_row - first_row + 1)

        # map character code to bitmap index
        encoding: list[int | None] = [None] * min(256, nencoding)

        encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)]

        for i in range(first_col, len(encoding)):
            try:
                # 0xFFFF marks "no glyph" in the PCF encoding table.
                encoding_offset = encoding_offsets[
                    ord(bytearray([i]).decode(self.charset_encoding))
                ]
                if encoding_offset != 0xFFFF:
                    encoding[i] = encoding_offset
            except UnicodeDecodeError:
                # character is not supported in selected encoding
                pass

        return encoding
venv\Lib\site-packages\PIL\PcxImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# PCX file handling
#
# This format was originally used by ZSoft's popular PaintBrush
# program for the IBM PC. It is also supported by many MS-DOS and
# Windows applications, including the Windows PaintBrush program in
# Windows 3.
#
# history:
# 1995-09-01 fl Created
# 1996-05-20 fl Fixed RGB support
# 1997-01-03 fl Fixed 2-bit and 4-bit support
# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1)
# 1999-02-07 fl Added write support
# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust
# 2002-07-30 fl Seek from to current position, not beginning of file
# 2003-06-03 fl Extract DPI settings (info["dpi"])
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import logging
from typing import IO
from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import o8
from ._binary import o16le as o16
logger = logging.getLogger(__name__)
def _accept(prefix: bytes) -> bool:
return prefix[0] == 10 and prefix[1] in [0, 2, 3, 5]
##
# Image plugin for Paintbrush images.
class PcxImageFile(ImageFile.ImageFile):
    """Image plugin for ZSoft Paintbrush (PCX) images."""

    format = "PCX"
    format_description = "Paintbrush"

    def _open(self) -> None:
        # header
        assert self.fp is not None
        s = self.fp.read(68)
        if not _accept(s):
            msg = "not a PCX file"
            raise SyntaxError(msg)

        # image
        # The header stores inclusive pixel coordinates, hence the +1.
        bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1
        if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
            msg = "bad PCX image size"
            raise SyntaxError(msg)
        logger.debug("BBox: %s %s %s %s", *bbox)

        # Pixel data starts after the fixed 128-byte header (68 read + 60).
        offset = self.fp.tell() + 60

        # format
        version = s[1]
        bits = s[3]
        planes = s[65]
        provided_stride = i16(s, 66)
        logger.debug(
            "PCX version %s, bits %s, planes %s, stride %s",
            version,
            bits,
            planes,
            provided_stride,
        )

        self.info["dpi"] = i16(s, 12), i16(s, 14)

        if bits == 1 and planes == 1:
            mode = rawmode = "1"

        elif bits == 1 and planes in (2, 4):
            # Multi-plane 1-bit image using the 16-color header palette.
            mode = "P"
            rawmode = f"P;{planes}L"
            self.palette = ImagePalette.raw("RGB", s[16:64])

        elif version == 5 and bits == 8 and planes == 1:
            mode = rawmode = "L"
            # FIXME: hey, this doesn't work with the incremental loader !!!
            # A 769-byte trailer (marker byte 12 + 256 RGB triples) may
            # carry an 8-bit palette at the very end of the file.
            self.fp.seek(-769, io.SEEK_END)
            s = self.fp.read(769)
            if len(s) == 769 and s[0] == 12:
                # check if the palette is linear grayscale
                for i in range(256):
                    if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3:
                        mode = rawmode = "P"
                        break
                if mode == "P":
                    self.palette = ImagePalette.raw("RGB", s[1:])

        elif version == 5 and bits == 8 and planes == 3:
            mode = "RGB"
            rawmode = "RGB;L"

        else:
            msg = "unknown PCX mode"
            raise OSError(msg)

        self._mode = mode
        self._size = bbox[2] - bbox[0], bbox[3] - bbox[1]

        # Don't trust the passed in stride.
        # Calculate the approximate position for ourselves.
        # CVE-2020-35653
        stride = (self._size[0] * bits + 7) // 8

        # While the specification states that this must be even,
        # not all images follow this
        if provided_stride != stride:
            stride += stride % 2

        bbox = (0, 0) + self.size
        logger.debug("size: %sx%s", *self.size)

        self.tile = [ImageFile._Tile("pcx", bbox, offset, (rawmode, planes * stride))]
# --------------------------------------------------------------------
# save PCX files

# Supported save modes mapped to PCX header parameters.
SAVE = {
    # mode: (version, bits, planes, raw mode)
    "1": (2, 1, 1, "1"),
    "L": (5, 8, 1, "L"),
    "P": (5, 8, 1, "P"),
    "RGB": (5, 8, 3, "RGB;L"),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as a PCX file.

    Raises ValueError for modes not listed in SAVE.
    """
    try:
        version, bits, planes, rawmode = SAVE[im.mode]
    except KeyError as e:
        msg = f"Cannot save {im.mode} images as PCX"
        raise ValueError(msg) from e

    # bytes per plane
    stride = (im.size[0] * bits + 7) // 8
    # stride should be even
    stride += stride % 2
    # Stride needs to be kept in sync with the PcxEncode.c version.
    # Ideally it should be passed in in the state, but the bytes value
    # gets overwritten.

    logger.debug(
        "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d",
        im.size[0],
        bits,
        stride,
    )

    # under windows, we could determine the current screen size with
    # "Image.core.display_mode()[1]", but I think that's overkill...

    screen = im.size

    dpi = 100, 100

    # PCX header
    fp.write(
        o8(10)
        + o8(version)
        + o8(1)
        + o8(bits)
        + o16(0)
        + o16(0)
        + o16(im.size[0] - 1)
        + o16(im.size[1] - 1)
        + o16(dpi[0])
        + o16(dpi[1])
        + b"\0" * 24
        + b"\xff" * 24
        + b"\0"
        + o8(planes)
        + o16(stride)
        + o16(1)
        + o16(screen[0])
        + o16(screen[1])
        + b"\0" * 54
    )

    # The header must total exactly 128 bytes (debug-time check only;
    # assert is stripped under -O).
    assert fp.tell() == 128

    ImageFile._save(
        im, fp, [ImageFile._Tile("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))]
    )

    if im.mode == "P":
        # colour palette
        fp.write(o8(12))
        palette = im.im.getpalette("RGB", "RGB")
        palette += b"\x00" * (768 - len(palette))
        fp.write(palette)  # 768 bytes
    elif im.mode == "L":
        # grayscale palette
        fp.write(o8(12))
        for i in range(256):
            fp.write(o8(i) * 3)
# --------------------------------------------------------------------
# registry

# Register open/save handlers, the .pcx extension, and the MIME type.
Image.register_open(PcxImageFile.format, PcxImageFile, _accept)
Image.register_save(PcxImageFile.format, _save)

Image.register_extension(PcxImageFile.format, ".pcx")
Image.register_mime(PcxImageFile.format, "image/x-pcx")
venv\Lib\site-packages\PIL\PdfImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# PDF (Acrobat) file handling
#
# History:
# 1996-07-16 fl Created
# 1997-01-18 fl Fixed header
# 2004-02-21 fl Fixes for 1/L/CMYK images, etc.
# 2004-02-24 fl Fixes for 1 and P images.
#
# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996-1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
##
# Image plugin for PDF images (output only).
##
from __future__ import annotations
import io
import math
import os
import time
from typing import IO, Any
from . import Image, ImageFile, ImageSequence, PdfParser, __version__, features
#
# --------------------------------------------------------------------
# object ids:
# 1. catalogue
# 2. pages
# 3. image
# 4. page
# 5. page contents
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # Multi-page save: delegate to _save with save_all enabled.
    _save(im, fp, filename, save_all=True)
##
# (Internal) Image save plugin for the PDF format.
def _write_image(
    im: Image.Image,
    filename: str | bytes,
    existing_pdf: PdfParser.PdfParser,
    image_refs: list[PdfParser.IndirectReference],
) -> tuple[PdfParser.IndirectReference, str]:
    """Serialize *im* into *existing_pdf* as an image XObject.

    Consumes the next reference from *image_refs* (and a second one when a
    "P" image with transparency recurses for its soft mask) and returns
    (image reference, procset name). Raises ValueError for unsupported
    modes.
    """
    # FIXME: Should replace ASCIIHexDecode with RunLengthDecode
    # (packbits) or LZWDecode (tiff/lzw compression). Note that
    # PDF 1.2 also supports Flatedecode (zip compression).

    params = None
    decode = None

    #
    # Get image characteristics

    width, height = im.size

    dict_obj: dict[str, Any] = {"BitsPerComponent": 8}
    if im.mode == "1":
        if features.check("libtiff"):
            decode_filter = "CCITTFaxDecode"
            dict_obj["BitsPerComponent"] = 1
            params = PdfParser.PdfArray(
                [
                    PdfParser.PdfDict(
                        {
                            "K": -1,
                            "BlackIs1": True,
                            "Columns": width,
                            "Rows": height,
                        }
                    )
                ]
            )
        else:
            decode_filter = "DCTDecode"
        dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
        procset = "ImageB"  # grayscale
    elif im.mode == "L":
        decode_filter = "DCTDecode"
        # params = f"<< /Predictor 15 /Columns {width-2} >>"
        dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray")
        procset = "ImageB"  # grayscale
    elif im.mode == "LA":
        decode_filter = "JPXDecode"
        # params = f"<< /Predictor 15 /Columns {width-2} >>"
        procset = "ImageB"  # grayscale
        dict_obj["SMaskInData"] = 1
    elif im.mode == "P":
        decode_filter = "ASCIIHexDecode"
        palette = im.getpalette()
        assert palette is not None
        dict_obj["ColorSpace"] = [
            PdfParser.PdfName("Indexed"),
            PdfParser.PdfName("DeviceRGB"),
            len(palette) // 3 - 1,
            PdfParser.PdfBinary(palette),
        ]
        procset = "ImageI"  # indexed color
        if "transparency" in im.info:
            # Recurse to embed the alpha channel as a soft mask object.
            smask = im.convert("LA").getchannel("A")
            smask.encoderinfo = {}
            image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0]
            dict_obj["SMask"] = image_ref
    elif im.mode == "RGB":
        decode_filter = "DCTDecode"
        dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceRGB")
        procset = "ImageC"  # color images
    elif im.mode == "RGBA":
        decode_filter = "JPXDecode"
        procset = "ImageC"  # color images
        dict_obj["SMaskInData"] = 1
    elif im.mode == "CMYK":
        decode_filter = "DCTDecode"
        dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceCMYK")
        procset = "ImageC"  # color images
        # Decode array inverts each CMYK component of the JPEG samples.
        decode = [1, 0, 1, 0, 1, 0, 1, 0]
    else:
        msg = f"cannot save mode {im.mode}"
        raise ValueError(msg)

    #
    # image

    op = io.BytesIO()

    if decode_filter == "ASCIIHexDecode":
        ImageFile._save(im, op, [ImageFile._Tile("hex", (0, 0) + im.size, 0, im.mode)])
    elif decode_filter == "CCITTFaxDecode":
        im.save(
            op,
            "TIFF",
            compression="group4",
            # use a single strip
            strip_size=math.ceil(width / 8) * height,
        )
    elif decode_filter == "DCTDecode":
        Image.SAVE["JPEG"](im, op, filename)
    elif decode_filter == "JPXDecode":
        del dict_obj["BitsPerComponent"]
        Image.SAVE["JPEG2000"](im, op, filename)
    else:
        msg = f"unsupported PDF filter ({decode_filter})"
        raise ValueError(msg)

    stream = op.getvalue()
    filter: PdfParser.PdfArray | PdfParser.PdfName
    if decode_filter == "CCITTFaxDecode":
        # Strip the 8-byte TIFF header; the fax-encoded stream follows it.
        stream = stream[8:]
        filter = PdfParser.PdfArray([PdfParser.PdfName(decode_filter)])
    else:
        filter = PdfParser.PdfName(decode_filter)

    image_ref = image_refs.pop(0)
    existing_pdf.write_obj(
        image_ref,
        stream=stream,
        Type=PdfParser.PdfName("XObject"),
        Subtype=PdfParser.PdfName("Image"),
        Width=width,  # * 72.0 / x_resolution,
        Height=height,  # * 72.0 / y_resolution,
        Filter=filter,
        Decode=decode,
        DecodeParms=params,
        **dict_obj,
    )

    return image_ref, procset
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
) -> None:
    """Write *im* (and, when ``save_all`` is true, appended images and their
    frames) to *fp* as a PDF, one page per frame.

    Recognized ``encoderinfo`` keys: ``append`` (add pages to an existing
    PDF), ``dpi`` or ``resolution`` (page scaling), ``append_images``, and
    the document information keys listed in ``info`` below.
    """
    is_appending = im.encoderinfo.get("append", False)
    filename_str = filename.decode() if isinstance(filename, bytes) else filename
    if is_appending:
        # open the existing PDF for update so new objects can be appended
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename_str, mode="r+b")
    else:
        existing_pdf = PdfParser.PdfParser(f=fp, filename=filename_str, mode="w+b")

    dpi = im.encoderinfo.get("dpi")
    if dpi:
        x_resolution = dpi[0]
        y_resolution = dpi[1]
    else:
        # single "resolution" value applies to both axes; default 72 dpi
        x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0)

    info = {
        "title": (
            None if is_appending else os.path.splitext(os.path.basename(filename))[0]
        ),
        "author": None,
        "subject": None,
        "keywords": None,
        "creator": None,
        "producer": None,
        "creationDate": None if is_appending else time.gmtime(),
        "modDate": None if is_appending else time.gmtime(),
    }
    for k, default in info.items():
        v = im.encoderinfo.get(k) if k in im.encoderinfo else default
        if v:
            # PDF document info keys are capitalized: "title" -> "Title"
            existing_pdf.info[k[0].upper() + k[1:]] = v

    #
    # make sure image data is available
    im.load()

    existing_pdf.start_writing()
    existing_pdf.write_header()
    existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver")

    #
    # pages
    ims = [im]
    if save_all:
        append_images = im.encoderinfo.get("append_images", [])
        for append_im in append_images:
            # appended images inherit the primary image's encoder options
            append_im.encoderinfo = im.encoderinfo.copy()
            ims.append(append_im)
    number_of_pages = 0
    image_refs = []
    page_refs = []
    contents_refs = []
    for im in ims:
        im_number_of_pages = 1
        if save_all:
            im_number_of_pages = getattr(im, "n_frames", 1)
        number_of_pages += im_number_of_pages
        for i in range(im_number_of_pages):
            # reserve object IDs up front: image (plus a second ref for the
            # SMask of transparent "P" images), page, and page contents
            image_refs.append(existing_pdf.next_object_id(0))
            if im.mode == "P" and "transparency" in im.info:
                image_refs.append(existing_pdf.next_object_id(0))
            page_refs.append(existing_pdf.next_object_id(0))
            contents_refs.append(existing_pdf.next_object_id(0))
            existing_pdf.pages.append(page_refs[-1])

    #
    # catalog and list of pages
    existing_pdf.write_catalog()

    page_number = 0
    for im_sequence in ims:
        im_pages: ImageSequence.Iterator | list[Image.Image] = (
            ImageSequence.Iterator(im_sequence) if save_all else [im_sequence]
        )
        for im in im_pages:
            image_ref, procset = _write_image(im, filename, existing_pdf, image_refs)

            #
            # page
            existing_pdf.write_page(
                page_refs[page_number],
                Resources=PdfParser.PdfDict(
                    ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)],
                    XObject=PdfParser.PdfDict(image=image_ref),
                ),
                MediaBox=[
                    0,
                    0,
                    im.width * 72.0 / x_resolution,
                    im.height * 72.0 / y_resolution,
                ],
                Contents=contents_refs[page_number],
            )

            #
            # page contents: scale the unit-square image to the page size
            page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % (
                im.width * 72.0 / x_resolution,
                im.height * 72.0 / y_resolution,
            )

            existing_pdf.write_obj(contents_refs[page_number], stream=page_contents)

            page_number += 1

    #
    # trailer
    existing_pdf.write_xref_and_trailer()
    if hasattr(fp, "flush"):
        fp.flush()
    existing_pdf.close()
#
# --------------------------------------------------------------------
# Register the PDF save handlers. Only save/save_all are registered here;
# no "open" handler is provided by this plugin.
Image.register_save("PDF", _save)
Image.register_save_all("PDF", _save_all)

Image.register_extension("PDF", ".pdf")

Image.register_mime("PDF", "application/pdf")
venv\Lib\site-packages\PIL\PdfParser.py
from __future__ import annotations
import calendar
import codecs
import collections
import mmap
import os
import re
import time
import zlib
from typing import IO, Any, NamedTuple, Union
# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set
# on page 656
def encode_text(s: str) -> bytes:
    """Encode *s* as a PDF text string: a UTF-16BE BOM followed by the
    UTF-16BE encoding of the string."""
    body = s.encode("utf_16_be")
    return codecs.BOM_UTF16_BE + body
# Code points where PDFDocEncoding differs from the byte's own code point;
# bytes absent from this table decode as chr(byte) (see decode_text below).
PDFDocEncoding = {
    0x16: "\u0017",
    0x18: "\u02d8",
    0x19: "\u02c7",
    0x1A: "\u02c6",
    0x1B: "\u02d9",
    0x1C: "\u02dd",
    0x1D: "\u02db",
    0x1E: "\u02da",
    0x1F: "\u02dc",
    0x80: "\u2022",
    0x81: "\u2020",
    0x82: "\u2021",
    0x83: "\u2026",
    0x84: "\u2014",
    0x85: "\u2013",
    0x86: "\u0192",
    0x87: "\u2044",
    0x88: "\u2039",
    0x89: "\u203a",
    0x8A: "\u2212",
    0x8B: "\u2030",
    0x8C: "\u201e",
    0x8D: "\u201c",
    0x8E: "\u201d",
    0x8F: "\u2018",
    0x90: "\u2019",
    0x91: "\u201a",
    0x92: "\u2122",
    0x93: "\ufb01",
    0x94: "\ufb02",
    0x95: "\u0141",
    0x96: "\u0152",
    0x97: "\u0160",
    0x98: "\u0178",
    0x99: "\u017d",
    0x9A: "\u0131",
    0x9B: "\u0142",
    0x9C: "\u0153",
    0x9D: "\u0161",
    0x9E: "\u017e",
    0xA0: "\u20ac",
}
def decode_text(b: bytes) -> str:
    """Decode a PDF text string: UTF-16BE when BOM-prefixed, otherwise
    PDFDocEncoding (falling back to the byte's own code point)."""
    bom = codecs.BOM_UTF16_BE
    if b[: len(bom)] == bom:
        return b[len(bom) :].decode("utf_16_be")
    chars = [PDFDocEncoding.get(byte, chr(byte)) for byte in b]
    return "".join(chars)
class PdfFormatError(RuntimeError):
    """Raised when the PDF file structure appears syntactically or
    semantically invalid."""
def check_format_condition(condition: bool, error_message: str) -> None:
    """Raise PdfFormatError(*error_message*) unless *condition* holds."""
    if condition:
        return
    raise PdfFormatError(error_message)
# Raw (object_id, generation) pair; subclasses add PDF serialization syntax.
class IndirectReferenceTuple(NamedTuple):
    object_id: int
    generation: int
class IndirectReference(IndirectReferenceTuple):
    """A reference to an indirect PDF object: serialized as "<id> <gen> R"."""

    def __str__(self) -> str:
        return f"{self.object_id} {self.generation} R"

    def __bytes__(self) -> bytes:
        return str(self).encode("us-ascii")

    def __eq__(self, other: object) -> bool:
        # strict class match: a plain tuple with equal fields is NOT equal
        if self.__class__ is not other.__class__:
            return False
        assert isinstance(other, IndirectReference)
        return (self.object_id, self.generation) == (
            other.object_id,
            other.generation,
        )

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        return hash((self.object_id, self.generation))
class IndirectObjectDef(IndirectReference):
    """Header form of an indirect object definition: "<id> <gen> obj"."""

    def __str__(self) -> str:
        return f"{self.object_id} {self.generation} obj"
class XrefTable:
    """Cross-reference table mapping PDF object IDs to byte offsets.

    Entries read from an existing file live in ``existing_entries``; entries
    created while writing go to ``new_entries`` (selected by the
    ``reading_finished`` flag).  ``deleted_entries`` maps object IDs to the
    generation number used for their free-list records.
    """

    def __init__(self) -> None:
        self.existing_entries: dict[int, tuple[int, int]] = (
            {}
        )  # object ID => (offset, generation)
        self.new_entries: dict[int, tuple[int, int]] = (
            {}
        )  # object ID => (offset, generation)
        # entry 0 with generation 65536 is the conventional free-list head
        self.deleted_entries = {0: 65536}  # object ID => generation
        self.reading_finished = False

    def __setitem__(self, key: int, value: tuple[int, int]) -> None:
        # writes go to new_entries once reading has finished; a (re)written
        # object is no longer considered deleted
        if self.reading_finished:
            self.new_entries[key] = value
        else:
            self.existing_entries[key] = value
        if key in self.deleted_entries:
            del self.deleted_entries[key]

    def __getitem__(self, key: int) -> tuple[int, int]:
        # new entries shadow existing ones
        try:
            return self.new_entries[key]
        except KeyError:
            return self.existing_entries[key]

    def __delitem__(self, key: int) -> None:
        # record the freed object with a bumped generation; note that an
        # existing entry is kept in existing_entries (only the free list
        # is updated)
        if key in self.new_entries:
            generation = self.new_entries[key][1] + 1
            del self.new_entries[key]
            self.deleted_entries[key] = generation
        elif key in self.existing_entries:
            generation = self.existing_entries[key][1] + 1
            self.deleted_entries[key] = generation
        elif key in self.deleted_entries:
            generation = self.deleted_entries[key]
        else:
            msg = f"object ID {key} cannot be deleted because it doesn't exist"
            raise IndexError(msg)

    def __contains__(self, key: int) -> bool:
        return key in self.existing_entries or key in self.new_entries

    def __len__(self) -> int:
        # counts every ID ever seen, including deleted ones
        return len(
            set(self.existing_entries.keys())
            | set(self.new_entries.keys())
            | set(self.deleted_entries.keys())
        )

    def keys(self) -> set[int]:
        # live object IDs: existing minus deleted, plus everything new
        return (
            set(self.existing_entries.keys()) - set(self.deleted_entries.keys())
        ) | set(self.new_entries.keys())

    def write(self, f: IO[bytes]) -> int:
        """Write an xref section covering new and deleted entries to *f*;
        return the section's byte offset (the startxref value)."""
        keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys()))
        deleted_keys = sorted(set(self.deleted_entries.keys()))
        startxref = f.tell()
        f.write(b"xref\n")
        while keys:
            # find a contiguous sequence of object IDs
            prev: int | None = None
            for index, key in enumerate(keys):
                if prev is None or prev + 1 == key:
                    prev = key
                else:
                    contiguous_keys = keys[:index]
                    keys = keys[index:]
                    break
            else:
                contiguous_keys = keys
                keys = []
            # subsection header: first object ID and entry count
            f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys)))
            for object_id in contiguous_keys:
                if object_id in self.new_entries:
                    f.write(b"%010d %05d n \n" % self.new_entries[object_id])
                else:
                    # free entries form a linked list: each points at the
                    # next deleted object ID (0 terminates the list)
                    this_deleted_object_id = deleted_keys.pop(0)
                    check_format_condition(
                        object_id == this_deleted_object_id,
                        f"expected the next deleted object ID to be {object_id}, "
                        f"instead found {this_deleted_object_id}",
                    )
                    try:
                        next_in_linked_list = deleted_keys[0]
                    except IndexError:
                        next_in_linked_list = 0
                    f.write(
                        b"%010d %05d f \n"
                        % (next_in_linked_list, self.deleted_entries[object_id])
                    )
        return startxref
class PdfName:
    """A PDF name object; serialized with a leading "/" and "#xx" escapes."""

    name: bytes

    # bytes that may appear verbatim in a serialized name; everything else
    # (including "#", "%", "/", delimiters, and non-printable bytes) is
    # written as a "#xx" hex escape
    allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"}

    def __init__(self, name: PdfName | bytes | str) -> None:
        if isinstance(name, PdfName):
            self.name = name.name
        elif isinstance(name, str):
            self.name = name.encode("us-ascii")
        else:
            self.name = name

    def name_as_str(self) -> str:
        return self.name.decode("us-ascii")

    def __eq__(self, other: object) -> bool:
        # equal to another PdfName with the same bytes, or to the raw bytes
        if isinstance(other, PdfName):
            return other.name == self.name
        return other == self.name

    def __hash__(self) -> int:
        return hash(self.name)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({repr(self.name)})"

    @classmethod
    def from_pdf_stream(cls, data: bytes) -> PdfName:
        """Build a PdfName from serialized form, resolving "#xx" escapes."""
        return cls(PdfParser.interpret_name(data))

    def __bytes__(self) -> bytes:
        pieces = [b"/"]
        for byte in self.name:
            if byte in self.allowed_chars:
                pieces.append(bytes([byte]))
            else:
                pieces.append(b"#%02X" % byte)
        return b"".join(pieces)
class PdfArray(list[Any]):
    """A PDF array: a list serialized as "[ item item ... ]"."""

    def __bytes__(self) -> bytes:
        serialized = b" ".join(pdf_repr(element) for element in self)
        return b"[ " + serialized + b" ]"
# Local stand-in for typing.TYPE_CHECKING: the subscripted UserDict base is
# only meaningful to static type checkers, so at runtime the plain
# (unsubscripted) UserDict is used instead.
TYPE_CHECKING = False
if TYPE_CHECKING:
    _DictBase = collections.UserDict[Union[str, bytes], Any]
else:
    _DictBase = collections.UserDict
class PdfDict(_DictBase):
    """A PDF dictionary with attribute-style access.

    Attribute writes store the value under the ASCII-encoded key; attribute
    reads decode bytes values via decode_text and parse ``...Date`` values
    into ``time.struct_time`` (normalized to GMT using the "D:" timezone
    offset when one is present).
    """

    def __setattr__(self, key: str, value: Any) -> None:
        # "data" is UserDict's backing store; everything else becomes an entry
        if key == "data":
            collections.UserDict.__setattr__(self, key, value)
        else:
            self[key.encode("us-ascii")] = value

    def __getattr__(self, key: str) -> str | time.struct_time:
        try:
            value = self[key.encode("us-ascii")]
        except KeyError as e:
            raise AttributeError(key) from e
        if isinstance(value, bytes):
            value = decode_text(value)
        if key.endswith("Date"):
            if value.startswith("D:"):
                value = value[2:]

            # optional timezone suffix: relationship char at index 14
            # ("+", "-" or "Z") followed by HH'mm offset digits
            relationship = "Z"
            if len(value) > 17:
                relationship = value[14]
                offset = int(value[15:17]) * 60
                if len(value) > 20:
                    offset += int(value[18:20])

            # truncate the format to however many date fields are present
            format = "%Y%m%d%H%M%S"[: len(value) - 2]
            value = time.strptime(value[: len(format) + 2], format)
            if relationship in ["+", "-"]:
                offset *= 60
                if relationship == "+":
                    offset *= -1
                value = time.gmtime(calendar.timegm(value) + offset)
        return value

    def __bytes__(self) -> bytes:
        out = bytearray(b"<<")
        for key, value in self.items():
            # None-valued entries are omitted from the serialized dict
            if value is None:
                continue
            value = pdf_repr(value)
            out.extend(b"\n")
            out.extend(bytes(PdfName(key)))
            out.extend(b" ")
            out.extend(value)
        out.extend(b"\n>>")
        return bytes(out)
class PdfBinary:
    """Raw data serialized as a PDF hexadecimal string ("<...>")."""

    def __init__(self, data: list[int] | bytes) -> None:
        self.data = data

    def __bytes__(self) -> bytes:
        hex_digits = "".join("%02X" % b for b in self.data)
        return b"<" + hex_digits.encode("us-ascii") + b">"
class PdfStream:
    """A PDF stream object: a dictionary plus its (possibly encoded) data."""

    def __init__(self, dictionary: PdfDict, buf: bytes) -> None:
        self.dictionary = dictionary
        self.buf = buf

    def decode(self) -> bytes:
        """Return the decoded stream payload (only FlateDecode supported)."""
        try:
            stream_filter = self.dictionary[b"Filter"]
        except KeyError:
            # no filter: the data is stored verbatim
            return self.buf
        if stream_filter != b"FlateDecode":
            msg = f"stream filter {repr(stream_filter)} unknown/unsupported"
            raise NotImplementedError(msg)
        # prefer the decoded length (DL) as the decompression buffer hint
        try:
            length_hint = self.dictionary[b"DL"]
        except KeyError:
            length_hint = self.dictionary[b"Length"]
        return zlib.decompress(self.buf, bufsize=int(length_hint))
def pdf_repr(x: Any) -> bytes:
    """Serialize *x* into its PDF token representation."""
    if x is True:
        return b"true"
    if x is False:
        return b"false"
    if x is None:
        return b"null"
    if isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)):
        return bytes(x)
    if isinstance(x, (int, float)):
        return str(x).encode("us-ascii")
    if isinstance(x, time.struct_time):
        return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")"
    if isinstance(x, dict):
        return bytes(PdfDict(x))
    if isinstance(x, list):
        return bytes(PdfArray(x))
    if isinstance(x, str):
        return pdf_repr(encode_text(x))
    if isinstance(x, bytes):
        # XXX escape more chars? handle binary garbage
        escaped = x.replace(b"\\", b"\\\\")
        escaped = escaped.replace(b"(", b"\\(")
        escaped = escaped.replace(b")", b"\\)")
        return b"(" + escaped + b")"
    return bytes(x)
class PdfParser:
"""Based on
https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
Supports PDF up to 1.4
"""
def __init__(
self,
filename: str | None = None,
f: IO[bytes] | None = None,
buf: bytes | bytearray | None = None,
start_offset: int = 0,
mode: str = "rb",
) -> None:
if buf and f:
msg = "specify buf or f or filename, but not both buf and f"
raise RuntimeError(msg)
self.filename = filename
self.buf: bytes | bytearray | mmap.mmap | None = buf
self.f = f
self.start_offset = start_offset
self.should_close_buf = False
self.should_close_file = False
if filename is not None and f is None:
self.f = f = open(filename, mode)
self.should_close_file = True
if f is not None:
self.buf = self.get_buf_from_file(f)
self.should_close_buf = True
if not filename and hasattr(f, "name"):
self.filename = f.name
self.cached_objects: dict[IndirectReference, Any] = {}
self.root_ref: IndirectReference | None
self.info_ref: IndirectReference | None
self.pages_ref: IndirectReference | None
self.last_xref_section_offset: int | None
if self.buf:
self.read_pdf_info()
else:
self.file_size_total = self.file_size_this = 0
self.root = PdfDict()
self.root_ref = None
self.info = PdfDict()
self.info_ref = None
self.page_tree_root = PdfDict()
self.pages: list[IndirectReference] = []
self.orig_pages: list[IndirectReference] = []
self.pages_ref = None
self.last_xref_section_offset = None
self.trailer_dict: dict[bytes, Any] = {}
self.xref_table = XrefTable()
self.xref_table.reading_finished = True
if f:
self.seek_end()
def __enter__(self) -> PdfParser:
return self
def __exit__(self, *args: object) -> None:
self.close()
def start_writing(self) -> None:
self.close_buf()
self.seek_end()
def close_buf(self) -> None:
if isinstance(self.buf, mmap.mmap):
self.buf.close()
self.buf = None
def close(self) -> None:
if self.should_close_buf:
self.close_buf()
if self.f is not None and self.should_close_file:
self.f.close()
self.f = None
def seek_end(self) -> None:
assert self.f is not None
self.f.seek(0, os.SEEK_END)
def write_header(self) -> None:
assert self.f is not None
self.f.write(b"%PDF-1.4\n")
def write_comment(self, s: str) -> None:
assert self.f is not None
self.f.write(f"% {s}\n".encode())
def write_catalog(self) -> IndirectReference:
assert self.f is not None
self.del_root()
self.root_ref = self.next_object_id(self.f.tell())
self.pages_ref = self.next_object_id(0)
self.rewrite_pages()
self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref)
self.write_obj(
self.pages_ref,
Type=PdfName(b"Pages"),
Count=len(self.pages),
Kids=self.pages,
)
return self.root_ref
def rewrite_pages(self) -> None:
pages_tree_nodes_to_delete = []
for i, page_ref in enumerate(self.orig_pages):
page_info = self.cached_objects[page_ref]
del self.xref_table[page_ref.object_id]
pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")])
if page_ref not in self.pages:
# the page has been deleted
continue
# make dict keys into strings for passing to write_page
stringified_page_info = {}
for key, value in page_info.items():
# key should be a PdfName
stringified_page_info[key.name_as_str()] = value
stringified_page_info["Parent"] = self.pages_ref
new_page_ref = self.write_page(None, **stringified_page_info)
for j, cur_page_ref in enumerate(self.pages):
if cur_page_ref == page_ref:
# replace the page reference with the new one
self.pages[j] = new_page_ref
# delete redundant Pages tree nodes from xref table
for pages_tree_node_ref in pages_tree_nodes_to_delete:
while pages_tree_node_ref:
pages_tree_node = self.cached_objects[pages_tree_node_ref]
if pages_tree_node_ref.object_id in self.xref_table:
del self.xref_table[pages_tree_node_ref.object_id]
pages_tree_node_ref = pages_tree_node.get(b"Parent", None)
self.orig_pages = []
def write_xref_and_trailer(
self, new_root_ref: IndirectReference | None = None
) -> None:
assert self.f is not None
if new_root_ref:
self.del_root()
self.root_ref = new_root_ref
if self.info:
self.info_ref = self.write_obj(None, self.info)
start_xref = self.xref_table.write(self.f)
num_entries = len(self.xref_table)
trailer_dict: dict[str | bytes, Any] = {
b"Root": self.root_ref,
b"Size": num_entries,
}
if self.last_xref_section_offset is not None:
trailer_dict[b"Prev"] = self.last_xref_section_offset
if self.info:
trailer_dict[b"Info"] = self.info_ref
self.last_xref_section_offset = start_xref
self.f.write(
b"trailer\n"
+ bytes(PdfDict(trailer_dict))
+ b"\nstartxref\n%d\n%%%%EOF" % start_xref
)
def write_page(
self, ref: int | IndirectReference | None, *objs: Any, **dict_obj: Any
) -> IndirectReference:
obj_ref = self.pages[ref] if isinstance(ref, int) else ref
if "Type" not in dict_obj:
dict_obj["Type"] = PdfName(b"Page")
if "Parent" not in dict_obj:
dict_obj["Parent"] = self.pages_ref
return self.write_obj(obj_ref, *objs, **dict_obj)
def write_obj(
self, ref: IndirectReference | None, *objs: Any, **dict_obj: Any
) -> IndirectReference:
assert self.f is not None
f = self.f
if ref is None:
ref = self.next_object_id(f.tell())
else:
self.xref_table[ref.object_id] = (f.tell(), ref.generation)
f.write(bytes(IndirectObjectDef(*ref)))
stream = dict_obj.pop("stream", None)
if stream is not None:
dict_obj["Length"] = len(stream)
if dict_obj:
f.write(pdf_repr(dict_obj))
for obj in objs:
f.write(pdf_repr(obj))
if stream is not None:
f.write(b"stream\n")
f.write(stream)
f.write(b"\nendstream\n")
f.write(b"endobj\n")
return ref
def del_root(self) -> None:
if self.root_ref is None:
return
del self.xref_table[self.root_ref.object_id]
del self.xref_table[self.root[b"Pages"].object_id]
@staticmethod
def get_buf_from_file(f: IO[bytes]) -> bytes | mmap.mmap:
if hasattr(f, "getbuffer"):
return f.getbuffer()
elif hasattr(f, "getvalue"):
return f.getvalue()
else:
try:
return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
except ValueError: # cannot mmap an empty file
return b""
def read_pdf_info(self) -> None:
assert self.buf is not None
self.file_size_total = len(self.buf)
self.file_size_this = self.file_size_total - self.start_offset
self.read_trailer()
check_format_condition(
self.trailer_dict.get(b"Root") is not None, "Root is missing"
)
self.root_ref = self.trailer_dict[b"Root"]
assert self.root_ref is not None
self.info_ref = self.trailer_dict.get(b"Info", None)
self.root = PdfDict(self.read_indirect(self.root_ref))
if self.info_ref is None:
self.info = PdfDict()
else:
self.info = PdfDict(self.read_indirect(self.info_ref))
check_format_condition(b"Type" in self.root, "/Type missing in Root")
check_format_condition(
self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog"
)
check_format_condition(
self.root.get(b"Pages") is not None, "/Pages missing in Root"
)
check_format_condition(
isinstance(self.root[b"Pages"], IndirectReference),
"/Pages in Root is not an indirect reference",
)
self.pages_ref = self.root[b"Pages"]
assert self.pages_ref is not None
self.page_tree_root = self.read_indirect(self.pages_ref)
self.pages = self.linearize_page_tree(self.page_tree_root)
# save the original list of page references
# in case the user modifies, adds or deletes some pages
# and we need to rewrite the pages and their list
self.orig_pages = self.pages[:]
def next_object_id(self, offset: int | None = None) -> IndirectReference:
try:
# TODO: support reuse of deleted objects
reference = IndirectReference(max(self.xref_table.keys()) + 1, 0)
except ValueError:
reference = IndirectReference(1, 0)
if offset is not None:
self.xref_table[reference.object_id] = (offset, 0)
return reference
delimiter = rb"[][()<>{}/%]"
delimiter_or_ws = rb"[][()<>{}/%\000\011\012\014\015\040]"
whitespace = rb"[\000\011\012\014\015\040]"
whitespace_or_hex = rb"[\000\011\012\014\015\0400-9a-fA-F]"
whitespace_optional = whitespace + b"*"
whitespace_mandatory = whitespace + b"+"
# No "\012" aka "\n" or "\015" aka "\r":
whitespace_optional_no_nl = rb"[\000\011\014\040]*"
newline_only = rb"[\r\n]+"
newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl
re_trailer_end = re.compile(
whitespace_mandatory
+ rb"trailer"
+ whitespace_optional
+ rb"<<(.*>>)"
+ newline
+ rb"startxref"
+ newline
+ rb"([0-9]+)"
+ newline
+ rb"%%EOF"
+ whitespace_optional
+ rb"$",
re.DOTALL,
)
re_trailer_prev = re.compile(
whitespace_optional
+ rb"trailer"
+ whitespace_optional
+ rb"<<(.*?>>)"
+ newline
+ rb"startxref"
+ newline
+ rb"([0-9]+)"
+ newline
+ rb"%%EOF"
+ whitespace_optional,
re.DOTALL,
)
def read_trailer(self) -> None:
assert self.buf is not None
search_start_offset = len(self.buf) - 16384
if search_start_offset < self.start_offset:
search_start_offset = self.start_offset
m = self.re_trailer_end.search(self.buf, search_start_offset)
check_format_condition(m is not None, "trailer end not found")
# make sure we found the LAST trailer
last_match = m
while m:
last_match = m
m = self.re_trailer_end.search(self.buf, m.start() + 16)
if not m:
m = last_match
assert m is not None
trailer_data = m.group(1)
self.last_xref_section_offset = int(m.group(2))
self.trailer_dict = self.interpret_trailer(trailer_data)
self.xref_table = XrefTable()
self.read_xref_table(xref_section_offset=self.last_xref_section_offset)
if b"Prev" in self.trailer_dict:
self.read_prev_trailer(self.trailer_dict[b"Prev"])
def read_prev_trailer(self, xref_section_offset: int) -> None:
assert self.buf is not None
trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset)
m = self.re_trailer_prev.search(
self.buf[trailer_offset : trailer_offset + 16384]
)
check_format_condition(m is not None, "previous trailer not found")
assert m is not None
trailer_data = m.group(1)
check_format_condition(
int(m.group(2)) == xref_section_offset,
"xref section offset in previous trailer doesn't match what was expected",
)
trailer_dict = self.interpret_trailer(trailer_data)
if b"Prev" in trailer_dict:
self.read_prev_trailer(trailer_dict[b"Prev"])
re_whitespace_optional = re.compile(whitespace_optional)
re_name = re.compile(
whitespace_optional
+ rb"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?="
+ delimiter_or_ws
+ rb")"
)
re_dict_start = re.compile(whitespace_optional + rb"<<")
re_dict_end = re.compile(whitespace_optional + rb">>" + whitespace_optional)
@classmethod
def interpret_trailer(cls, trailer_data: bytes) -> dict[bytes, Any]:
trailer = {}
offset = 0
while True:
m = cls.re_name.match(trailer_data, offset)
if not m:
m = cls.re_dict_end.match(trailer_data, offset)
check_format_condition(
m is not None and m.end() == len(trailer_data),
"name not found in trailer, remaining data: "
+ repr(trailer_data[offset:]),
)
break
key = cls.interpret_name(m.group(1))
assert isinstance(key, bytes)
value, value_offset = cls.get_value(trailer_data, m.end())
trailer[key] = value
if value_offset is None:
break
offset = value_offset
check_format_condition(
b"Size" in trailer and isinstance(trailer[b"Size"], int),
"/Size not in trailer or not an integer",
)
check_format_condition(
b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference),
"/Root not in trailer or not an indirect reference",
)
return trailer
re_hashes_in_name = re.compile(rb"([^#]*)(#([0-9a-fA-F]{2}))?")
@classmethod
def interpret_name(cls, raw: bytes, as_text: bool = False) -> str | bytes:
name = b""
for m in cls.re_hashes_in_name.finditer(raw):
if m.group(3):
name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii"))
else:
name += m.group(1)
if as_text:
return name.decode("utf-8")
else:
return bytes(name)
re_null = re.compile(whitespace_optional + rb"null(?=" + delimiter_or_ws + rb")")
re_true = re.compile(whitespace_optional + rb"true(?=" + delimiter_or_ws + rb")")
re_false = re.compile(whitespace_optional + rb"false(?=" + delimiter_or_ws + rb")")
re_int = re.compile(
whitespace_optional + rb"([-+]?[0-9]+)(?=" + delimiter_or_ws + rb")"
)
re_real = re.compile(
whitespace_optional
+ rb"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?="
+ delimiter_or_ws
+ rb")"
)
re_array_start = re.compile(whitespace_optional + rb"\[")
re_array_end = re.compile(whitespace_optional + rb"]")
re_string_hex = re.compile(
whitespace_optional + rb"<(" + whitespace_or_hex + rb"*)>"
)
re_string_lit = re.compile(whitespace_optional + rb"\(")
re_indirect_reference = re.compile(
whitespace_optional
+ rb"([-+]?[0-9]+)"
+ whitespace_mandatory
+ rb"([-+]?[0-9]+)"
+ whitespace_mandatory
+ rb"R(?="
+ delimiter_or_ws
+ rb")"
)
re_indirect_def_start = re.compile(
whitespace_optional
+ rb"([-+]?[0-9]+)"
+ whitespace_mandatory
+ rb"([-+]?[0-9]+)"
+ whitespace_mandatory
+ rb"obj(?="
+ delimiter_or_ws
+ rb")"
)
re_indirect_def_end = re.compile(
whitespace_optional + rb"endobj(?=" + delimiter_or_ws + rb")"
)
re_comment = re.compile(
rb"(" + whitespace_optional + rb"%[^\r\n]*" + newline + rb")*"
)
re_stream_start = re.compile(whitespace_optional + rb"stream\r?\n")
re_stream_end = re.compile(
whitespace_optional + rb"endstream(?=" + delimiter_or_ws + rb")"
)
@classmethod
def get_value(
cls,
data: bytes | bytearray | mmap.mmap,
offset: int,
expect_indirect: IndirectReference | None = None,
max_nesting: int = -1,
) -> tuple[Any, int | None]:
if max_nesting == 0:
return None, None
m = cls.re_comment.match(data, offset)
if m:
offset = m.end()
m = cls.re_indirect_def_start.match(data, offset)
if m:
check_format_condition(
int(m.group(1)) > 0,
"indirect object definition: object ID must be greater than 0",
)
check_format_condition(
int(m.group(2)) >= 0,
"indirect object definition: generation must be non-negative",
)
check_format_condition(
expect_indirect is None
or expect_indirect
== IndirectReference(int(m.group(1)), int(m.group(2))),
"indirect object definition different than expected",
)
object, object_offset = cls.get_value(
data, m.end(), max_nesting=max_nesting - 1
)
if object_offset is None:
return object, None
m = cls.re_indirect_def_end.match(data, object_offset)
check_format_condition(
m is not None, "indirect object definition end not found"
)
assert m is not None
return object, m.end()
check_format_condition(
not expect_indirect, "indirect object definition not found"
)
m = cls.re_indirect_reference.match(data, offset)
if m:
check_format_condition(
int(m.group(1)) > 0,
"indirect object reference: object ID must be greater than 0",
)
check_format_condition(
int(m.group(2)) >= 0,
"indirect object reference: generation must be non-negative",
)
return IndirectReference(int(m.group(1)), int(m.group(2))), m.end()
m = cls.re_dict_start.match(data, offset)
if m:
offset = m.end()
result: dict[Any, Any] = {}
m = cls.re_dict_end.match(data, offset)
current_offset: int | None = offset
while not m:
assert current_offset is not None
key, current_offset = cls.get_value(
data, current_offset, max_nesting=max_nesting - 1
)
if current_offset is None:
return result, None
value, current_offset = cls.get_value(
data, current_offset, max_nesting=max_nesting - 1
)
result[key] = value
if current_offset is None:
return result, None
m = cls.re_dict_end.match(data, current_offset)
current_offset = m.end()
m = cls.re_stream_start.match(data, current_offset)
if m:
stream_len = result.get(b"Length")
if stream_len is None or not isinstance(stream_len, int):
msg = f"bad or missing Length in stream dict ({stream_len})"
raise PdfFormatError(msg)
stream_data = data[m.end() : m.end() + stream_len]
m = cls.re_stream_end.match(data, m.end() + stream_len)
check_format_condition(m is not None, "stream end not found")
assert m is not None
current_offset = m.end()
return PdfStream(PdfDict(result), stream_data), current_offset
return PdfDict(result), current_offset
m = cls.re_array_start.match(data, offset)
if m:
offset = m.end()
results = []
m = cls.re_array_end.match(data, offset)
current_offset = offset
while not m:
assert current_offset is not None
value, current_offset = cls.get_value(
data, current_offset, max_nesting=max_nesting - 1
)
results.append(value)
if current_offset is None:
return results, None
m = cls.re_array_end.match(data, current_offset)
return results, m.end()
m = cls.re_null.match(data, offset)
if m:
return None, m.end()
m = cls.re_true.match(data, offset)
if m:
return True, m.end()
m = cls.re_false.match(data, offset)
if m:
return False, m.end()
m = cls.re_name.match(data, offset)
if m:
return PdfName(cls.interpret_name(m.group(1))), m.end()
m = cls.re_int.match(data, offset)
if m:
return int(m.group(1)), m.end()
m = cls.re_real.match(data, offset)
if m:
# XXX Decimal instead of float???
return float(m.group(1)), m.end()
m = cls.re_string_hex.match(data, offset)
if m:
# filter out whitespace
hex_string = bytearray(
b for b in m.group(1) if b in b"0123456789abcdefABCDEF"
)
if len(hex_string) % 2 == 1:
# append a 0 if the length is not even - yes, at the end
hex_string.append(ord(b"0"))
return bytearray.fromhex(hex_string.decode("us-ascii")), m.end()
m = cls.re_string_lit.match(data, offset)
if m:
return cls.get_literal_string(data, m.end())
# return None, offset # fallback (only for debugging)
msg = f"unrecognized object: {repr(data[offset : offset + 32])}"
raise PdfFormatError(msg)
re_lit_str_token = re.compile(
rb"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))"
)
escaped_chars = {
b"n": b"\n",
b"r": b"\r",
b"t": b"\t",
b"b": b"\b",
b"f": b"\f",
b"(": b"(",
b")": b")",
b"\\": b"\\",
ord(b"n"): b"\n",
ord(b"r"): b"\r",
ord(b"t"): b"\t",
ord(b"b"): b"\b",
ord(b"f"): b"\f",
ord(b"("): b"(",
ord(b")"): b")",
ord(b"\\"): b"\\",
}
@classmethod
def get_literal_string(
cls, data: bytes | bytearray | mmap.mmap, offset: int
) -> tuple[bytes, int]:
nesting_depth = 0
result = bytearray()
for m in cls.re_lit_str_token.finditer(data, offset):
result.extend(data[offset : m.start()])
if m.group(1):
result.extend(cls.escaped_chars[m.group(1)[1]])
elif m.group(2):
result.append(int(m.group(2)[1:], 8))
elif m.group(3):
pass
elif m.group(5):
result.extend(b"\n")
elif m.group(6):
result.extend(b"(")
nesting_depth += 1
elif m.group(7):
if nesting_depth == 0:
return bytes(result), m.end()
result.extend(b")")
nesting_depth -= 1
offset = m.end()
msg = "unfinished literal string"
raise PdfFormatError(msg)
re_xref_section_start = re.compile(whitespace_optional + rb"xref" + newline)
re_xref_subsection_start = re.compile(
whitespace_optional
+ rb"([0-9]+)"
+ whitespace_mandatory
+ rb"([0-9]+)"
+ whitespace_optional
+ newline_only
)
re_xref_entry = re.compile(rb"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)")
def read_xref_table(self, xref_section_offset: int) -> int:
assert self.buf is not None
subsection_found = False
m = self.re_xref_section_start.match(
self.buf, xref_section_offset + self.start_offset
)
check_format_condition(m is not None, "xref section start not found")
assert m is not None
offset = m.end()
while True:
m = self.re_xref_subsection_start.match(self.buf, offset)
if not m:
check_format_condition(
subsection_found, "xref subsection start not found"
)
break
subsection_found = True
offset = m.end()
first_object = int(m.group(1))
num_objects = int(m.group(2))
for i in range(first_object, first_object + num_objects):
m = self.re_xref_entry.match(self.buf, offset)
check_format_condition(m is not None, "xref entry not found")
assert m is not None
offset = m.end()
is_free = m.group(3) == b"f"
if not is_free:
generation = int(m.group(2))
new_entry = (int(m.group(1)), generation)
if i not in self.xref_table:
self.xref_table[i] = new_entry
return offset
def read_indirect(self, ref: IndirectReference, max_nesting: int = -1) -> Any:
offset, generation = self.xref_table[ref[0]]
check_format_condition(
generation == ref[1],
f"expected to find generation {ref[1]} for object ID {ref[0]} in xref "
f"table, instead found generation {generation} at offset {offset}",
)
assert self.buf is not None
value = self.get_value(
self.buf,
offset + self.start_offset,
expect_indirect=IndirectReference(*ref),
max_nesting=max_nesting,
)[0]
self.cached_objects[ref] = value
return value
def linearize_page_tree(
    self, node: PdfDict | None = None
) -> list[IndirectReference]:
    """Flatten the page tree into an ordered list of page references.

    Starts from *node* (or ``self.page_tree_root`` when *node* is None)
    and recurses through intermediate /Pages nodes, collecting the
    references of /Page leaves in document order.
    """
    current = self.page_tree_root if node is None else node
    check_format_condition(
        current[b"Type"] == b"Pages", "/Type of page tree node is not /Pages"
    )
    pages: list[IndirectReference] = []
    for kid_ref in current[b"Kids"]:
        kid = self.read_indirect(kid_ref)
        if kid[b"Type"] == b"Page":
            # Leaf node: record the reference itself, not the object.
            pages.append(kid_ref)
            continue
        # Intermediate /Pages node: recurse and splice its pages in.
        pages += self.linearize_page_tree(node=kid)
    return pages
venv\Lib\site-packages\PIL\PixarImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# PIXAR raster support for PIL
#
# history:
# 97-01-29 fl Created
#
# notes:
# This is incomplete; it is based on a few samples created with
# Photoshop 2.5 and 3.0, and a summary description provided by
# Greg Coats . Hopefully, "L" and
# "RGBA" support will be added in future versions.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile
from ._binary import i16le as i16
#
# helpers
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\200\350\000\000")
##
# Image plugin for PIXAR raster images.
class PixarImageFile(ImageFile.ImageFile):
    """ImageFile subclass for PIXAR raster images.

    Support is incomplete: only the (14, 2) channel/depth combination,
    mapped to mode "RGB", is recognised.
    """

    format = "PIXAR"
    format_description = "PIXAR raster image"

    def _open(self) -> None:
        # assuming a 4-byte magic label
        assert self.fp is not None
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a PIXAR file"
            raise SyntaxError(msg)

        # read rest of header (512 bytes total including the magic)
        s = s + self.fp.read(508)

        # width/height stored little-endian; note width is at the higher
        # offset (418) and height at 416.
        self._size = i16(s, 418), i16(s, 416)

        # get channel/depth descriptions
        mode = i16(s, 424), i16(s, 426)

        if mode == (14, 2):
            self._mode = "RGB"
        # FIXME: to be continued...

        # create tile descriptor (assuming "dumped"); pixel data starts
        # at offset 1024.
        self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 1024, self.mode)]
#
# --------------------------------------------------------------------
# Register the plugin so Image.open() recognises PIXAR files by magic
# number (via _accept) and by the .pxr extension.
Image.register_open(PixarImageFile.format, PixarImageFile, _accept)

Image.register_extension(PixarImageFile.format, ".pxr")
venv\Lib\site-packages\PIL\PngImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# PNG support code
#
# See "PNG (Portable Network Graphics) Specification, version 1.0;
# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.).
#
# history:
# 1996-05-06 fl Created (couldn't resist it)
# 1996-12-14 fl Upgraded, added read and verify support (0.2)
# 1996-12-15 fl Separate PNG stream parser
# 1996-12-29 fl Added write support, added getchunks
# 1996-12-30 fl Eliminated circular references in decoder (0.3)
# 1998-07-12 fl Read/write 16-bit images as mode I (0.4)
# 2001-02-08 fl Added transparency support (from Zircon) (0.5)
# 2001-04-16 fl Don't close data source in "open" method (0.6)
# 2004-02-24 fl Don't even pretend to support interlaced files (0.7)
# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8)
# 2004-09-20 fl Added PngInfo chunk container
# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev)
# 2008-08-13 fl Added tRNS support for RGB images
# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech)
# 2009-03-08 fl Added zTXT support (from Lowell Alleman)
# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua)
#
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import itertools
import logging
import re
import struct
import warnings
import zlib
from collections.abc import Callable
from enum import IntEnum
from typing import IO, Any, NamedTuple, NoReturn, cast
from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import o8
from ._binary import o16be as o16
from ._binary import o32be as o32
from ._deprecate import deprecate
from ._util import DeferredError
# Constant-folded TYPE_CHECKING flag: the _imaging import below is only
# meaningful to static type checkers and never executed at runtime.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from . import _imaging

logger = logging.getLogger(__name__)

# A chunk id must be exactly four "word" characters.
is_cid = re.compile(rb"\w\w\w\w").match

# The 8-byte PNG file signature.
_MAGIC = b"\211PNG\r\n\032\n"


_MODES = {
    # supported bits/color combinations, and corresponding modes/rawmodes
    # Grayscale
    (1, 0): ("1", "1"),
    (2, 0): ("L", "L;2"),
    (4, 0): ("L", "L;4"),
    (8, 0): ("L", "L"),
    (16, 0): ("I;16", "I;16B"),
    # Truecolour
    (8, 2): ("RGB", "RGB"),
    (16, 2): ("RGB", "RGB;16B"),
    # Indexed-colour
    (1, 3): ("P", "P;1"),
    (2, 3): ("P", "P;2"),
    (4, 3): ("P", "P;4"),
    (8, 3): ("P", "P"),
    # Grayscale with alpha
    (8, 4): ("LA", "LA"),
    (16, 4): ("RGBA", "LA;16B"),  # LA;16B->LA not yet available
    # Truecolour with alpha
    (8, 6): ("RGBA", "RGBA"),
    (16, 6): ("RGBA", "RGBA;16B"),
}


# Matches tRNS palette data that makes at most one entry fully transparent
# (a single 0x00 byte) with all other entries fully opaque (0xff).
_simple_palette = re.compile(b"^\xff*\x00\xff*$")

MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK
"""
Maximum decompressed size for a iTXt or zTXt chunk.
Eliminates decompression bombs where compressed chunks can expand 1000x.
See :ref:`Text in PNG File Format`.
"""
MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK
"""
Set the maximum total text chunk size.
See :ref:`Text in PNG File Format`.
"""
# APNG frame disposal modes
class Disposal(IntEnum):
    """APNG frame disposal operations (the fcTL ``dispose_op`` field)."""

    OP_NONE = 0
    """
    No disposal is done on this frame before rendering the next frame.
    See :ref:`Saving APNG sequences`.
    """
    OP_BACKGROUND = 1
    """
    This frame’s modified region is cleared to fully transparent black before rendering
    the next frame.
    See :ref:`Saving APNG sequences`.
    """
    OP_PREVIOUS = 2
    """
    This frame’s modified region is reverted to the previous frame’s contents before
    rendering the next frame.
    See :ref:`Saving APNG sequences`.
    """
# APNG frame blend modes
class Blend(IntEnum):
    """APNG frame blend operations (the fcTL ``blend_op`` field)."""

    OP_SOURCE = 0
    """
    All color components of this frame, including alpha, overwrite the previous output
    image contents.
    See :ref:`Saving APNG sequences`.
    """
    OP_OVER = 1
    """
    This frame should be alpha composited with the previous output image contents.
    See :ref:`Saving APNG sequences`.
    """
def _safe_zlib_decompress(s: bytes) -> bytes:
    """Decompress *s*, refusing output larger than MAX_TEXT_CHUNK.

    Guards against decompression bombs: decompression stops at the size
    cap, and any unconsumed compressed input raises ValueError.
    """
    decompressor = zlib.decompressobj()
    data = decompressor.decompress(s, MAX_TEXT_CHUNK)
    if not decompressor.unconsumed_tail:
        return data
    msg = "Decompressed data too large for PngImagePlugin.MAX_TEXT_CHUNK"
    raise ValueError(msg)
def _crc32(data: bytes, seed: int = 0) -> int:
return zlib.crc32(data, seed) & 0xFFFFFFFF
# --------------------------------------------------------------------
# Support classes. Suitable for PNG and related formats like MNG etc.
class ChunkStream:
    """Iterate over the chunks of a PNG-family stream (PNG, MNG, ...).

    Reads 8-byte chunk headers (length + chunk id) from *fp*, dispatches
    to ``chunk_<id>`` handler methods defined on subclasses, and verifies
    chunk CRCs.
    """

    def __init__(self, fp: IO[bytes]) -> None:
        self.fp: IO[bytes] | None = fp
        # Chunks pushed back for re-reading, as (cid, pos, length) tuples.
        self.queue: list[tuple[bytes, int, int]] | None = []

    def read(self) -> tuple[bytes, int, int]:
        """Fetch a new chunk. Returns header information."""
        cid = None

        assert self.fp is not None
        if self.queue:
            # Serve a chunk previously push()ed back, seeking to its data.
            cid, pos, length = self.queue.pop()
            self.fp.seek(pos)
        else:
            s = self.fp.read(8)
            cid = s[4:]
            pos = self.fp.tell()
            length = i32(s)

        if not is_cid(cid):
            # Chunk ids must be four ASCII word characters.
            if not ImageFile.LOAD_TRUNCATED_IMAGES:
                msg = f"broken PNG file (chunk {repr(cid)})"
                raise SyntaxError(msg)

        return cid, pos, length

    def __enter__(self) -> ChunkStream:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def close(self) -> None:
        # Drop the file and queue references; the stream is unusable after.
        self.queue = self.fp = None

    def push(self, cid: bytes, pos: int, length: int) -> None:
        # Push a chunk back so the next read() returns it again.
        assert self.queue is not None
        self.queue.append((cid, pos, length))

    def call(self, cid: bytes, pos: int, length: int) -> bytes:
        """Call the appropriate chunk handler"""
        logger.debug("STREAM %r %s %s", cid, pos, length)
        # Raises AttributeError for unknown chunk ids; callers catch it.
        return getattr(self, f"chunk_{cid.decode('ascii')}")(pos, length)

    def crc(self, cid: bytes, data: bytes) -> None:
        """Read and verify checksum"""

        # Skip CRC checks for ancillary chunks if allowed to load truncated
        # images
        # 5th byte of first char is 1 [specs, section 5.4]
        if ImageFile.LOAD_TRUNCATED_IMAGES and (cid[0] >> 5 & 1):
            self.crc_skip(cid, data)
            return

        assert self.fp is not None
        try:
            # The CRC covers the chunk id and data, not the length field.
            crc1 = _crc32(data, _crc32(cid))
            crc2 = i32(self.fp.read(4))
            if crc1 != crc2:
                msg = f"broken PNG file (bad header checksum in {repr(cid)})"
                raise SyntaxError(msg)
        except struct.error as e:
            msg = f"broken PNG file (incomplete checksum in {repr(cid)})"
            raise SyntaxError(msg) from e

    def crc_skip(self, cid: bytes, data: bytes) -> None:
        """Read checksum"""
        assert self.fp is not None
        self.fp.read(4)

    def verify(self, endchunk: bytes = b"IEND") -> list[bytes]:
        # Simple approach; just calculate checksum for all remaining
        # blocks. Must be called directly after open.

        cids = []

        assert self.fp is not None
        while True:
            try:
                cid, pos, length = self.read()
            except struct.error as e:
                msg = "truncated PNG file"
                raise OSError(msg) from e

            if cid == endchunk:
                break
            self.crc(cid, ImageFile._safe_read(self.fp, length))
            cids.append(cid)

        return cids
class iTXt(str):
    """
    Subclass of string to allow iTXt chunks to look like strings while
    keeping their extra information
    """

    # Language tag and translated keyword carried alongside the text value.
    lang: str | bytes | None
    tkey: str | bytes | None

    @staticmethod
    def __new__(
        cls, text: str, lang: str | None = None, tkey: str | None = None
    ) -> iTXt:
        """
        :param cls: the class to use when creating the instance
        :param text: value for this key
        :param lang: language code
        :param tkey: UTF-8 version of the key name
        """

        self = str.__new__(cls, text)
        self.lang = lang
        self.tkey = tkey
        return self
class PngInfo:
"""
PNG chunk container (for use with save(pnginfo=))
"""
def __init__(self) -> None:
self.chunks: list[tuple[bytes, bytes, bool]] = []
def add(self, cid: bytes, data: bytes, after_idat: bool = False) -> None:
"""Appends an arbitrary chunk. Use with caution.
:param cid: a byte string, 4 bytes long.
:param data: a byte string of the encoded data
:param after_idat: for use with private chunks. Whether the chunk
should be written after IDAT
"""
self.chunks.append((cid, data, after_idat))
def add_itxt(
self,
key: str | bytes,
value: str | bytes,
lang: str | bytes = "",
tkey: str | bytes = "",
zip: bool = False,
) -> None:
"""Appends an iTXt chunk.
:param key: latin-1 encodable text key name
:param value: value for this key
:param lang: language code
:param tkey: UTF-8 version of the key name
:param zip: compression flag
"""
if not isinstance(key, bytes):
key = key.encode("latin-1", "strict")
if not isinstance(value, bytes):
value = value.encode("utf-8", "strict")
if not isinstance(lang, bytes):
lang = lang.encode("utf-8", "strict")
if not isinstance(tkey, bytes):
tkey = tkey.encode("utf-8", "strict")
if zip:
self.add(
b"iTXt",
key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value),
)
else:
self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value)
def add_text(
self, key: str | bytes, value: str | bytes | iTXt, zip: bool = False
) -> None:
"""Appends a text chunk.
:param key: latin-1 encodable text key name
:param value: value for this key, text or an
:py:class:`PIL.PngImagePlugin.iTXt` instance
:param zip: compression flag
"""
if isinstance(value, iTXt):
return self.add_itxt(
key,
value,
value.lang if value.lang is not None else b"",
value.tkey if value.tkey is not None else b"",
zip=zip,
)
# The tEXt chunk stores latin-1 text
if not isinstance(value, bytes):
try:
value = value.encode("latin-1", "strict")
except UnicodeError:
return self.add_itxt(key, value, zip=zip)
if not isinstance(key, bytes):
key = key.encode("latin-1", "strict")
if zip:
self.add(b"zTXt", key + b"\0\0" + zlib.compress(value))
else:
self.add(b"tEXt", key + b"\0" + value)
# --------------------------------------------------------------------
# PNG image stream (IHDR/IEND)
class _RewindState(NamedTuple):
    # Snapshot of parser state taken after the header chunks, used by
    # PngStream.rewind() to return an APNG to its first frame.
    info: dict[str | tuple[int, int], Any]
    tile: list[ImageFile._Tile]
    seq_num: int | None
class PngStream(ChunkStream):
    """ChunkStream subclass implementing the PNG/APNG chunk handlers.

    Each ``chunk_*`` method consumes one chunk's data and records what it
    learned in local ``im_*`` attributes; PngImageFile copies those into
    the Image object afterwards to avoid circular references.
    """

    def __init__(self, fp: IO[bytes]) -> None:
        super().__init__(fp)

        # local copies of Image attributes
        self.im_info: dict[str | tuple[int, int], Any] = {}
        self.im_text: dict[str, str | iTXt] = {}
        self.im_size = (0, 0)
        self.im_mode = ""
        self.im_tile: list[ImageFile._Tile] = []
        self.im_palette: tuple[str, bytes] | None = None
        self.im_custom_mimetype: str | None = None
        self.im_n_frames: int | None = None
        self._seq_num: int | None = None
        self.rewind_state = _RewindState({}, [], None)

        # Running total of decoded text-chunk bytes, bounded by
        # MAX_TEXT_MEMORY via check_text_memory().
        self.text_memory = 0

    def check_text_memory(self, chunklen: int) -> None:
        # Guard against memory exhaustion from many/large text chunks.
        self.text_memory += chunklen
        if self.text_memory > MAX_TEXT_MEMORY:
            msg = (
                "Too much memory used in text chunks: "
                f"{self.text_memory}>MAX_TEXT_MEMORY"
            )
            raise ValueError(msg)

    def save_rewind(self) -> None:
        # Capture the state needed to seek an APNG back to frame 0.
        self.rewind_state = _RewindState(
            self.im_info.copy(),
            self.im_tile,
            self._seq_num,
        )

    def rewind(self) -> None:
        self.im_info = self.rewind_state.info.copy()
        self.im_tile = self.rewind_state.tile
        self._seq_num = self.rewind_state.seq_num

    def chunk_iCCP(self, pos: int, length: int) -> bytes:
        # ICC profile
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        # according to PNG spec, the iCCP chunk contains:
        # Profile name  1-79 bytes (character string)
        # Null separator  1 byte (null character)
        # Compression method  1 byte (0)
        # Compressed profile  n bytes (zlib with deflate compression)
        i = s.find(b"\0")
        logger.debug("iCCP profile name %r", s[:i])
        comp_method = s[i + 1]
        logger.debug("Compression method %s", comp_method)
        if comp_method != 0:
            msg = f"Unknown compression method {comp_method} in iCCP chunk"
            raise SyntaxError(msg)
        try:
            icc_profile = _safe_zlib_decompress(s[i + 2 :])
        except ValueError:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                icc_profile = None
            else:
                raise
        except zlib.error:
            icc_profile = None  # FIXME
        self.im_info["icc_profile"] = icc_profile
        return s

    def chunk_IHDR(self, pos: int, length: int) -> bytes:
        # image header
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if length < 13:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                return s
            msg = "Truncated IHDR chunk"
            raise ValueError(msg)
        self.im_size = i32(s, 0), i32(s, 4)
        try:
            # (bit depth, color type) -> (mode, rawmode); unsupported
            # combinations are deliberately ignored here.
            self.im_mode, self.im_rawmode = _MODES[(s[8], s[9])]
        except Exception:
            pass
        if s[12]:
            self.im_info["interlace"] = 1
        if s[11]:
            msg = "unknown filter category"
            raise SyntaxError(msg)
        return s

    def chunk_IDAT(self, pos: int, length: int) -> NoReturn:
        # image data
        if "bbox" in self.im_info:
            tile = [ImageFile._Tile("zip", self.im_info["bbox"], pos, self.im_rawmode)]
        else:
            if self.im_n_frames is not None:
                self.im_info["default_image"] = True
            tile = [ImageFile._Tile("zip", (0, 0) + self.im_size, pos, self.im_rawmode)]
        self.im_tile = tile
        self.im_idat = length
        # EOFError signals "header parsing done" to the _open() loop.
        msg = "image data found"
        raise EOFError(msg)

    def chunk_IEND(self, pos: int, length: int) -> NoReturn:
        msg = "end of PNG image"
        raise EOFError(msg)

    def chunk_PLTE(self, pos: int, length: int) -> bytes:
        # palette
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if self.im_mode == "P":
            self.im_palette = "RGB", s
        return s

    def chunk_tRNS(self, pos: int, length: int) -> bytes:
        # transparency
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if self.im_mode == "P":
            if _simple_palette.match(s):
                # tRNS contains only one full-transparent entry,
                # other entries are full opaque
                i = s.find(b"\0")
                if i >= 0:
                    self.im_info["transparency"] = i
            else:
                # otherwise, we have a byte string with one alpha value
                # for each palette entry
                self.im_info["transparency"] = s
        elif self.im_mode in ("1", "L", "I;16"):
            self.im_info["transparency"] = i16(s)
        elif self.im_mode == "RGB":
            self.im_info["transparency"] = i16(s), i16(s, 2), i16(s, 4)
        return s

    def chunk_gAMA(self, pos: int, length: int) -> bytes:
        # gamma setting
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        self.im_info["gamma"] = i32(s) / 100000.0
        return s

    def chunk_cHRM(self, pos: int, length: int) -> bytes:
        # chromaticity, 8 unsigned ints, actual value is scaled by 100,000
        # WP x,y, Red x,y, Green x,y Blue x,y
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        raw_vals = struct.unpack(f">{len(s) // 4}I", s)
        self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals)
        return s

    def chunk_sRGB(self, pos: int, length: int) -> bytes:
        # srgb rendering intent, 1 byte
        # 0 perceptual
        # 1 relative colorimetric
        # 2 saturation
        # 3 absolute colorimetric
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if length < 1:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                return s
            msg = "Truncated sRGB chunk"
            raise ValueError(msg)
        self.im_info["srgb"] = s[0]
        return s

    def chunk_pHYs(self, pos: int, length: int) -> bytes:
        # pixels per unit
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if length < 9:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                return s
            msg = "Truncated pHYs chunk"
            raise ValueError(msg)
        px, py = i32(s, 0), i32(s, 4)
        unit = s[8]
        if unit == 1:  # meter
            # convert pixels-per-meter to dots-per-inch
            dpi = px * 0.0254, py * 0.0254
            self.im_info["dpi"] = dpi
        elif unit == 0:
            self.im_info["aspect"] = px, py
        return s

    def chunk_tEXt(self, pos: int, length: int) -> bytes:
        # text
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        try:
            k, v = s.split(b"\0", 1)
        except ValueError:
            # fallback for broken tEXt tags
            k = s
            v = b""
        if k:
            k_str = k.decode("latin-1", "strict")
            v_str = v.decode("latin-1", "replace")

            # exif data is kept as raw bytes; everything else as text
            self.im_info[k_str] = v if k == b"exif" else v_str
            self.im_text[k_str] = v_str
            self.check_text_memory(len(v_str))

        return s

    def chunk_zTXt(self, pos: int, length: int) -> bytes:
        # compressed text
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        try:
            k, v = s.split(b"\0", 1)
        except ValueError:
            k = s
            v = b""
        if v:
            comp_method = v[0]
        else:
            comp_method = 0
        if comp_method != 0:
            msg = f"Unknown compression method {comp_method} in zTXt chunk"
            raise SyntaxError(msg)
        try:
            v = _safe_zlib_decompress(v[1:])
        except ValueError:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                v = b""
            else:
                raise
        except zlib.error:
            v = b""

        if k:
            k_str = k.decode("latin-1", "strict")
            v_str = v.decode("latin-1", "replace")

            self.im_info[k_str] = self.im_text[k_str] = v_str
            self.check_text_memory(len(v_str))

        return s

    def chunk_iTXt(self, pos: int, length: int) -> bytes:
        # international text
        assert self.fp is not None
        r = s = ImageFile._safe_read(self.fp, length)
        try:
            k, r = r.split(b"\0", 1)
        except ValueError:
            return s
        if len(r) < 2:
            return s
        # compression flag, compression method, then the remainder
        cf, cm, r = r[0], r[1], r[2:]
        try:
            lang, tk, v = r.split(b"\0", 2)
        except ValueError:
            return s
        if cf != 0:
            if cm == 0:
                try:
                    v = _safe_zlib_decompress(v)
                except ValueError:
                    if ImageFile.LOAD_TRUNCATED_IMAGES:
                        return s
                    else:
                        raise
                except zlib.error:
                    return s
            else:
                return s
        if k == b"XML:com.adobe.xmp":
            self.im_info["xmp"] = v
        try:
            k_str = k.decode("latin-1", "strict")
            lang_str = lang.decode("utf-8", "strict")
            tk_str = tk.decode("utf-8", "strict")
            v_str = v.decode("utf-8", "strict")
        except UnicodeError:
            return s

        self.im_info[k_str] = self.im_text[k_str] = iTXt(v_str, lang_str, tk_str)
        self.check_text_memory(len(v_str))

        return s

    def chunk_eXIf(self, pos: int, length: int) -> bytes:
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        # prepend the TIFF-style Exif header expected by Image.getexif()
        self.im_info["exif"] = b"Exif\x00\x00" + s
        return s

    # APNG chunks
    def chunk_acTL(self, pos: int, length: int) -> bytes:
        # animation control: frame count and loop count
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if length < 8:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                return s
            msg = "APNG contains truncated acTL chunk"
            raise ValueError(msg)
        if self.im_n_frames is not None:
            # a second acTL chunk invalidates the animation
            self.im_n_frames = None
            warnings.warn("Invalid APNG, will use default PNG image if possible")
            return s
        n_frames = i32(s)
        if n_frames == 0 or n_frames > 0x80000000:
            warnings.warn("Invalid APNG, will use default PNG image if possible")
            return s
        self.im_n_frames = n_frames
        self.im_info["loop"] = i32(s, 4)
        self.im_custom_mimetype = "image/apng"
        return s

    def chunk_fcTL(self, pos: int, length: int) -> bytes:
        # frame control: geometry, timing, disposal and blend for one frame
        assert self.fp is not None
        s = ImageFile._safe_read(self.fp, length)
        if length < 26:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                return s
            msg = "APNG contains truncated fcTL chunk"
            raise ValueError(msg)
        seq = i32(s)
        if (self._seq_num is None and seq != 0) or (
            self._seq_num is not None and self._seq_num != seq - 1
        ):
            msg = "APNG contains frame sequence errors"
            raise SyntaxError(msg)
        self._seq_num = seq
        width, height = i32(s, 4), i32(s, 8)
        px, py = i32(s, 12), i32(s, 16)
        im_w, im_h = self.im_size
        if px + width > im_w or py + height > im_h:
            msg = "APNG contains invalid frames"
            raise SyntaxError(msg)
        self.im_info["bbox"] = (px, py, px + width, py + height)
        delay_num, delay_den = i16(s, 20), i16(s, 22)
        if delay_den == 0:
            # spec: a zero denominator means 1/100 second units
            delay_den = 100
        self.im_info["duration"] = float(delay_num) / float(delay_den) * 1000
        self.im_info["disposal"] = s[24]
        self.im_info["blend"] = s[25]
        return s

    def chunk_fdAT(self, pos: int, length: int) -> bytes:
        # frame data: 4-byte sequence number followed by IDAT-style data
        assert self.fp is not None
        if length < 4:
            if ImageFile.LOAD_TRUNCATED_IMAGES:
                s = ImageFile._safe_read(self.fp, length)
                return s
            msg = "APNG contains truncated fDAT chunk"
            raise ValueError(msg)
        s = ImageFile._safe_read(self.fp, 4)
        seq = i32(s)
        if self._seq_num != seq - 1:
            msg = "APNG contains frame sequence errors"
            raise SyntaxError(msg)
        self._seq_num = seq
        # delegate to the IDAT handler for the remaining payload
        return self.chunk_IDAT(pos + 4, length - 4)
# --------------------------------------------------------------------
# PNG reader
def _accept(prefix: bytes) -> bool:
    """Return True if *prefix* begins with the 8-byte PNG signature."""
    return prefix[: len(_MAGIC)] == _MAGIC
##
# Image plugin for PNG images.
class PngImageFile(ImageFile.ImageFile):
    """ImageFile subclass for PNG and animated PNG (APNG) images."""

    format = "PNG"
    format_description = "Portable network graphics"

    def _open(self) -> None:
        if not _accept(self.fp.read(8)):
            msg = "not a PNG file"
            raise SyntaxError(msg)
        self._fp = self.fp
        self.__frame = 0

        #
        # Parse headers up to the first IDAT or fDAT chunk

        self.private_chunks: list[tuple[bytes, bytes] | tuple[bytes, bytes, bool]] = []
        self.png: PngStream | None = PngStream(self.fp)

        while True:
            #
            # get next chunk

            cid, pos, length = self.png.read()

            try:
                s = self.png.call(cid, pos, length)
            except EOFError:
                # raised by chunk_IDAT/chunk_fdAT: header parsing is done
                break
            except AttributeError:
                logger.debug("%r %s %s (unknown)", cid, pos, length)
                s = ImageFile._safe_read(self.fp, length)
                if cid[1:2].islower():
                    # lowercase second letter marks a private chunk
                    self.private_chunks.append((cid, s))

            self.png.crc(cid, s)

        #
        # Copy relevant attributes from the PngStream.  An alternative
        # would be to let the PngStream class modify these attributes
        # directly, but that introduces circular references which are
        # difficult to break if things go wrong in the decoder...
        # (believe me, I've tried ;-)

        self._mode = self.png.im_mode
        self._size = self.png.im_size
        self.info = self.png.im_info
        self._text: dict[str, str | iTXt] | None = None
        self.tile = self.png.im_tile
        self.custom_mimetype = self.png.im_custom_mimetype
        self.n_frames = self.png.im_n_frames or 1
        self.default_image = self.info.get("default_image", False)

        if self.png.im_palette:
            rawmode, data = self.png.im_palette
            self.palette = ImagePalette.raw(rawmode, data)

        if cid == b"fdAT":
            # the 4-byte sequence number has already been consumed
            self.__prepare_idat = length - 4
        else:
            self.__prepare_idat = length  # used by load_prepare()

        if self.png.im_n_frames is not None:
            self._close_exclusive_fp_after_loading = False
            self.png.save_rewind()
            self.__rewind_idat = self.__prepare_idat
            self.__rewind = self._fp.tell()
            if self.default_image:
                # IDAT chunk contains default image and not first animation frame
                self.n_frames += 1
            self._seek(0)
        self.is_animated = self.n_frames > 1

    @property
    def text(self) -> dict[str, str | iTXt]:
        # experimental
        if self._text is None:
            # iTxt, tEXt and zTXt chunks may appear at the end of the file
            # So load the file to ensure that they are read
            if self.is_animated:
                frame = self.__frame
                # for APNG, seek to the final frame before loading
                self.seek(self.n_frames - 1)
            self.load()
            if self.is_animated:
                self.seek(frame)
        assert self._text is not None
        return self._text

    def verify(self) -> None:
        """Verify PNG file"""

        if self.fp is None:
            msg = "verify must be called directly after open"
            raise RuntimeError(msg)

        # back up to beginning of IDAT block
        self.fp.seek(self.tile[0][2] - 8)

        assert self.png is not None
        self.png.verify()
        self.png.close()

        if self._exclusive_fp:
            self.fp.close()
        self.fp = None

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            # APNG only supports forward seeking: rewind then replay
            self._seek(0, True)

        last_frame = self.__frame
        for f in range(self.__frame + 1, frame + 1):
            try:
                self._seek(f)
            except EOFError as e:
                # restore the last good frame before reporting the error
                self.seek(last_frame)
                msg = "no more images in APNG file"
                raise EOFError(msg) from e

    def _seek(self, frame: int, rewind: bool = False) -> None:
        assert self.png is not None
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex

        self.dispose: _imaging.ImagingCore | None
        dispose_extent = None
        if frame == 0:
            if rewind:
                self._fp.seek(self.__rewind)
                self.png.rewind()
                self.__prepare_idat = self.__rewind_idat
                self._im = None
            self.info = self.png.im_info
            self.tile = self.png.im_tile
            self.fp = self._fp
            self._prev_im = None
            self.dispose = None
            self.default_image = self.info.get("default_image", False)
            self.dispose_op = self.info.get("disposal")
            self.blend_op = self.info.get("blend")
            dispose_extent = self.info.get("bbox")
            self.__frame = 0
        else:
            if frame != self.__frame + 1:
                msg = f"cannot seek to frame {frame}"
                raise ValueError(msg)

            # ensure previous frame was loaded
            self.load()

            if self.dispose:
                self.im.paste(self.dispose, self.dispose_extent)
            self._prev_im = self.im.copy()

            self.fp = self._fp

            # advance to the next frame
            if self.__prepare_idat:
                ImageFile._safe_read(self.fp, self.__prepare_idat)
                self.__prepare_idat = 0
            frame_start = False
            while True:
                self.fp.read(4)  # CRC

                try:
                    cid, pos, length = self.png.read()
                except (struct.error, SyntaxError):
                    break

                if cid == b"IEND":
                    msg = "No more images in APNG file"
                    raise EOFError(msg)
                if cid == b"fcTL":
                    if frame_start:
                        # there must be at least one fdAT chunk between fcTL chunks
                        msg = "APNG missing frame data"
                        raise SyntaxError(msg)
                    frame_start = True

                try:
                    self.png.call(cid, pos, length)
                except UnicodeDecodeError:
                    break
                except EOFError:
                    # chunk_IDAT/chunk_fdAT raise EOFError on image data
                    if cid == b"fdAT":
                        length -= 4
                    if frame_start:
                        self.__prepare_idat = length
                        break
                    ImageFile._safe_read(self.fp, length)
                except AttributeError:
                    logger.debug("%r %s %s (unknown)", cid, pos, length)
                    ImageFile._safe_read(self.fp, length)

            self.__frame = frame
            self.tile = self.png.im_tile
            self.dispose_op = self.info.get("disposal")
            self.blend_op = self.info.get("blend")
            dispose_extent = self.info.get("bbox")

            if not self.tile:
                msg = "image not found in APNG frame"
                raise EOFError(msg)
        if dispose_extent:
            self.dispose_extent: tuple[float, float, float, float] = dispose_extent

        # setup frame disposal (actual disposal done when needed in the next _seek())
        if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS:
            self.dispose_op = Disposal.OP_BACKGROUND

        self.dispose = None
        if self.dispose_op == Disposal.OP_PREVIOUS:
            if self._prev_im:
                self.dispose = self._prev_im.copy()
                self.dispose = self._crop(self.dispose, self.dispose_extent)
        elif self.dispose_op == Disposal.OP_BACKGROUND:
            self.dispose = Image.core.fill(self.mode, self.size)
            self.dispose = self._crop(self.dispose, self.dispose_extent)

    def tell(self) -> int:
        return self.__frame

    def load_prepare(self) -> None:
        """internal: prepare to read PNG file"""

        if self.info.get("interlace"):
            self.decoderconfig = self.decoderconfig + (1,)

        self.__idat = self.__prepare_idat  # used by load_read()
        ImageFile.ImageFile.load_prepare(self)

    def load_read(self, read_bytes: int) -> bytes:
        """internal: read more image data"""

        assert self.png is not None
        while self.__idat == 0:
            # end of chunk, skip forward to next one

            self.fp.read(4)  # CRC

            cid, pos, length = self.png.read()

            if cid not in [b"IDAT", b"DDAT", b"fdAT"]:
                # not image data: push back for load_end() and stop
                self.png.push(cid, pos, length)
                return b""

            if cid == b"fdAT":
                try:
                    self.png.call(cid, pos, length)
                except EOFError:
                    pass
                self.__idat = length - 4  # sequence_num has already been read
            else:
                self.__idat = length  # empty chunks are allowed

        # read more data from this chunk
        if read_bytes <= 0:
            read_bytes = self.__idat
        else:
            read_bytes = min(read_bytes, self.__idat)

        self.__idat = self.__idat - read_bytes

        return self.fp.read(read_bytes)

    def load_end(self) -> None:
        """internal: finished reading image data"""
        assert self.png is not None
        if self.__idat != 0:
            self.fp.read(self.__idat)
        while True:
            self.fp.read(4)  # CRC

            try:
                cid, pos, length = self.png.read()
            except (struct.error, SyntaxError):
                break

            if cid == b"IEND":
                break
            elif cid == b"fcTL" and self.is_animated:
                # start of the next frame, stop reading
                self.__prepare_idat = 0
                self.png.push(cid, pos, length)
                break

            try:
                self.png.call(cid, pos, length)
            except UnicodeDecodeError:
                break
            except EOFError:
                if cid == b"fdAT":
                    length -= 4
                try:
                    ImageFile._safe_read(self.fp, length)
                except OSError as e:
                    if ImageFile.LOAD_TRUNCATED_IMAGES:
                        break
                    else:
                        raise e
            except AttributeError:
                logger.debug("%r %s %s (unknown)", cid, pos, length)
                s = ImageFile._safe_read(self.fp, length)
                if cid[1:2].islower():
                    self.private_chunks.append((cid, s, True))
        self._text = self.png.im_text
        if not self.is_animated:
            self.png.close()
            self.png = None
        else:
            # composite the loaded frame onto the previous output image
            if self._prev_im and self.blend_op == Blend.OP_OVER:
                updated = self._crop(self.im, self.dispose_extent)
                if self.im.mode == "RGB" and "transparency" in self.info:
                    mask = updated.convert_transparent(
                        "RGBA", self.info["transparency"]
                    )
                else:
                    if self.im.mode == "P" and "transparency" in self.info:
                        t = self.info["transparency"]
                        if isinstance(t, bytes):
                            updated.putpalettealphas(t)
                        elif isinstance(t, int):
                            updated.putpalettealpha(t)
                    mask = updated.convert("RGBA")
                self._prev_im.paste(updated, self.dispose_extent, mask)
                self.im = self._prev_im

    def _getexif(self) -> dict[int, Any] | None:
        if "exif" not in self.info:
            self.load()
        if "exif" not in self.info and "Raw profile type exif" not in self.info:
            return None
        return self.getexif()._get_merged_dict()

    def getexif(self) -> Image.Exif:
        # exif may arrive in a trailing eXIf chunk, so force a full load
        if "exif" not in self.info:
            self.load()

        return super().getexif()
# --------------------------------------------------------------------
# PNG writer
# Maps a PIL mode to the (rawmode, bit depth byte, color type byte) used
# when writing the IHDR chunk and encoding pixel data.
_OUTMODES = {
    # supported PIL modes, and corresponding rawmode, bit depth and color type
    "1": ("1", b"\x01", b"\x00"),
    "L;1": ("L;1", b"\x01", b"\x00"),
    "L;2": ("L;2", b"\x02", b"\x00"),
    "L;4": ("L;4", b"\x04", b"\x00"),
    "L": ("L", b"\x08", b"\x00"),
    "LA": ("LA", b"\x08", b"\x04"),
    "I": ("I;16B", b"\x10", b"\x00"),
    "I;16": ("I;16B", b"\x10", b"\x00"),
    "I;16B": ("I;16B", b"\x10", b"\x00"),
    "P;1": ("P;1", b"\x01", b"\x03"),
    "P;2": ("P;2", b"\x02", b"\x03"),
    "P;4": ("P;4", b"\x04", b"\x03"),
    "P": ("P", b"\x08", b"\x03"),
    "RGB": ("RGB", b"\x08", b"\x02"),
    "RGBA": ("RGBA", b"\x08", b"\x06"),
}
def putchunk(fp: IO[bytes], cid: bytes, *data: bytes) -> None:
    """Write a PNG chunk (including CRC field)"""
    payload = b"".join(data)
    # length + id, then data, then a CRC over id + data (not the length)
    fp.write(o32(len(payload)) + cid)
    fp.write(payload)
    fp.write(o32(_crc32(payload, _crc32(cid))))
class _idat:
    # wrap output from the encoder in IDAT chunks

    def __init__(self, fp: IO[bytes], chunk: Callable[..., None]) -> None:
        self.fp = fp
        self.chunk = chunk

    def write(self, data: bytes) -> None:
        # each encoder write becomes one IDAT chunk
        self.chunk(self.fp, b"IDAT", data)
class _fdat:
    # wrap encoder output in fdAT chunks

    def __init__(self, fp: IO[bytes], chunk: Callable[..., None], seq_num: int) -> None:
        self.fp = fp
        self.chunk = chunk
        # APNG sequence number to stamp on the next fdAT chunk
        self.seq_num = seq_num

    def write(self, data: bytes) -> None:
        self.chunk(self.fp, b"fdAT", o32(self.seq_num), data)
        self.seq_num += 1
class _Frame(NamedTuple):
    # One prepared APNG frame: the frame image, the changed-region bbox
    # (None for a full-frame update), and its per-frame encoder options.
    im: Image.Image
    bbox: tuple[int, int, int, int] | None
    encoderinfo: dict[str, Any]
def _write_multiple_frames(
    im: Image.Image,
    fp: IO[bytes],
    chunk: Callable[..., None],
    mode: str,
    rawmode: str,
    default_image: Image.Image | None,
    append_images: list[Image.Image],
) -> Image.Image | None:
    """Write an animated PNG (APNG) frame sequence.

    Collects all frames from ``im`` and ``append_images``, merges frames
    that do not change the canvas, then emits acTL / fcTL / IDAT / fdAT
    chunks.  Returns a single image when only one frame remains (so the
    caller can fall back to saving a plain PNG), otherwise None.
    """
    duration = im.encoderinfo.get("duration")
    loop = im.encoderinfo.get("loop", im.info.get("loop", 0))
    disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE))
    blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE))

    # When a default (non-animated) image exists it is written separately,
    # so the animation frames come from append_images only.
    if default_image:
        chain = itertools.chain(append_images)
    else:
        chain = itertools.chain([im], append_images)

    im_frames: list[_Frame] = []
    frame_count = 0
    for im_seq in chain:
        for im_frame in ImageSequence.Iterator(im_seq):
            if im_frame.mode == mode:
                im_frame = im_frame.copy()
            else:
                im_frame = im_frame.convert(mode)
            encoderinfo = im.encoderinfo.copy()
            # Per-frame settings may be supplied as sequences.
            if isinstance(duration, (list, tuple)):
                encoderinfo["duration"] = duration[frame_count]
            elif duration is None and "duration" in im_frame.info:
                encoderinfo["duration"] = im_frame.info["duration"]
            if isinstance(disposal, (list, tuple)):
                encoderinfo["disposal"] = disposal[frame_count]
            if isinstance(blend, (list, tuple)):
                encoderinfo["blend"] = blend[frame_count]
            frame_count += 1

            if im_frames:
                previous = im_frames[-1]
                prev_disposal = previous.encoderinfo.get("disposal")
                prev_blend = previous.encoderinfo.get("blend")
                # OP_PREVIOUS is not valid for the very first frame
                if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2:
                    prev_disposal = Disposal.OP_BACKGROUND

                # Reconstruct the canvas as a decoder would see it after the
                # previous frame's disposal, so we can diff against it.
                if prev_disposal == Disposal.OP_BACKGROUND:
                    base_im = previous.im.copy()
                    dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0))
                    bbox = previous.bbox
                    if bbox:
                        dispose = dispose.crop(bbox)
                    else:
                        bbox = (0, 0) + im.size
                    base_im.paste(dispose, bbox)
                elif prev_disposal == Disposal.OP_PREVIOUS:
                    base_im = im_frames[-2].im
                else:
                    base_im = previous.im
                delta = ImageChops.subtract_modulo(
                    im_frame.convert("RGBA"), base_im.convert("RGBA")
                )
                bbox = delta.getbbox(alpha_only=False)
                if (
                    not bbox
                    and prev_disposal == encoderinfo.get("disposal")
                    and prev_blend == encoderinfo.get("blend")
                    and "duration" in encoderinfo
                ):
                    # Identical frame: just extend the previous duration.
                    previous.encoderinfo["duration"] += encoderinfo["duration"]
                    continue
            else:
                bbox = None
            im_frames.append(_Frame(im_frame, bbox, encoderinfo))

    if len(im_frames) == 1 and not default_image:
        # Only one frame left: let the caller save a regular static PNG.
        return im_frames[0].im

    # animation control
    chunk(
        fp,
        b"acTL",
        o32(len(im_frames)),  # 0: num_frames
        o32(loop),  # 4: num_plays
    )

    # default image IDAT (if it exists)
    if default_image:
        if im.mode != mode:
            im = im.convert(mode)
        ImageFile._save(
            im,
            cast(IO[bytes], _idat(fp, chunk)),
            [ImageFile._Tile("zip", (0, 0) + im.size, 0, rawmode)],
        )

    seq_num = 0
    for frame, frame_data in enumerate(im_frames):
        im_frame = frame_data.im
        if not frame_data.bbox:
            bbox = (0, 0) + im_frame.size
        else:
            # only the changed region is stored for this frame
            bbox = frame_data.bbox
            im_frame = im_frame.crop(bbox)
        size = im_frame.size
        encoderinfo = frame_data.encoderinfo
        frame_duration = int(round(encoderinfo.get("duration", 0)))
        frame_disposal = encoderinfo.get("disposal", disposal)
        frame_blend = encoderinfo.get("blend", blend)
        # frame control
        chunk(
            fp,
            b"fcTL",
            o32(seq_num),  # sequence_number
            o32(size[0]),  # width
            o32(size[1]),  # height
            o32(bbox[0]),  # x_offset
            o32(bbox[1]),  # y_offset
            o16(frame_duration),  # delay_numerator
            o16(1000),  # delay_denominator
            o8(frame_disposal),  # dispose_op
            o8(frame_blend),  # blend_op
        )
        seq_num += 1
        # frame data
        if frame == 0 and not default_image:
            # first frame must be in IDAT chunks for backwards compatibility
            ImageFile._save(
                im_frame,
                cast(IO[bytes], _idat(fp, chunk)),
                [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)],
            )
        else:
            fdat_chunks = _fdat(fp, chunk, seq_num)
            ImageFile._save(
                im_frame,
                cast(IO[bytes], fdat_chunks),
                [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)],
            )
            seq_num = fdat_chunks.seq_num
    return None
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # Entry point for Image.save(..., save_all=True): write an animated PNG.
    _save(im, fp, filename, save_all=True)
def _save(
    im: Image.Image,
    fp: IO[bytes],
    filename: str | bytes,
    chunk: Callable[..., None] = putchunk,
    save_all: bool = False,
) -> None:
    """Write *im* to *fp* as a PNG (or APNG when ``save_all`` is true).

    ``chunk`` is the chunk writer; ``getchunks`` substitutes its own to
    capture chunks instead of writing them.
    """
    # save an image to disk (called by the save method)
    if save_all:
        default_image = im.encoderinfo.get(
            "default_image", im.info.get("default_image")
        )
        modes = set()
        sizes = set()
        append_images = im.encoderinfo.get("append_images", [])
        for im_seq in itertools.chain([im], append_images):
            for im_frame in ImageSequence.Iterator(im_seq):
                modes.add(im_frame.mode)
                sizes.add(im_frame.size)
        # pick one output mode that every frame can be converted to
        for mode in ("RGBA", "RGB", "P"):
            if mode in modes:
                break
        else:
            mode = modes.pop()
        # canvas must fit the largest frame in each dimension
        size = tuple(max(frame_size[i] for frame_size in sizes) for i in range(2))
    else:
        size = im.size
        mode = im.mode
    outmode = mode
    if mode == "P":
        #
        # attempt to minimize storage requirements for palette images
        if "bits" in im.encoderinfo:
            # number of bits specified by user
            colors = min(1 << im.encoderinfo["bits"], 256)
        else:
            # check palette contents
            if im.palette:
                colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 1)
            else:
                colors = 256
        if colors <= 16:
            if colors <= 2:
                bits = 1
            elif colors <= 4:
                bits = 2
            else:
                bits = 4
            # e.g. "P;4" selects the sub-byte packed variant in _OUTMODES
            outmode += f";{bits}"
    # encoder options
    im.encoderconfig = (
        im.encoderinfo.get("optimize", False),
        im.encoderinfo.get("compress_level", -1),
        im.encoderinfo.get("compress_type", -1),
        im.encoderinfo.get("dictionary", b""),
    )
    # get the corresponding PNG mode
    try:
        rawmode, bit_depth, color_type = _OUTMODES[outmode]
    except KeyError as e:
        msg = f"cannot write mode {mode} as PNG"
        raise OSError(msg) from e
    if outmode == "I":
        deprecate("Saving I mode images as PNG", 13, stacklevel=4)
    #
    # write minimal PNG file
    fp.write(_MAGIC)
    chunk(
        fp,
        b"IHDR",
        o32(size[0]),  # 0: size
        o32(size[1]),
        bit_depth,
        color_type,
        b"\0",  # 10: compression
        b"\0",  # 11: filter category
        b"\0",  # 12: interlace flag
    )
    # chunks that may appear at most once each, before PLTE/IDAT
    chunks = [b"cHRM", b"cICP", b"gAMA", b"sBIT", b"sRGB", b"tIME"]
    icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile"))
    if icc:
        # ICC profile
        # according to PNG spec, the iCCP chunk contains:
        # Profile name 1-79 bytes (character string)
        # Null separator 1 byte (null character)
        # Compression method 1 byte (0)
        # Compressed profile n bytes (zlib with deflate compression)
        name = b"ICC Profile"
        data = name + b"\0\0" + zlib.compress(icc)
        chunk(fp, b"iCCP", data)
        # You must either have sRGB or iCCP.
        # Disallow sRGB chunks when an iCCP-chunk has been emitted.
        chunks.remove(b"sRGB")
    info = im.encoderinfo.get("pnginfo")
    if info:
        chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"]
        for info_chunk in info.chunks:
            cid, data = info_chunk[:2]
            if cid in chunks:
                chunks.remove(cid)
                chunk(fp, cid, data)
            elif cid in chunks_multiple_allowed:
                chunk(fp, cid, data)
            elif cid[1:2].islower():
                # Private chunk
                # optional third tuple element marks it for after-IDAT output
                after_idat = len(info_chunk) == 3 and info_chunk[2]
                if not after_idat:
                    chunk(fp, cid, data)
    if im.mode == "P":
        palette_byte_number = colors * 3
        palette_bytes = im.im.getpalette("RGB")[:palette_byte_number]
        while len(palette_bytes) < palette_byte_number:
            palette_bytes += b"\0"
        chunk(fp, b"PLTE", palette_bytes)
    transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None))
    if transparency or transparency == 0:
        if im.mode == "P":
            # limit to actual palette size
            alpha_bytes = colors
            if isinstance(transparency, bytes):
                chunk(fp, b"tRNS", transparency[:alpha_bytes])
            else:
                # single transparent palette index: opaque up to that index
                transparency = max(0, min(255, transparency))
                alpha = b"\xff" * transparency + b"\0"
                chunk(fp, b"tRNS", alpha[:alpha_bytes])
        elif im.mode in ("1", "L", "I", "I;16"):
            transparency = max(0, min(65535, transparency))
            chunk(fp, b"tRNS", o16(transparency))
        elif im.mode == "RGB":
            red, green, blue = transparency
            chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue))
        else:
            if "transparency" in im.encoderinfo:
                # don't bother with transparency if it's an RGBA
                # and it's in the info dict. It's probably just stale.
                msg = "cannot use transparency for this mode"
                raise OSError(msg)
    else:
        if im.mode == "P" and im.im.getpalettemode() == "RGBA":
            # RGBA palette: emit per-index alpha values instead
            alpha = im.im.getpalette("RGBA", "A")
            alpha_bytes = colors
            chunk(fp, b"tRNS", alpha[:alpha_bytes])
    dpi = im.encoderinfo.get("dpi")
    if dpi:
        chunk(
            fp,
            b"pHYs",
            o32(int(dpi[0] / 0.0254 + 0.5)),  # dots per inch -> dots per metre
            o32(int(dpi[1] / 0.0254 + 0.5)),
            b"\x01",  # unit: metre
        )
    if info:
        # chunks that belong between PLTE and IDAT
        chunks = [b"bKGD", b"hIST"]
        for info_chunk in info.chunks:
            cid, data = info_chunk[:2]
            if cid in chunks:
                chunks.remove(cid)
                chunk(fp, cid, data)
    exif = im.encoderinfo.get("exif")
    if exif:
        if isinstance(exif, Image.Exif):
            exif = exif.tobytes(8)
        if exif.startswith(b"Exif\x00\x00"):
            # strip the JPEG/APP1-style prefix; eXIf stores raw TIFF data
            exif = exif[6:]
        chunk(fp, b"eXIf", exif)
    single_im: Image.Image | None = im
    if save_all:
        single_im = _write_multiple_frames(
            im, fp, chunk, mode, rawmode, default_image, append_images
        )
    if single_im:
        ImageFile._save(
            single_im,
            cast(IO[bytes], _idat(fp, chunk)),
            [ImageFile._Tile("zip", (0, 0) + single_im.size, 0, rawmode)],
        )
    if info:
        for info_chunk in info.chunks:
            cid, data = info_chunk[:2]
            if cid[1:2].islower():
                # Private chunk
                after_idat = len(info_chunk) == 3 and info_chunk[2]
                if after_idat:
                    chunk(fp, cid, data)
    chunk(fp, b"IEND", b"")
    if hasattr(fp, "flush"):
        fp.flush()
# --------------------------------------------------------------------
# PNG chunk converter
def getchunks(im: Image.Image, **params: Any) -> list[tuple[bytes, bytes, bytes]]:
    """Return a list of PNG chunks representing this image."""
    from io import BytesIO

    collected: list[tuple[bytes, bytes, bytes]] = []

    def capture(fp: IO[bytes], cid: bytes, *data: bytes) -> None:
        # record (id, payload, crc) instead of writing to the stream
        payload = b"".join(data)
        crc = o32(_crc32(payload, _crc32(cid)))
        collected.append((cid, payload, crc))

    sink = BytesIO()
    try:
        im.encoderinfo = params
        _save(im, sink, "", capture)
    finally:
        del im.encoderinfo
    return collected
# --------------------------------------------------------------------
# Registry
# Register the PNG codec, its save handlers, and the file extensions /
# MIME type it claims.
Image.register_open(PngImageFile.format, PngImageFile, _accept)
Image.register_save(PngImageFile.format, _save)
Image.register_save_all(PngImageFile.format, _save_all)
Image.register_extensions(PngImageFile.format, [".png", ".apng"])
Image.register_mime(PngImageFile.format, "image/png")
# File: venv\Lib\site-packages\PIL\PpmImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# PPM support for PIL
#
# History:
# 96-03-24 fl Created
# 98-03-06 fl Write RGBA images (as RGB, that is)
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import math
from typing import IO
from . import Image, ImageFile
from ._binary import i16be as i16
from ._binary import o8
from ._binary import o32le as o32
#
# --------------------------------------------------------------------
# ASCII whitespace bytes that may separate header tokens
# (space, tab, LF, VT, FF, CR).
b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d"
# magic number -> PIL mode
MODES = {
    # standard
    b"P1": "1",
    b"P2": "L",
    b"P3": "RGB",
    b"P4": "1",
    b"P5": "L",
    b"P6": "RGB",
    # extensions
    b"P0CMYK": "CMYK",
    b"Pf": "F",
    # PIL extensions (for test purposes only)
    b"PyP": "P",
    b"PyRGBA": "RGBA",
    b"PyCMYK": "CMYK",
}
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"P") and prefix[1] in b"0123456fy"
##
# Image plugin for PBM, PGM, and PPM images.
class PpmImageFile(ImageFile.ImageFile):
    format = "PPM"
    format_description = "Pbmplus image"

    def _read_magic(self) -> bytes:
        """Read the magic number at the start of the file."""
        assert self.fp is not None
        magic = b""
        # read until whitespace or longest available magic number
        for _ in range(6):
            c = self.fp.read(1)
            if not c or c in b_whitespace:
                break
            magic += c
        return magic

    def _read_token(self) -> bytes:
        """Read the next whitespace-delimited header token (max 10 bytes)."""
        assert self.fp is not None
        token = b""
        while len(token) <= 10:  # read until next whitespace or limit of 10 characters
            c = self.fp.read(1)
            if not c:
                break
            elif c in b_whitespace:  # token ended
                if not token:
                    # skip whitespace at start
                    continue
                break
            elif c == b"#":
                # ignores rest of the line; stops at CR, LF or EOF
                while self.fp.read(1) not in b"\r\n":
                    pass
                continue
            token += c
        if not token:
            # Token was not even 1 byte
            msg = "Reached EOF while reading header"
            raise ValueError(msg)
        elif len(token) > 10:
            msg_too_long = b"Token too long in file header: %s" % token
            raise ValueError(msg_too_long)
        return token

    def _open(self) -> None:
        """Parse the PNM/PFM header and set up the decoder tile."""
        assert self.fp is not None
        magic_number = self._read_magic()
        try:
            mode = MODES[magic_number]
        except KeyError:
            msg = "not a PPM file"
            raise SyntaxError(msg)
        self._mode = mode

        if magic_number in (b"P1", b"P4"):
            self.custom_mimetype = "image/x-portable-bitmap"
        elif magic_number in (b"P2", b"P5"):
            self.custom_mimetype = "image/x-portable-graymap"
        elif magic_number in (b"P3", b"P6"):
            self.custom_mimetype = "image/x-portable-pixmap"

        self._size = int(self._read_token()), int(self._read_token())

        decoder_name = "raw"
        if magic_number in (b"P1", b"P2", b"P3"):
            # plain (ASCII) variants use the Python decoder below
            decoder_name = "ppm_plain"
        args: str | tuple[str | int, ...]
        if mode == "1":
            args = "1;I"
        elif mode == "F":
            # PFM: the maxval slot holds a scale factor whose sign encodes
            # the byte order (negative = little-endian)
            scale = float(self._read_token())
            if scale == 0.0 or not math.isfinite(scale):
                msg = "scale must be finite and non-zero"
                raise ValueError(msg)
            self.info["scale"] = abs(scale)
            rawmode = "F;32F" if scale < 0 else "F;32BF"
            args = (rawmode, 0, -1)
        else:
            maxval = int(self._read_token())
            if not 0 < maxval < 65536:
                msg = "maxval must be greater than 0 and less than 65536"
                raise ValueError(msg)
            if maxval > 255 and mode == "L":
                # wide grayscale samples are promoted to 32-bit "I"
                self._mode = "I"
            rawmode = mode
            if decoder_name != "ppm_plain":
                # If maxval matches a bit depth, use the raw decoder directly
                if maxval == 65535 and mode == "L":
                    rawmode = "I;16B"
                elif maxval != 255:
                    # arbitrary maxval needs rescaling in the "ppm" decoder
                    decoder_name = "ppm"
            args = rawmode if decoder_name == "raw" else (rawmode, maxval)
        self.tile = [
            ImageFile._Tile(decoder_name, (0, 0) + self.size, self.fp.tell(), args)
        ]
#
# --------------------------------------------------------------------
class PpmPlainDecoder(ImageFile.PyDecoder):
    """Decoder for the plain (ASCII) P1/P2/P3 PNM variants."""

    _pulls_fd = True
    # True while a '#' comment continues into the next block
    _comment_spans: bool

    def _read_block(self) -> bytes:
        assert self.fd is not None

        return self.fd.read(ImageFile.SAFEBLOCK)

    def _find_comment_end(self, block: bytes, start: int = 0) -> int:
        # comments end at the first CR or LF after `start`
        a = block.find(b"\n", start)
        b = block.find(b"\r", start)
        return min(a, b) if a * b > 0 else max(a, b)  # lowest nonnegative index (or -1)

    def _ignore_comments(self, block: bytes) -> bytes:
        """Strip '#' comments from *block*, tracking comments that span blocks."""
        if self._comment_spans:
            # Finish current comment
            while block:
                comment_end = self._find_comment_end(block)
                if comment_end != -1:
                    # Comment ends in this block
                    # Delete tail of comment
                    block = block[comment_end + 1 :]
                    break
                else:
                    # Comment spans whole block
                    # So read the next block, looking for the end
                    block = self._read_block()

        # Search for any further comments
        self._comment_spans = False
        while True:
            comment_start = block.find(b"#")
            if comment_start == -1:
                # No comment found
                break
            comment_end = self._find_comment_end(block, comment_start)
            if comment_end != -1:
                # Comment ends in this block
                # Delete comment
                block = block[:comment_start] + block[comment_end + 1 :]
            else:
                # Comment continues to next block(s)
                block = block[:comment_start]
                self._comment_spans = True
                break
        return block

    def _decode_bitonal(self) -> bytearray:
        """
        This is a separate method because in the plain PBM format, all data tokens are
        exactly one byte, so the inter-token whitespace is optional.
        """
        data = bytearray()
        total_bytes = self.state.xsize * self.state.ysize

        while len(data) != total_bytes:
            block = self._read_block()  # read next block
            if not block:
                # eof
                break

            block = self._ignore_comments(block)

            tokens = b"".join(block.split())
            for token in tokens:
                if token not in (48, 49):  # i.e. not b"0" or b"1"
                    msg = b"Invalid token for this mode: %s" % bytes([token])
                    raise ValueError(msg)
            data = (data + tokens)[:total_bytes]
        # PBM: "0" is white, "1" is black -> map to 0xff / 0x00 samples
        invert = bytes.maketrans(b"01", b"\xff\x00")
        return data.translate(invert)

    def _decode_blocks(self, maxval: int) -> bytearray:
        """Decode whitespace-separated decimal samples, rescaled to out_max."""
        data = bytearray()
        max_len = 10
        out_byte_count = 4 if self.mode == "I" else 1
        out_max = 65535 if self.mode == "I" else 255
        bands = Image.getmodebands(self.mode)
        total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count

        half_token = b""
        while len(data) != total_bytes:
            block = self._read_block()  # read next block
            if not block:
                if half_token:
                    block = bytearray(b" ")  # flush half_token
                else:
                    # eof
                    break

            block = self._ignore_comments(block)

            if half_token:
                block = half_token + block  # stitch half_token to new block
                half_token = b""

            tokens = block.split()

            if block and not block[-1:].isspace():  # block might split token
                half_token = tokens.pop()  # save half token for later
                if len(half_token) > max_len:  # prevent buildup of half_token
                    msg = (
                        b"Token too long found in data: %s" % half_token[: max_len + 1]
                    )
                    raise ValueError(msg)

            for token in tokens:
                if len(token) > max_len:
                    msg = b"Token too long found in data: %s" % token[: max_len + 1]
                    raise ValueError(msg)
                value = int(token)
                if value < 0:
                    msg_str = f"Channel value is negative: {value}"
                    raise ValueError(msg_str)
                if value > maxval:
                    msg_str = f"Channel value too large for this mode: {value}"
                    raise ValueError(msg_str)
                # rescale from [0, maxval] to the full output range
                value = round(value / maxval * out_max)
                data += o32(value) if self.mode == "I" else o8(value)
                if len(data) == total_bytes:  # finished!
                    break
        return data

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        self._comment_spans = False
        if self.mode == "1":
            data = self._decode_bitonal()
            rawmode = "1;8"
        else:
            maxval = self.args[-1]
            data = self._decode_blocks(maxval)
            rawmode = "I;32" if self.mode == "I" else self.mode
        self.set_as_raw(bytes(data), rawmode)
        return -1, 0
class PpmDecoder(ImageFile.PyDecoder):
    """Decoder for binary PNM data whose maxval requires rescaling."""

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None

        data = bytearray()
        maxval = self.args[-1]
        # source samples are 1 or 2 bytes depending on maxval
        in_byte_count = 1 if maxval < 256 else 2
        out_byte_count = 4 if self.mode == "I" else 1
        out_max = 65535 if self.mode == "I" else 255
        bands = Image.getmodebands(self.mode)
        dest_length = self.state.xsize * self.state.ysize * bands * out_byte_count
        while len(data) < dest_length:
            pixels = self.fd.read(in_byte_count * bands)
            if len(pixels) < in_byte_count * bands:
                # eof
                break
            for b in range(bands):
                value = (
                    pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count)
                )
                # rescale from [0, maxval] to the full output range
                value = min(out_max, round(value / maxval * out_max))
                data += o32(value) if self.mode == "I" else o8(value)
        rawmode = "I;32" if self.mode == "I" else self.mode
        self.set_as_raw(bytes(data), rawmode)
        return -1, 0
#
# --------------------------------------------------------------------
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* in binary PNM (or PFM) format."""
    # PIL mode -> (rawmode for the encoder, PNM magic number)
    entry = {
        "1": ("1;I", b"P4"),
        "L": ("L", b"P5"),
        "I": ("I;16B", b"P5"),
        "I;16": ("I;16B", b"P5"),
        "RGB": ("RGB", b"P6"),
        "RGBA": ("RGB", b"P6"),  # alpha channel is dropped
        "F": ("F;32F", b"Pf"),
    }.get(im.mode)
    if entry is None:
        msg = f"cannot write mode {im.mode} as PPM"
        raise OSError(msg)
    rawmode, head = entry

    # header: magic number, dimensions, then maxval (or scale for PFM)
    fp.write(head + b"\n%d %d\n" % im.size)
    if head == b"P6":
        fp.write(b"255\n")
    elif head == b"P5":
        fp.write(b"255\n" if rawmode == "L" else b"65535\n")
    elif head == b"Pf":
        fp.write(b"-1.0\n")  # negative scale: little-endian floats
    # PFM stores rows bottom-up, hence the negative row order
    row_order = -1 if im.mode == "F" else 1
    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, row_order))]
    )
#
# --------------------------------------------------------------------
# Register the PPM reader/writer, its two Python decoders, and the
# extensions / MIME type it claims.
Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
Image.register_save(PpmImageFile.format, _save)
Image.register_decoder("ppm", PpmDecoder)
Image.register_decoder("ppm_plain", PpmPlainDecoder)
Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm", ".pfm"])
Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
# File: venv\Lib\site-packages\PIL\PsdImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
from functools import cached_property
from typing import IO
from . import Image, ImageFile, ImagePalette
from ._binary import i8
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import si16be as si16
from ._binary import si32be as si32
from ._util import DeferredError
# Supported Photoshop (color mode, bit depth) combinations.
MODES = {
    # (photoshop mode, bits) -> (pil mode, required channels)
    (0, 1): ("1", 1),
    (0, 8): ("L", 1),
    (1, 8): ("L", 1),
    (2, 8): ("P", 1),
    (3, 8): ("RGB", 3),
    (4, 8): ("CMYK", 4),
    (7, 8): ("L", 1),  # FIXME: multilayer
    (8, 8): ("L", 1),  # duotone
    (9, 8): ("LAB", 3),
}
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"8BPS")
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
    format = "PSD"
    format_description = "Adobe Photoshop"
    # layers are read lazily via seek(), so keep the file handle open
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        read = self.fp.read

        #
        # header
        s = read(26)
        if not _accept(s) or i16(s, 4) != 1:
            msg = "not a PSD file"
            raise SyntaxError(msg)

        psd_bits = i16(s, 22)
        psd_channels = i16(s, 12)
        psd_mode = i16(s, 24)

        mode, channels = MODES[(psd_mode, psd_bits)]

        if channels > psd_channels:
            msg = "not enough channels"
            raise OSError(msg)
        if mode == "RGB" and psd_channels == 4:
            mode = "RGBA"
            channels = 4

        self._mode = mode
        # width at offset 18, height at 14 (the header stores rows first)
        self._size = i32(s, 18), i32(s, 14)

        #
        # color mode data
        size = i32(read(4))
        if size:
            data = read(size)
            if mode == "P" and size == 768:
                self.palette = ImagePalette.raw("RGB;L", data)

        #
        # image resources
        self.resources = []
        size = i32(read(4))
        if size:
            # load resources
            end = self.fp.tell() + size
            while self.fp.tell() < end:
                read(4)  # signature
                id = i16(read(2))
                name = read(i8(read(1)))
                if not (len(name) & 1):
                    read(1)  # padding
                data = read(i32(read(4)))
                if len(data) & 1:
                    read(1)  # padding
                self.resources.append((id, name, data))
                if id == 1039:  # ICC profile
                    self.info["icc_profile"] = data

        #
        # layer and mask information
        self._layers_position = None
        size = i32(read(4))
        if size:
            end = self.fp.tell() + size
            size = i32(read(4))
            if size:
                # remember where the layer info lives; parsed lazily in .layers
                self._layers_position = self.fp.tell()
                self._layers_size = size
            self.fp.seek(end)
        self._n_frames: int | None = None

        #
        # image descriptor
        self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)

        # keep the file open
        self._fp = self.fp
        self.frame = 1
        self._min_frame = 1

    @cached_property
    def layers(
        self,
    ) -> list[tuple[str, str, tuple[int, int, int, int], list[ImageFile._Tile]]]:
        """Lazily-parsed list of (name, mode, bbox, tiles) per layer."""
        layers = []
        if self._layers_position is not None:
            if isinstance(self._fp, DeferredError):
                raise self._fp.ex
            self._fp.seek(self._layers_position)
            _layer_data = io.BytesIO(ImageFile._safe_read(self._fp, self._layers_size))
            layers = _layerinfo(_layer_data, self._layers_size)
        self._n_frames = len(layers)
        return layers

    @property
    def n_frames(self) -> int:
        if self._n_frames is None:
            self._n_frames = len(self.layers)
        return self._n_frames

    @property
    def is_animated(self) -> bool:
        return len(self.layers) > 1

    def seek(self, layer: int) -> None:
        if not self._seek_check(layer):
            return
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        # seek to given layer (1..max)
        _, mode, _, tile = self.layers[layer - 1]
        self._mode = mode
        self.tile = tile
        self.frame = layer
        self.fp = self._fp

    def tell(self) -> int:
        # return layer number (0=image, 1..max=layers)
        return self.frame
def _layerinfo(
    fp: IO[bytes], ct_bytes: int
) -> list[tuple[str, str, tuple[int, int, int, int], list[ImageFile._Tile]]]:
    # read layerinfo block
    layers = []

    def read(size: int) -> bytes:
        return ImageFile._safe_read(fp, size)

    # signed layer count; the sign is a spec flag, only abs(ct) layers follow
    ct = si16(read(2))

    # sanity check
    if ct_bytes < (abs(ct) * 20):
        msg = "Layer block too short for number of layers requested"
        raise SyntaxError(msg)

    for _ in range(abs(ct)):
        # bounding box
        y0 = si32(read(4))
        x0 = si32(read(4))
        y1 = si32(read(4))
        x1 = si32(read(4))

        # image info
        bands = []
        ct_types = i16(read(2))
        if ct_types > 4:
            # too many channels to be a plain R/G/B/A layer: skip the
            # channel records and this layer's extra data entirely
            fp.seek(ct_types * 6 + 12, io.SEEK_CUR)
            size = i32(read(4))
            fp.seek(size, io.SEEK_CUR)
            continue
        for _ in range(ct_types):
            type = i16(read(2))
            if type == 65535:
                b = "A"
            else:
                b = "RGBA"[type]
            bands.append(b)
            read(4)  # size

        # figure out the image mode
        bands.sort()
        if bands == ["R"]:
            mode = "L"
        elif bands == ["B", "G", "R"]:
            mode = "RGB"
        elif bands == ["A", "B", "G", "R"]:
            mode = "RGBA"
        else:
            mode = ""  # unknown

        # skip over blend flags and extra information
        read(12)  # filler
        name = ""
        size = i32(read(4))  # length of the extra data field
        if size:
            data_end = fp.tell() + size

            # skip layer mask data
            length = i32(read(4))
            if length:
                fp.seek(length - 16, io.SEEK_CUR)

            # skip layer blending ranges
            length = i32(read(4))
            if length:
                fp.seek(length, io.SEEK_CUR)

            # Pascal-style layer name
            length = i8(read(1))
            if length:
                # Don't know the proper encoding,
                # Latin-1 should be a good guess
                name = read(length).decode("latin-1", "replace")

            fp.seek(data_end)
        layers.append((name, mode, (x0, y0, x1, y1)))

    # get tiles
    layerinfo = []
    for i, (name, mode, bbox) in enumerate(layers):
        tile = []
        for m in mode:
            # channel data follows the layer records, one plane per band
            t = _maketile(fp, m, bbox, 1)
            if t:
                tile.extend(t)
        layerinfo.append((name, mode, bbox, tile))

    return layerinfo
def _maketile(
    file: IO[bytes], mode: str, bbox: tuple[int, int, int, int], channels: int
) -> list[ImageFile._Tile]:
    """Build decoder tiles for *channels* planes stored at the current offset."""
    tiles = []
    read = file.read

    compression = i16(read(2))

    xsize = bbox[2] - bbox[0]
    ysize = bbox[3] - bbox[1]

    offset = file.tell()

    if compression == 0:
        #
        # raw compression: planes are stored back to back
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                layer += ";I"  # CMYK samples are stored inverted
            tiles.append(ImageFile._Tile("raw", bbox, offset, layer))
            offset = offset + xsize * ysize

    elif compression == 1:
        #
        # packbits compression: a table of per-row byte counts precedes the data
        i = 0
        bytecount = read(channels * ysize * 2)
        offset = file.tell()
        for channel in range(channels):
            layer = mode[channel]
            if mode == "CMYK":
                layer += ";I"
            tiles.append(ImageFile._Tile("packbits", bbox, offset, layer))
            # advance past this channel's rows using the byte-count table
            for y in range(ysize):
                offset = offset + i16(bytecount, i)
                i += 2

    file.seek(offset)

    if offset & 1:
        read(1)  # padding

    return tiles
# --------------------------------------------------------------------
# registry
# PSD is read-only: register an opener but no save handler.
Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
Image.register_extension(PsdImageFile.format, ".psd")
Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
# File: venv\Lib\site-packages\PIL\PSDraw.py
#
# The Python Imaging Library
# $Id$
#
# Simple PostScript graphics interface
#
# History:
# 1996-04-20 fl Created
# 1999-01-10 fl Added gsave/grestore to image method
# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
#
# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import sys
from typing import IO
from . import EpsImagePlugin
TYPE_CHECKING = False
##
# Simple PostScript graphics interface.
class PSDraw:
    """
    Sets up printing to the given file. If ``fp`` is omitted,
    ``sys.stdout.buffer`` is assumed.
    """

    def __init__(self, fp: IO[bytes] | None = None) -> None:
        if not fp:
            fp = sys.stdout.buffer
        self.fp = fp

    def begin_document(self, id: str | None = None) -> None:
        """Set up printing of a document. (Write PostScript DSC header.)"""
        # FIXME: incomplete
        self.fp.write(
            b"%!PS-Adobe-3.0\n"
            b"save\n"
            b"/showpage { } def\n"
            b"%%EndComments\n"
            b"%%BeginDocument\n"
        )
        # self.fp.write(ERROR_PS) # debugging!
        # emit the prolog procedures the drawing methods below rely on
        self.fp.write(EDROFF_PS)
        self.fp.write(VDI_PS)
        self.fp.write(b"%%EndProlog\n")
        # fonts already re-encoded during this document
        self.isofont: dict[bytes, int] = {}

    def end_document(self) -> None:
        """Ends printing. (Write PostScript DSC footer.)"""
        self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n")
        if hasattr(self.fp, "flush"):
            self.fp.flush()

    def setfont(self, font: str, size: int) -> None:
        """
        Selects which font to use.

        :param font: A PostScript font name
        :param size: Size in points.
        """
        font_bytes = bytes(font, "UTF-8")
        if font_bytes not in self.isofont:
            # reencode font
            self.fp.write(
                b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font_bytes, font_bytes)
            )
            self.isofont[font_bytes] = 1
        # rough
        self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font_bytes))

    def line(self, xy0: tuple[int, int], xy1: tuple[int, int]) -> None:
        """
        Draws a line between the two points. Coordinates are given in
        PostScript point coordinates (72 points per inch, (0, 0) is the lower
        left corner of the page).
        """
        self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1))

    def rectangle(self, box: tuple[int, int, int, int]) -> None:
        """
        Draws a rectangle.

        :param box: A tuple of four integers, specifying left, bottom, width and
           height.
        """
        self.fp.write(b"%d %d M 0 %d %d Vr\n" % box)

    def text(self, xy: tuple[int, int], text: str) -> None:
        """
        Draws text at the given position. You must use
        :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
        """
        text_bytes = bytes(text, "UTF-8")
        # escape parentheses, which delimit PostScript strings
        text_bytes = b"\\(".join(text_bytes.split(b"("))
        text_bytes = b"\\)".join(text_bytes.split(b")"))
        self.fp.write(b"%d %d M (%s) S\n" % (xy + (text_bytes,)))

    if TYPE_CHECKING:
        from . import Image

    def image(
        self, box: tuple[int, int, int, int], im: Image.Image, dpi: int | None = None
    ) -> None:
        """Draw a PIL image, centered in the given box."""
        # default resolution depends on mode
        if not dpi:
            if im.mode == "1":
                dpi = 200  # fax
            else:
                dpi = 100  # grayscale
        # image size (on paper)
        x = im.size[0] * 72 / dpi
        y = im.size[1] * 72 / dpi
        # max allowed size
        xmax = float(box[2] - box[0])
        ymax = float(box[3] - box[1])
        # scale down proportionally so the image fits the box
        if x > xmax:
            y = y * xmax / x
            x = xmax
        if y > ymax:
            x = x * ymax / y
            y = ymax
        # center within the box
        dx = (xmax - x) / 2 + box[0]
        dy = (ymax - y) / 2 + box[1]
        self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy))
        if (x, y) != im.size:
            # EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
            sx = x / im.size[0]
            sy = y / im.size[1]
            self.fp.write(b"%f %f scale\n" % (sx, sy))
        EpsImagePlugin._save(im, self.fp, "", 0)
        self.fp.write(b"\ngrestore\n")
# --------------------------------------------------------------------
# PostScript driver

#
# EDROFF.PS -- PostScript driver for Edroff 2
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
# Text helpers used by PSDraw: S/P show text, M/X/Y move the current
# point, E re-encodes a font with a new encoding vector, F selects a
# scaled font.
EDROFF_PS = b"""\
/S { show } bind def
/P { moveto show } bind def
/M { moveto } bind def
/X { 0 rmoveto } bind def
/Y { 0 exch rmoveto } bind def
/E { findfont
dup maxlength dict begin
{
1 index /FID ne { def } { pop pop } ifelse
} forall
/Encoding exch def
dup /FontName exch def
currentdict end definefont pop
} bind def
/F { findfont exch scalefont dup setfont
[ exch /setfont cvx ] cvx bind def
} bind def
"""
#
# VDI.PS -- PostScript driver for VDI meta commands
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
# Drawing helpers used by PSDraw: Vm move, Va arc, Vl line, Vc circle,
# Vr filled rectangle, Ve ellipse, Vf fill with a given gray level.
VDI_PS = b"""\
/Vm { moveto } bind def
/Va { newpath arcn stroke } bind def
/Vl { moveto lineto stroke } bind def
/Vc { newpath 0 360 arc closepath } bind def
/Vr { exch dup 0 rlineto
exch dup 0 exch rlineto
exch neg 0 rlineto
0 exch neg rlineto
setgray fill } bind def
/Tm matrix def
/Ve { Tm currentmatrix pop
translate scale newpath 0 0 .5 0 360 arc closepath
Tm setmatrix
} bind def
/Vf { currentgray exch setgray fill setgray } bind def
"""
#
# ERROR.PS -- Error handler
#
# History:
# 89-11-21 fl: created (pslist 1.10)
#
# PostScript error handler that prints the error name, offending command,
# VM status and both interpreter stacks on the page.  The corrupted
# "stargck" / "/ostargck" / "/estargck" tokens are fixed below: the
# PostScript `$error` dictionary defines `/ostack` and `/estack`, and the
# labels read "Operand stack" / "Execution stack".
ERROR_PS = b"""\
/landscape false def
/errorBUF 200 string def
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
errordict begin /handleerror {
initmatrix /Courier findfont 10 scalefont setfont
newpath 72 720 moveto $error begin /newerror false def
(PostScript Error) show errorNL errorNL
(Error: ) show
/errorname load errorBUF cvs show errorNL errorNL
(Command: ) show
/command load dup type /stringtype ne { errorBUF cvs } if show
errorNL errorNL
(VMstatus: ) show
vmstatus errorBUF cvs show ( bytes available, ) show
errorBUF cvs show ( bytes used at level ) show
errorBUF cvs show errorNL errorNL
(Operand stack: ) show errorNL /ostack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall errorNL
(Execution stack: ) show errorNL /estack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall
end showpage
} def end
"""
# File: venv\Lib\site-packages\PIL\QoiImagePlugin.py
#
# The Python Imaging Library.
#
# QOI support for PIL
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
from typing import IO
from . import Image, ImageFile
from ._binary import i32be as i32
from ._binary import o8
from ._binary import o32be as o32
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"qoif")
class QoiImageFile(ImageFile.ImageFile):
    """Image plugin for the Quite OK Image (QOI) format."""

    format = "QOI"
    format_description = "Quite OK Image"

    def _open(self) -> None:
        """Parse the 14-byte QOI header and set up the single decoder tile."""
        if not _accept(self.fp.read(4)):  # magic "qoif"
            msg = "not a QOI file"
            raise SyntaxError(msg)

        # Width and height as 32-bit big-endian integers.
        self._size = i32(self.fp.read(4)), i32(self.fp.read(4))

        channels = self.fp.read(1)[0]  # 3 = RGB, 4 = RGBA
        self._mode = "RGB" if channels == 3 else "RGBA"

        self.fp.seek(1, os.SEEK_CUR)  # colorspace byte is not used

        # One tile covering the whole image, starting right after the header.
        self.tile = [ImageFile._Tile("qoi", (0, 0) + self._size, self.fp.tell())]
class QoiDecoder(ImageFile.PyDecoder):
    """Pure-Python decoder for the QOI opcode stream."""

    _pulls_fd = True
    # Most recently decoded pixel (RGBA); several QOI ops are deltas on it.
    _previous_pixel: bytes | bytearray | None = None
    # 64-slot running index of recently seen pixels, keyed by the QOI hash.
    _previously_seen_pixels: dict[int, bytes | bytearray] = {}

    def _add_to_previous_pixels(self, value: bytes | bytearray) -> None:
        # Remember *value* as the previous pixel and file it in the
        # 64-entry index under the QOI position hash.
        self._previous_pixel = value

        r, g, b, a = value
        hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
        self._previously_seen_pixels[hash_value] = value

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        """Decode the entire stream from the file descriptor in one call."""
        assert self.fd is not None

        self._previously_seen_pixels = {}
        # Spec-defined starting pixel: opaque black.
        self._previous_pixel = bytearray((0, 0, 0, 255))

        data = bytearray()
        bands = Image.getmodebands(self.mode)
        dest_length = self.state.xsize * self.state.ysize * bands
        while len(data) < dest_length:
            byte = self.fd.read(1)[0]
            value: bytes | bytearray
            if byte == 0b11111110 and self._previous_pixel:  # QOI_OP_RGB
                # New RGB; alpha carried over from the previous pixel.
                value = bytearray(self.fd.read(3)) + self._previous_pixel[3:]
            elif byte == 0b11111111:  # QOI_OP_RGBA
                value = self.fd.read(4)
            else:
                op = byte >> 6  # two-bit opcode in the high bits
                if op == 0:  # QOI_OP_INDEX
                    op_index = byte & 0b00111111
                    value = self._previously_seen_pixels.get(
                        op_index, bytearray((0, 0, 0, 0))
                    )
                elif op == 1 and self._previous_pixel:  # QOI_OP_DIFF
                    # Per-channel deltas in [-2, 1], stored biased by 2;
                    # arithmetic wraps modulo 256 as required by the spec.
                    value = bytearray(
                        (
                            (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)
                            % 256,
                            (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)
                            % 256,
                            (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,
                            self._previous_pixel[3],
                        )
                    )
                elif op == 2 and self._previous_pixel:  # QOI_OP_LUMA
                    second_byte = self.fd.read(1)[0]
                    diff_green = (byte & 0b00111111) - 32  # bias 32
                    diff_red = ((second_byte & 0b11110000) >> 4) - 8  # bias 8
                    diff_blue = (second_byte & 0b00001111) - 8  # bias 8

                    value = bytearray(
                        tuple(
                            (self._previous_pixel[i] + diff_green + diff) % 256
                            for i, diff in enumerate((diff_red, 0, diff_blue))
                        )
                    )
                    value += self._previous_pixel[3:]  # alpha unchanged
                elif op == 3 and self._previous_pixel:  # QOI_OP_RUN
                    run_length = (byte & 0b00111111) + 1  # stored biased by -1
                    value = self._previous_pixel
                    if bands == 3:
                        value = value[:3]
                    data += value * run_length
                    # Runs repeat the previous pixel, so the index and
                    # previous-pixel bookkeeping below is skipped.
                    continue
            self._add_to_previous_pixels(value)

            if bands == 3:
                value = value[:3]
            data += value
        self.set_as_raw(data)
        return -1, 0
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as a QOI file: 14-byte header, then encoded pixels.

    :param im: Image in mode "RGB" or "RGBA".
    :param fp: Binary file object opened for writing.
    :param filename: Unused; kept for the plugin save-handler signature.
    :raises ValueError: If the image mode is not supported by QOI.
    """
    channels = {"RGB": 3, "RGBA": 4}.get(im.mode)
    if channels is None:
        msg = "Unsupported QOI image mode"
        raise ValueError(msg)

    # QOI colorspace byte: 0 = sRGB with linear alpha, 1 = all linear.
    colorspace = 0 if im.encoderinfo.get("colorspace") == "sRGB" else 1

    width, height = im.size
    fp.write(b"qoif" + o32(width) + o32(height) + o8(channels) + o8(colorspace))

    ImageFile._save(im, fp, [ImageFile._Tile("qoi", (0, 0) + im.size)])
class QoiEncoder(ImageFile.PyEncoder):
    """Pure-Python encoder producing the QOI opcode stream."""

    _pushes_fd = True
    # Last pixel emitted; QOI ops encode differences against it.
    _previous_pixel: tuple[int, int, int, int] | None = None
    # 64-slot index of recently seen pixels, keyed by the QOI hash.
    _previously_seen_pixels: dict[int, tuple[int, int, int, int]] = {}
    _run = 0  # length of the current run of identical pixels

    def _write_run(self) -> bytes:
        # Flush the pending run as QOI_OP_RUN (length stored biased by -1).
        data = o8(0b11000000 | (self._run - 1))  # QOI_OP_RUN
        self._run = 0
        return data

    def _delta(self, left: int, right: int) -> int:
        # Signed wrap-around byte difference, normalized into [-128, 127].
        result = (left - right) & 255
        if result >= 128:
            result -= 256
        return result

    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
        """Encode the whole image in one call; return (length, errcode, data)."""
        assert self.im is not None

        # Slot 0 is pre-seeded per the spec so an initial zero pixel is not
        # mistakenly emitted as QOI_OP_INDEX 0.
        self._previously_seen_pixels = {0: (0, 0, 0, 0)}
        self._previous_pixel = (0, 0, 0, 255)  # spec-defined start pixel

        data = bytearray()
        w, h = self.im.size
        bands = Image.getmodebands(self.mode)

        for y in range(h):
            for x in range(w):
                pixel = self.im.getpixel((x, y))
                if bands == 3:
                    pixel = (*pixel, 255)  # treat RGB as fully opaque RGBA

                if pixel == self._previous_pixel:
                    self._run += 1
                    if self._run == 62:  # maximum encodable run length
                        data += self._write_run()
                else:
                    if self._run:
                        data += self._write_run()

                    r, g, b, a = pixel
                    hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
                    if self._previously_seen_pixels.get(hash_value) == pixel:
                        data += o8(hash_value)  # QOI_OP_INDEX
                    elif self._previous_pixel:
                        self._previously_seen_pixels[hash_value] = pixel

                        prev_r, prev_g, prev_b, prev_a = self._previous_pixel
                        if prev_a == a:
                            # Alpha unchanged: try the compact delta forms.
                            delta_r = self._delta(r, prev_r)
                            delta_g = self._delta(g, prev_g)
                            delta_b = self._delta(b, prev_b)

                            if (
                                -2 <= delta_r < 2
                                and -2 <= delta_g < 2
                                and -2 <= delta_b < 2
                            ):
                                data += o8(
                                    0b01000000
                                    | (delta_r + 2) << 4
                                    | (delta_g + 2) << 2
                                    | (delta_b + 2)
                                )  # QOI_OP_DIFF
                            else:
                                delta_gr = self._delta(delta_r, delta_g)
                                delta_gb = self._delta(delta_b, delta_g)
                                if (
                                    -8 <= delta_gr < 8
                                    and -32 <= delta_g < 32
                                    and -8 <= delta_gb < 8
                                ):
                                    data += o8(
                                        0b10000000 | (delta_g + 32)
                                    )  # QOI_OP_LUMA
                                    data += o8((delta_gr + 8) << 4 | (delta_gb + 8))
                                else:
                                    data += o8(0b11111110)  # QOI_OP_RGB
                                    data += bytes(pixel[:3])
                        else:
                            data += o8(0b11111111)  # QOI_OP_RGBA
                            data += bytes(pixel)

                self._previous_pixel = pixel

        if self._run:
            data += self._write_run()
        data += bytes((0, 0, 0, 0, 0, 0, 0, 1))  # end-of-stream padding

        return len(data), 0, data
# Register the QOI plugin: reader, codecs, writer and file extension.
Image.register_open(QoiImageFile.format, QoiImageFile, _accept)
Image.register_decoder("qoi", QoiDecoder)
Image.register_extension(QoiImageFile.format, ".qoi")
Image.register_save(QoiImageFile.format, _save)
Image.register_encoder("qoi", QoiEncoder)
venv\Lib\site-packages\PIL\report.py
from __future__ import annotations
from .features import pilinfo
pilinfo(supported_formats=False)
venv\Lib\site-packages\PIL\SgiImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# SGI image file handling
#
# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli.
#
#
#
# History:
# 2017-22-07 mb Add RLE decompression
# 2016-16-10 mb Add save method without compression
# 1995-09-10 fl Created
#
# Copyright (c) 2016 by Mickael Bonfill.
# Copyright (c) 2008 by Karsten Hiddemann.
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1995 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import os
import struct
from typing import IO
from . import Image, ImageFile
from ._binary import i16be as i16
from ._binary import o8
def _accept(prefix: bytes) -> bool:
    """An SGI file starts with the big-endian 16-bit magic number 474."""
    if len(prefix) < 2:
        return False
    return i16(prefix) == 474
# (bytes per channel, dimension, zsize) -> PIL rawmode
MODES = {
    (1, 1, 1): "L",
    (1, 2, 1): "L",
    (2, 1, 1): "L;16B",
    (2, 2, 1): "L;16B",
    (1, 3, 3): "RGB",
    (2, 3, 3): "RGB;16B",
    (1, 3, 4): "RGBA",
    (2, 3, 4): "RGBA;16B",
}
##
# Image plugin for SGI images.
class SgiImageFile(ImageFile.ImageFile):
    """Image plugin for SGI (.bw/.rgb/.rgba/.sgi) images."""

    format = "SGI"
    format_description = "SGI Image File Format"

    def _open(self) -> None:
        """Parse the fixed 512-byte SGI header and set up decoder tiles."""
        # HEAD
        assert self.fp is not None

        headlen = 512
        s = self.fp.read(headlen)

        if not _accept(s):
            msg = "Not an SGI image file"
            raise ValueError(msg)

        # compression : verbatim (0) or RLE (1)
        compression = s[2]
        # bpc : 1 or 2 bytes (8bits or 16bits)
        bpc = s[3]
        # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize)
        dimension = i16(s, 4)
        # xsize : width
        xsize = i16(s, 6)
        # ysize : height
        ysize = i16(s, 8)
        # zsize : channels count
        zsize = i16(s, 10)
        # determine mode from bits/zsize
        try:
            rawmode = MODES[(bpc, dimension, zsize)]
        except KeyError:
            msg = "Unsupported SGI image mode"
            raise ValueError(msg)

        self._size = xsize, ysize
        self._mode = rawmode.split(";")[0]
        if self.mode == "RGB":
            self.custom_mimetype = "image/rgb"

        # orientation -1 : scanlines begin at the bottom-left corner
        orientation = -1

        # decoder info
        if compression == 0:
            # Uncompressed data is stored planar: one full page per band.
            pagesize = xsize * ysize * bpc
            if bpc == 2:
                self.tile = [
                    ImageFile._Tile(
                        "SGI16",
                        (0, 0) + self.size,
                        headlen,
                        (self.mode, 0, orientation),
                    )
                ]
            else:
                self.tile = []
                offset = headlen
                for layer in self.mode:
                    self.tile.append(
                        ImageFile._Tile(
                            "raw", (0, 0) + self.size, offset, (layer, 0, orientation)
                        )
                    )
                    offset += pagesize
        elif compression == 1:
            # NOTE(review): any other compression value leaves self.tile
            # unset here — presumably rejected later; confirm upstream.
            self.tile = [
                ImageFile._Tile(
                    "sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc)
                )
            ]
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as an uncompressed SGI file.

    :param im: Image in mode "RGB", "RGBA" or "L".
    :param fp: Binary file object opened for writing.
    :param filename: Used only to derive the embedded image name.
    :raises ValueError: On unsupported mode or bytes-per-channel value.
    """
    if im.mode not in {"RGB", "RGBA", "L"}:
        msg = "Unsupported SGI image mode"
        raise ValueError(msg)

    # Get the keyword arguments
    info = im.encoderinfo

    # Byte-per-pixel precision, 1 = 8bits per pixel
    bpc = info.get("bpc", 1)

    if bpc not in (1, 2):
        msg = "Unsupported number of bytes per pixel"
        raise ValueError(msg)

    # Flip the image, since the origin of SGI file is the bottom-left corner
    orientation = -1
    # Define the file as SGI File Format
    magic_number = 474
    # Run-Length Encoding Compression - Unsupported at this time
    rle = 0

    # X Dimension = width / Y Dimension = height
    x, y = im.size

    # Z Dimension: Number of channels
    z = len(im.mode)

    # Number of dimensions (x,y,z)
    if im.mode == "L":
        dimension = 1 if y == 1 else 2
    else:
        dimension = 3

    # Minimum Byte value
    pinmin = 0
    # Maximum Byte value (255 = 8bits per pixel)
    pinmax = 255
    # Image name (79 characters max, truncated below in write)
    img_name = os.path.splitext(os.path.basename(filename))[0]
    if isinstance(img_name, str):
        img_name = img_name.encode("ascii", "ignore")
    # Standard representation of pixel in the file
    colormap = 0
    # Header fields are big-endian (the SGI format is Motorola byte order).
    fp.write(struct.pack(">h", magic_number))
    fp.write(o8(rle))
    fp.write(o8(bpc))
    fp.write(struct.pack(">H", dimension))
    fp.write(struct.pack(">H", x))
    fp.write(struct.pack(">H", y))
    fp.write(struct.pack(">H", z))
    fp.write(struct.pack(">l", pinmin))
    fp.write(struct.pack(">l", pinmax))
    fp.write(struct.pack("4s", b""))  # dummy
    fp.write(struct.pack("79s", img_name))  # truncates to 79 chars
    fp.write(struct.pack("s", b""))  # force null byte after img_name
    fp.write(struct.pack(">l", colormap))
    fp.write(struct.pack("404s", b""))  # dummy (pads header to 512 bytes)

    rawmode = "L"
    if bpc == 2:
        rawmode = "L;16B"

    # Pixel data is planar: write each band as a bottom-up plane.
    for channel in im.split():
        fp.write(channel.tobytes("raw", rawmode, 0, orientation))

    if hasattr(fp, "flush"):
        fp.flush()
class SGI16Decoder(ImageFile.PyDecoder):
    """Decoder for uncompressed 16-bit planar SGI data (one page per band)."""

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None
        assert self.im is not None

        rawmode, stride, orientation = self.args
        pagesize = self.state.xsize * self.state.ysize
        zsize = len(self.mode)
        self.fd.seek(512)  # skip the fixed-size SGI header

        for band in range(zsize):
            # Each band is a full plane of big-endian 16-bit samples.
            channel = Image.new("L", (self.state.xsize, self.state.ysize))
            channel.frombytes(
                self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation
            )
            self.im.putband(channel.im, band)

        return -1, 0
#
# registry

Image.register_decoder("SGI16", SGI16Decoder)
Image.register_open(SgiImageFile.format, SgiImageFile, _accept)
Image.register_save(SgiImageFile.format, _save)
Image.register_mime(SgiImageFile.format, "image/sgi")
Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"])

# End of file
venv\Lib\site-packages\PIL\SpiderImagePlugin.py
#
# The Python Imaging Library.
#
# SPIDER image file handling
#
# History:
# 2004-08-02 Created BB
# 2006-03-02 added save method
# 2006-03-13 added support for stack images
#
# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144.
# Copyright (c) 2004 by William Baxter.
# Copyright (c) 2004 by Secret Labs AB.
# Copyright (c) 2004 by Fredrik Lundh.
#
##
# Image plugin for the Spider image format. This format is used
# by the SPIDER software, in processing image data from electron
# microscopy and tomography.
##
#
# SpiderImagePlugin.py
#
# The Spider image format is used by SPIDER software, in processing
# image data from electron microscopy and tomography.
#
# Spider home page:
# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html
#
# Details about the Spider image format:
# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html
#
from __future__ import annotations
import os
import struct
import sys
from typing import IO, Any, cast
from . import Image, ImageFile
from ._util import DeferredError
TYPE_CHECKING = False
def isInt(f: Any) -> int:
    """Return 1 when *f* has an exact integer value, else 0.

    Conversion failures (ValueError/OverflowError) also count as 0.
    """
    try:
        return 1 if f - int(f) == 0 else 0
    except (ValueError, OverflowError):
        return 0


# iform codes that mark a plausible SPIDER file (2D, 3D, Fourier variants).
iforms = [1, 3, -11, -12, -21, -22]


# There is no magic number to identify Spider files, so just check a
# series of header locations to see if they have reasonable values.
# Returns no. of bytes in the header, if it is a valid Spider header,
# otherwise returns 0
def isSpiderHeader(t: tuple[float, ...]) -> int:
    """Return the header length in bytes if *t* is a valid SPIDER header."""
    # Prepend a dummy entry so indices line up with SPIDER's 1-based layout.
    h = (99,) + t
    # header values 1,2,5,12,13,22,23 should be integers
    if any(not isInt(h[idx]) for idx in (1, 2, 5, 12, 13, 22, 23)):
        return 0
    # check iform
    if int(h[5]) not in iforms:
        return 0
    # check other header values
    labrec = int(h[13])  # no. records in file header
    labbyt = int(h[22])  # total no. of bytes in header
    lenbyt = int(h[23])  # record length in bytes
    if labbyt != labrec * lenbyt:
        return 0
    # looks like a valid header
    return labbyt


def isSpiderImage(filename: str) -> int:
    """Return the SPIDER header length for *filename*, or 0 if not SPIDER."""
    with open(filename, "rb") as fp:
        raw = fp.read(92)  # read 23 * 4 bytes
    t = struct.unpack(">23f", raw)  # try big-endian first
    hdrlen = isSpiderHeader(t)
    if hdrlen == 0:
        t = struct.unpack("<23f", raw)  # little-endian
        hdrlen = isSpiderHeader(t)
    return hdrlen
class SpiderImageFile(ImageFile.ImageFile):
    """Image plugin for single images and image stacks in SPIDER format."""

    format = "SPIDER"
    format_description = "Spider 2D image"
    # Stacks seek back into the same file, so keep the fp open after load.
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # check header
        n = 27 * 4  # read 27 float values
        f = self.fp.read(n)

        try:
            self.bigendian = 1
            t = struct.unpack(">27f", f)  # try big-endian first
            hdrlen = isSpiderHeader(t)
            if hdrlen == 0:
                self.bigendian = 0
                t = struct.unpack("<27f", f)  # little-endian
                hdrlen = isSpiderHeader(t)
            if hdrlen == 0:
                msg = "not a valid Spider file"
                raise SyntaxError(msg)
        except struct.error as e:
            msg = "not a valid Spider file"
            raise SyntaxError(msg) from e

        h = (99,) + t  # add 1 value : spider header index starts at 1
        iform = int(h[5])
        if iform != 1:
            msg = "not a Spider 2D image"
            raise SyntaxError(msg)

        self._size = int(h[12]), int(h[2])  # size in pixels (width, height)
        self.istack = int(h[24])
        self.imgnumber = int(h[27])

        if self.istack == 0 and self.imgnumber == 0:
            # stk=0, img=0: a regular 2D image
            offset = hdrlen
            self._nimages = 1
        elif self.istack > 0 and self.imgnumber == 0:
            # stk>0, img=0: Opening the stack for the first time
            self.imgbytes = int(h[12]) * int(h[2]) * 4
            self.hdrlen = hdrlen
            self._nimages = int(h[26])
            # Point to the first image in the stack
            offset = hdrlen * 2
            self.imgnumber = 1
        elif self.istack == 0 and self.imgnumber > 0:
            # stk=0, img>0: an image within the stack.
            # NOTE(review): relies on self.stkoffset having been set by a
            # prior seek() — this branch is only reached when re-opening
            # through seek(); confirm no other caller hits it.
            offset = hdrlen + self.stkoffset
            self.istack = 2  # So Image knows it's still a stack
        else:
            msg = "inconsistent stack header values"
            raise SyntaxError(msg)

        if self.bigendian:
            self.rawmode = "F;32BF"
        else:
            self.rawmode = "F;32F"
        self._mode = "F"
        self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, offset, self.rawmode)]
        self._fp = self.fp  # FIXME: hack

    @property
    def n_frames(self) -> int:
        # Number of images in the stack (1 for a plain 2D image).
        return self._nimages

    @property
    def is_animated(self) -> bool:
        return self._nimages > 1

    # 1st image index is zero (although SPIDER imgnumber starts at 1)
    def tell(self) -> int:
        if self.imgnumber < 1:
            return 0
        else:
            return self.imgnumber - 1

    def seek(self, frame: int) -> None:
        """Seek to *frame* within a stack and re-parse its sub-header."""
        if self.istack == 0:
            msg = "attempt to seek in a non-stack file"
            raise EOFError(msg)
        if not self._seek_check(frame):
            return
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        # Each stack member is an image header plus its pixel data.
        self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes)
        self.fp = self._fp
        self.fp.seek(self.stkoffset)
        self._open()

    # returns a byte image after rescaling to 0..255
    def convert2byte(self, depth: int = 255) -> Image.Image:
        extrema = self.getextrema()
        assert isinstance(extrema[0], float)
        minimum, maximum = cast(tuple[float, float], extrema)
        # Linear rescale: minimum -> 0, maximum -> depth.
        m: float = 1
        if maximum != minimum:
            m = depth / (maximum - minimum)
        b = -m * minimum
        return self.point(lambda i: i * m + b).convert("L")

    if TYPE_CHECKING:
        from . import ImageTk

    # returns a ImageTk.PhotoImage object, after rescaling to 0..255
    def tkPhotoImage(self) -> ImageTk.PhotoImage:
        from . import ImageTk

        return ImageTk.PhotoImage(self.convert2byte(), palette=256)
# --------------------------------------------------------------------
# Image series
# given a list of filenames, return a list of images
def loadImageSeries(filelist: list[str] | None = None) -> list[Image.Image] | None:
    """Create a list of :py:class:`~PIL.Image.Image` objects for use in a montage.

    Each file in *filelist* is opened as a SPIDER image and rescaled to an
    8-bit "L" image. Missing or unreadable files are reported to stdout and
    skipped.

    :param filelist: Sequence of file names, or None.
    :returns: List of byte images (possibly empty), or None when *filelist*
        is None or empty.
    """
    if filelist is None or len(filelist) < 1:
        return None

    byte_imgs = []
    for img in filelist:
        if not os.path.exists(img):
            print(f"unable to find {img}")
            continue
        try:
            with Image.open(img) as im:
                assert isinstance(im, SpiderImageFile)
                byte_im = im.convert2byte()
        except Exception:
            if not isSpiderImage(img):
                print(f"{img} is not a Spider image file")
            # Always skip a file that failed to load. Previously, a file
            # that failed but still looked like a SPIDER image fell through
            # and appended an unbound or stale byte_im.
            continue
        byte_im.info["filename"] = img
        byte_imgs.append(byte_im)
    return byte_imgs
# --------------------------------------------------------------------
# For saving images in Spider format
def makeSpiderHeader(im: Image.Image) -> list[bytes]:
    """Build a SPIDER file header for *im* as a list of packed 32-bit floats.

    Returns an empty list when the image is too wide for a legal header
    (fewer than 23 header values would fit).
    """
    nsam, nrow = im.size
    lenbyt = nsam * 4  # record length in bytes (one float per pixel)
    labrec = int(1024 / lenbyt)  # header records, rounded up to >= 1024 bytes
    if 1024 % lenbyt != 0:
        labrec += 1
    labbyt = labrec * lenbyt  # total header size in bytes
    nvalues = int(labbyt / 4)
    if nvalues < 23:
        return []

    # Non-zero header fields, keyed by their 1-based Fortran index.
    fields = {
        1: 1.0,  # nslice (=1 for an image)
        2: float(nrow),  # number of rows per slice
        3: float(nrow),  # number of records in the image
        5: 1.0,  # iform for 2D image
        12: float(nsam),  # number of pixels per line
        13: float(labrec),  # number of records in file header
        22: float(labbyt),  # total number of bytes in header
        23: float(lenbyt),  # record length in bytes
    }
    hdr = [fields.get(i, 0.0) for i in range(1, nvalues + 1)]

    # pack binary data into a string
    return [struct.pack("f", v) for v in hdr]
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as a single SPIDER image (32-bit float pixels).

    :raises OSError: If a valid header could not be built for this size.
    """
    if im.mode != "F":
        # SPIDER stores 32-bit floats; convert anything else first.
        im = im.convert("F")

    hdr = makeSpiderHeader(im)
    if len(hdr) < 256:
        msg = "Error creating Spider header"
        raise OSError(msg)

    # write the SPIDER header
    fp.writelines(hdr)

    rawmode = "F;32NF"  # 32-bit native floating point
    ImageFile._save(im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, rawmode)])
def _save_spider(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save hook that also registers the output file's extension as SPIDER."""
    # get the filename extension and register it with Image
    ext = os.path.splitext(filename)[1]
    if isinstance(ext, bytes):
        ext = ext.decode()
    Image.register_extension(SpiderImageFile.format, ext)
    _save(im, fp, filename)
# --------------------------------------------------------------------

Image.register_open(SpiderImageFile.format, SpiderImageFile)
Image.register_save(SpiderImageFile.format, _save_spider)

if __name__ == "__main__":
    # Command-line tool: print basic info about a SPIDER file and
    # optionally save a horizontally flipped copy.
    if len(sys.argv) < 2:
        print("Syntax: python3 SpiderImagePlugin.py [infile] [outfile]")
        sys.exit()

    filename = sys.argv[1]
    if not isSpiderImage(filename):
        print("input image must be in Spider format")
        sys.exit()

    with Image.open(filename) as im:
        print(f"image: {im}")
        print(f"format: {im.format}")
        print(f"size: {im.size}")
        print(f"mode: {im.mode}")
        print("max, min: ", end=" ")
        print(im.getextrema())

        if len(sys.argv) > 2:
            outfile = sys.argv[2]

            # perform some image operation
            im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
            print(
                f"saving a flipped version of {os.path.basename(filename)} "
                f"as {outfile} "
            )
            im.save(outfile, SpiderImageFile.format)
venv\Lib\site-packages\PIL\SunImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
from . import Image, ImageFile, ImagePalette
from ._binary import i32be as i32
def _accept(prefix: bytes) -> bool:
    """Sun raster files begin with the big-endian magic 0x59A66A95."""
    if len(prefix) < 4:
        return False
    return i32(prefix) == 0x59A66A95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
    """Image plugin for Sun raster (.ras) files."""

    format = "SUN"
    format_description = "Sun Raster File"

    def _open(self) -> None:
        """Parse the 32-byte Sun raster header and set up the decoder tile."""
        # The Sun Raster file header is 32 bytes in length
        # and has the following format:

        #     typedef struct _SunRaster
        #     {
        #         DWORD MagicNumber;      /* Magic (identification) number */
        #         DWORD Width;            /* Width of image in pixels */
        #         DWORD Height;           /* Height of image in pixels */
        #         DWORD Depth;            /* Number of bits per pixel */
        #         DWORD Length;           /* Size of image data in bytes */
        #         DWORD Type;             /* Type of raster file */
        #         DWORD ColorMapType;     /* Type of color map */
        #         DWORD ColorMapLength;   /* Size of the color map in bytes */
        #     } SUNRASTER;

        assert self.fp is not None

        # HEAD
        s = self.fp.read(32)
        if not _accept(s):
            msg = "not an SUN raster file"
            raise SyntaxError(msg)

        offset = 32

        self._size = i32(s, 4), i32(s, 8)

        depth = i32(s, 12)
        # data_length = i32(s, 16)   # unreliable, ignore.
        file_type = i32(s, 20)
        palette_type = i32(s, 24)  # 0: None, 1: RGB, 2: Raw/arbitrary
        palette_length = i32(s, 28)

        if depth == 1:
            self._mode, rawmode = "1", "1;I"
        elif depth == 4:
            self._mode, rawmode = "L", "L;4"
        elif depth == 8:
            self._mode = rawmode = "L"
        elif depth == 24:
            if file_type == 3:
                self._mode, rawmode = "RGB", "RGB"
            else:
                self._mode, rawmode = "RGB", "BGR"
        elif depth == 32:
            if file_type == 3:
                self._mode, rawmode = "RGB", "RGBX"
            else:
                self._mode, rawmode = "RGB", "BGRX"
        else:
            msg = "Unsupported Mode/Bit Depth"
            raise SyntaxError(msg)

        if palette_length:
            if palette_length > 1024:
                msg = "Unsupported Color Palette Length"
                raise SyntaxError(msg)

            if palette_type != 1:
                msg = "Unsupported Palette Type"
                raise SyntaxError(msg)

            # The palette immediately follows the header.
            offset = offset + palette_length
            self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
            if self.mode == "L":
                self._mode = "P"
                rawmode = rawmode.replace("L", "P")

        # 16 bit boundaries on stride
        stride = ((self.size[0] * depth + 15) // 16) * 2

        # file type: Type is the version (or flavor) of the bitmap
        # file. The following values are typically found in the Type
        # field:
        # 0000h Old
        # 0001h Standard
        # 0002h Byte-encoded
        # 0003h RGB format
        # 0004h TIFF format
        # 0005h IFF format
        # FFFFh Experimental

        # Old and standard are the same, except for the length tag.
        # byte-encoded is run-length-encoded
        # RGB looks similar to standard, but RGB byte order
        # TIFF and IFF mean that they were converted from T/IFF
        # Experimental means that it's something else.
        # (https://www.fileformat.info/format/sunraster/egff.htm)

        if file_type in (0, 1, 3, 4, 5):
            self.tile = [
                ImageFile._Tile("raw", (0, 0) + self.size, offset, (rawmode, stride))
            ]
        elif file_type == 2:
            self.tile = [
                ImageFile._Tile("sun_rle", (0, 0) + self.size, offset, rawmode)
            ]
        else:
            msg = "Unsupported Sun Raster file type"
            raise SyntaxError(msg)
#
# registry

Image.register_open(SunImageFile.format, SunImageFile, _accept)
Image.register_extension(SunImageFile.format, ".ras")
venv\Lib\site-packages\PIL\TarIO.py
#
# The Python Imaging Library.
# $Id$
#
# read files from within a tar file
#
# History:
# 95-06-18 fl Created
# 96-05-28 fl Open files in binary mode
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-96.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
from . import ContainerIO
class TarIO(ContainerIO.ContainerIO[bytes]):
    """A file object that provides read access to a given member of a TAR file."""

    def __init__(self, tarfile: str, file: str) -> None:
        """
        Create file object.

        :param tarfile: Name of TAR file.
        :param file: Name of member file.
        :raises OSError: If the member cannot be found or the archive is
            truncated.
        """
        self.fh = open(tarfile, "rb")

        while True:
            # Each member is preceded by a 512-byte header block.
            s = self.fh.read(512)

            if len(s) != 512:
                self.fh.close()
                msg = "unexpected end of tar file"
                raise OSError(msg)

            # Bytes 0-99 hold the NUL-terminated member name.
            name = s[:100].decode("utf-8")
            i = name.find("\0")
            if i == 0:
                # An empty name marks the end-of-archive marker block.
                self.fh.close()
                msg = "cannot find subfile"
                raise OSError(msg)
            if i > 0:
                name = name[:i]

            # Bytes 124-134 hold the member size as an octal ASCII string.
            size = int(s[124:135], 8)

            if file == name:
                break

            # Skip the member data, rounded up to the next 512-byte block.
            self.fh.seek((size + 511) & (~511), io.SEEK_CUR)

        # Open region
        super().__init__(self.fh, self.fh.tell(), size)
venv\Lib\site-packages\PIL\TgaImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# TGA file handling
#
# History:
# 95-09-01 fl created (reads 24-bit files only)
# 97-01-04 fl support more TGA versions, including compressed images
# 98-07-04 fl fixed orientation and alpha layer bugs
# 98-09-11 fl fixed orientation for runlength decoder
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import warnings
from typing import IO
from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import o8
from ._binary import o16le as o16
#
# --------------------------------------------------------------------
# Read RGA file
MODES = {
    # map imagetype/depth to rawmode
    (1, 8): "P",
    (3, 1): "1",
    (3, 8): "L",
    (3, 16): "LA",
    (2, 16): "BGRA;15Z",
    (2, 24): "BGR",
    (2, 32): "BGRA",
}
##
# Image plugin for Targa files.
class TgaImageFile(ImageFile.ImageFile):
    """Image plugin for Targa (TGA) files."""

    format = "TGA"
    format_description = "Targa"

    def _open(self) -> None:
        """Parse the 18-byte TGA header, optional id/palette, set up tiles."""
        # process header
        assert self.fp is not None
        s = self.fp.read(18)

        id_len = s[0]

        colormaptype = s[1]
        imagetype = s[2]

        depth = s[16]

        flags = s[17]

        self._size = i16(s, 12), i16(s, 14)

        # validate header fields
        if (
            colormaptype not in (0, 1)
            or self.size[0] <= 0
            or self.size[1] <= 0
            or depth not in (1, 8, 16, 24, 32)
        ):
            msg = "not a TGA file"
            raise SyntaxError(msg)

        # image mode
        if imagetype in (3, 11):
            self._mode = "L"
            if depth == 1:
                self._mode = "1"  # ???
            elif depth == 16:
                self._mode = "LA"
        elif imagetype in (1, 9):
            self._mode = "P" if colormaptype else "L"
        elif imagetype in (2, 10):
            self._mode = "RGB" if depth == 24 else "RGBA"
        else:
            msg = "unknown TGA mode"
            raise SyntaxError(msg)

        # orientation (bits 4-5 of the image descriptor byte)
        orientation = flags & 0x30
        self._flip_horizontally = orientation in [0x10, 0x30]
        if orientation in [0x20, 0x30]:
            orientation = 1  # top-to-bottom
        elif orientation in [0, 0x10]:
            orientation = -1  # bottom-to-top
        else:
            msg = "unknown TGA orientation"
            raise SyntaxError(msg)

        self.info["orientation"] = orientation

        if imagetype & 8:
            self.info["compression"] = "tga_rle"

        if id_len:
            self.info["id_section"] = self.fp.read(id_len)

        if colormaptype:
            # read palette
            start, size, mapdepth = i16(s, 3), i16(s, 5), s[7]
            if mapdepth == 16:
                self.palette = ImagePalette.raw(
                    "BGRA;15Z", bytes(2 * start) + self.fp.read(2 * size)
                )
                self.palette.mode = "RGBA"
            elif mapdepth == 24:
                self.palette = ImagePalette.raw(
                    "BGR", bytes(3 * start) + self.fp.read(3 * size)
                )
            elif mapdepth == 32:
                self.palette = ImagePalette.raw(
                    "BGRA", bytes(4 * start) + self.fp.read(4 * size)
                )
            else:
                msg = "unknown TGA map depth"
                raise SyntaxError(msg)

        # setup tile descriptor
        try:
            rawmode = MODES[(imagetype & 7, depth)]
            if imagetype & 8:
                # compressed
                self.tile = [
                    ImageFile._Tile(
                        "tga_rle",
                        (0, 0) + self.size,
                        self.fp.tell(),
                        (rawmode, orientation, depth),
                    )
                ]
            else:
                self.tile = [
                    ImageFile._Tile(
                        "raw",
                        (0, 0) + self.size,
                        self.fp.tell(),
                        (rawmode, 0, orientation),
                    )
                ]
        except KeyError:
            pass  # cannot decode

    def load_end(self) -> None:
        # Descriptor bit 4 means right-to-left pixel order; undo it here.
        if self._flip_horizontally:
            self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
#
# --------------------------------------------------------------------
# Write TGA file
# mode -> (rawmode, bits per pixel, colormaptype, imagetype)
SAVE = {
    "1": ("1", 1, 0, 3),
    "L": ("L", 8, 0, 3),
    "LA": ("LA", 16, 0, 3),
    "P": ("P", 8, 1, 1),
    "RGB": ("BGR", 24, 0, 2),
    "RGBA": ("BGRA", 32, 0, 2),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as a TGA file (optionally RLE-compressed).

    :param im: Image in one of the modes listed in SAVE.
    :param fp: Binary file object opened for writing.
    :param filename: Unused; kept for the plugin save-handler signature.
    :raises OSError: If the image mode cannot be written as TGA.
    """
    try:
        rawmode, bits, colormaptype, imagetype = SAVE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as TGA"
        raise OSError(msg) from e

    if "rle" in im.encoderinfo:
        rle = im.encoderinfo["rle"]
    else:
        compression = im.encoderinfo.get("compression", im.info.get("compression"))
        rle = compression == "tga_rle"
    if rle:
        imagetype += 8  # RLE variants are the base type + 8

    id_section = im.encoderinfo.get("id_section", im.info.get("id_section", ""))
    id_len = len(id_section)
    if id_len > 255:
        # The id length field is a single byte, so trim to fit.
        id_len = 255
        id_section = id_section[:255]
        warnings.warn("id_section has been trimmed to 255 characters")

    if colormaptype:
        palette = im.im.getpalette("RGB", "BGR")
        colormaplength, colormapentry = len(palette) // 3, 24
    else:
        colormaplength, colormapentry = 0, 0

    if im.mode in ("LA", "RGBA"):
        flags = 8  # number of alpha bits
    else:
        flags = 0

    orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1))
    if orientation > 0:
        flags = flags | 0x20  # top-to-bottom storage

    # 18-byte header, all multi-byte fields little-endian.
    fp.write(
        o8(id_len)
        + o8(colormaptype)
        + o8(imagetype)
        + o16(0)  # colormapfirst
        + o16(colormaplength)
        + o8(colormapentry)
        + o16(0)
        + o16(0)
        + o16(im.size[0])
        + o16(im.size[1])
        + o8(bits)
        + o8(flags)
    )

    if id_section:
        fp.write(id_section)

    if colormaptype:
        fp.write(palette)

    if rle:
        ImageFile._save(
            im,
            fp,
            [ImageFile._Tile("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))],
        )
    else:
        ImageFile._save(
            im,
            fp,
            [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))],
        )

    # write targa version 2 footer
    fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000")
#
# --------------------------------------------------------------------
# Registry

Image.register_open(TgaImageFile.format, TgaImageFile)
Image.register_save(TgaImageFile.format, _save)

Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"])

Image.register_mime(TgaImageFile.format, "image/x-tga")
venv\Lib\site-packages\PIL\TiffImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# TIFF file handling
#
# TIFF is a flexible, if somewhat aged, image file format originally
# defined by Aldus. Although TIFF supports a wide variety of pixel
# layouts and compression methods, the name doesn't really stand for
# "thousands of incompatible file formats," it just feels that way.
#
# To read TIFF data from a stream, the stream must be seekable. For
# progressive decoding, make sure to use TIFF files where the tag
# directory is placed first in the file.
#
# History:
# 1995-09-01 fl Created
# 1996-05-04 fl Handle JPEGTABLES tag
# 1996-05-18 fl Fixed COLORMAP support
# 1997-01-05 fl Fixed PREDICTOR support
# 1997-08-27 fl Added support for rational tags (from Perry Stoll)
# 1998-01-10 fl Fixed seek/tell (from Jan Blom)
# 1998-07-15 fl Use private names for internal variables
# 1999-06-13 fl Rewritten for PIL 1.0 (1.0)
# 2000-10-11 fl Additional fixes for Python 2.0 (1.1)
# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2)
# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3)
# 2001-12-18 fl Added workaround for broken Matrox library
# 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart)
# 2003-05-19 fl Check FILLORDER tag
# 2003-09-26 fl Added RGBa support
# 2004-02-24 fl Added DPI support; fixed rational write support
# 2005-02-07 fl Added workaround for broken Corel Draw 10 files
# 2006-01-09 fl Added support for float/double tags (from Russell Nelson)
#
# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import io
import itertools
import logging
import math
import os
import struct
import warnings
from collections.abc import Iterator, MutableMapping
from fractions import Fraction
from numbers import Number, Rational
from typing import IO, Any, Callable, NoReturn, cast
from . import ExifTags, Image, ImageFile, ImageOps, ImagePalette, TiffTags
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._binary import o8
from ._deprecate import deprecate
from ._typing import StrOrBytesPath
from ._util import DeferredError, is_path
from .TiffTags import TYPES
TYPE_CHECKING = False
if TYPE_CHECKING:
    from ._typing import Buffer, IntegralLike

logger = logging.getLogger(__name__)

# Set these to true to force use of libtiff for reading or writing.
READ_LIBTIFF = False
WRITE_LIBTIFF = False
STRIP_SIZE = 65536  # target strip size in bytes when writing

II = b"II"  # little-endian (Intel style)
MM = b"MM"  # big-endian (Motorola style)

#
# --------------------------------------------------------------------
# Read TIFF files

# a few tag names, just to make the code below a bit more readable
OSUBFILETYPE = 255
IMAGEWIDTH = 256
IMAGELENGTH = 257
BITSPERSAMPLE = 258
COMPRESSION = 259
PHOTOMETRIC_INTERPRETATION = 262
FILLORDER = 266
IMAGEDESCRIPTION = 270
STRIPOFFSETS = 273
SAMPLESPERPIXEL = 277
ROWSPERSTRIP = 278
STRIPBYTECOUNTS = 279
X_RESOLUTION = 282
Y_RESOLUTION = 283
PLANAR_CONFIGURATION = 284
RESOLUTION_UNIT = 296
TRANSFERFUNCTION = 301
SOFTWARE = 305
DATE_TIME = 306
ARTIST = 315
PREDICTOR = 317
COLORMAP = 320
TILEWIDTH = 322
TILELENGTH = 323
TILEOFFSETS = 324
TILEBYTECOUNTS = 325
SUBIFD = 330
EXTRASAMPLES = 338
SAMPLEFORMAT = 339
JPEGTABLES = 347
YCBCRSUBSAMPLING = 530
REFERENCEBLACKWHITE = 532
COPYRIGHT = 33432
IPTC_NAA_CHUNK = 33723  # newsphoto properties
PHOTOSHOP_CHUNK = 34377  # photoshop properties
ICCPROFILE = 34675
EXIFIFD = 34665
XMP = 700
JPEGQUALITY = 65537  # pseudo-tag by libtiff

# https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java
IMAGEJ_META_DATA_BYTE_COUNTS = 50838
IMAGEJ_META_DATA = 50839

COMPRESSION_INFO = {
    # Compression => pil compression name
    1: "raw",
    2: "tiff_ccitt",
    3: "group3",
    4: "group4",
    5: "tiff_lzw",
    6: "tiff_jpeg",  # obsolete
    7: "jpeg",
    8: "tiff_adobe_deflate",
    32771: "tiff_raw_16",  # 16-bit padding
    32773: "packbits",
    32809: "tiff_thunderscan",
    32946: "tiff_deflate",
    34676: "tiff_sgilog",
    34677: "tiff_sgilog24",
    34925: "lzma",
    50000: "zstd",
    50001: "webp",
}

# Inverse mapping: pil compression name => TIFF compression tag value.
COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()}
# Lookup table mapping a TIFF pixel layout to a PIL (mode, rawmode) pair.
# Keyed by the combination of header/tag values that together fix the
# in-file pixel format.
OPEN_INFO = {
    # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample,
    #  ExtraSamples) => mode, rawmode
    # --- bilevel / grayscale ---
    (II, 0, (1,), 1, (1,), ()): ("1", "1;I"),
    (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"),
    (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"),
    (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"),
    (II, 1, (1,), 1, (1,), ()): ("1", "1"),
    (MM, 1, (1,), 1, (1,), ()): ("1", "1"),
    (II, 1, (1,), 2, (1,), ()): ("1", "1;R"),
    (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"),
    (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"),
    (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"),
    (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"),
    (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"),
    (II, 1, (1,), 1, (2,), ()): ("L", "L;2"),
    (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"),
    (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"),
    (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"),
    (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"),
    (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"),
    (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"),
    (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"),
    (II, 1, (1,), 1, (4,), ()): ("L", "L;4"),
    (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"),
    (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"),
    (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"),
    (II, 0, (1,), 1, (8,), ()): ("L", "L;I"),
    (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"),
    (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"),
    (MM, 0, (1,), 2, (8,), ()): ("L", "L;IR"),
    (II, 1, (1,), 1, (8,), ()): ("L", "L"),
    (MM, 1, (1,), 1, (8,), ()): ("L", "L"),
    (II, 1, (2,), 1, (8,), ()): ("L", "L"),
    (MM, 1, (2,), 1, (8,), ()): ("L", "L"),
    (II, 1, (1,), 2, (8,), ()): ("L", "L;R"),
    (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"),
    # --- 12/16/32-bit integer and float single-band ---
    (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"),
    (II, 0, (1,), 1, (16,), ()): ("I;16", "I;16"),
    (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"),
    (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"),
    (II, 1, (1,), 2, (16,), ()): ("I;16", "I;16R"),
    (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"),
    (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"),
    (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"),
    (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"),
    (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"),
    (II, 1, (2,), 1, (32,), ()): ("I", "I;32S"),
    (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"),
    (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"),
    (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"),
    (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"),
    (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"),
    # --- RGB(A), with the various ExtraSamples interpretations ---
    (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"),
    (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"),
    (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"),
    (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"),
    (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"),  # missing ExtraSamples
    (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"),  # missing ExtraSamples
    (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGB", "RGBX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGB", "RGBX"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGB", "RGBXX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGB", "RGBXX"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGB", "RGBXXX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGB", "RGBXXX"),
    (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"),
    (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"),
    (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"),
    (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"),
    (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"),
    (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"),
    (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"),  # Corel Draw 10
    (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"),  # Corel Draw 10
    (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"),
    (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"),
    (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"),
    (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"),
    (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGB", "RGBX;16L"),
    (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGB", "RGBX;16B"),
    (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"),
    (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"),
    (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"),
    (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"),
    # --- palette ---
    (II, 3, (1,), 1, (1,), ()): ("P", "P;1"),
    (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"),
    (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"),
    (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"),
    (II, 3, (1,), 1, (2,), ()): ("P", "P;2"),
    (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"),
    (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"),
    (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"),
    (II, 3, (1,), 1, (4,), ()): ("P", "P;4"),
    (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"),
    (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"),
    (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"),
    (II, 3, (1,), 1, (8,), ()): ("P", "P"),
    (MM, 3, (1,), 1, (8,), ()): ("P", "P"),
    (II, 3, (1,), 1, (8, 8), (0,)): ("P", "PX"),
    (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"),
    (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"),
    (II, 3, (1,), 2, (8,), ()): ("P", "P;R"),
    (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"),
    # --- CMYK ---
    (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"),
    (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"),
    (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"),
    (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"),
    (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"),
    (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"),
    (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"),
    (MM, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16B"),
    # --- YCbCr / Lab ---
    (II, 6, (1,), 1, (8,), ()): ("L", "L"),
    (MM, 6, (1,), 1, (8,), ()): ("L", "L"),
    # JPEG compressed images handled by LibTiff and auto-converted to RGBX
    # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel
    (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"),
    (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"),
    (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"),
    (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"),
}

# Largest SamplesPerPixel count among all supported layouts (key index 4
# holds the BitsPerSample tuple).
MAX_SAMPLESPERPIXEL = max(len(key_tp[4]) for key_tp in OPEN_INFO)
# TIFF magic headers accepted by _accept(): valid classic TIFF (version 42),
# two tolerated-invalid variants seen in the wild, then BigTIFF (version 43).
PREFIXES = [
    b"MM\x00\x2a",  # Valid TIFF header with big-endian byte order
    b"II\x2a\x00",  # Valid TIFF header with little-endian byte order
    b"MM\x2a\x00",  # Invalid TIFF header, assume big-endian
    b"II\x00\x2a",  # Invalid TIFF header, assume little-endian
    b"MM\x00\x2b",  # BigTIFF with big-endian byte order
    b"II\x2b\x00",  # BigTIFF with little-endian byte order
]

# LibTIFF < 4 cannot write custom tags; support is scheduled for removal
# in Pillow 12.
if not getattr(Image.core, "libtiff_support_custom_tags", True):
    deprecate("Support for LibTIFF earlier than version 4", 12)
def _accept(prefix: bytes) -> bool:
    """Return True if *prefix* begins with any known TIFF/BigTIFF magic."""
    for magic in PREFIXES:
        if prefix.startswith(magic):
            return True
    return False
def _limit_rational(
    val: float | Fraction | IFDRational, max_val: int
) -> tuple[IntegralLike, IntegralLike]:
    """Approximate *val* as a (numerator, denominator) pair whose terms
    do not exceed *max_val*.

    limit_rational() only bounds the denominator directly, so values with
    magnitude above 1 are inverted first (bounding their numerator) and the
    resulting pair is swapped back.
    """
    inverted = abs(val) > 1
    pair = IFDRational(1 / val if inverted else val).limit_rational(max_val)
    if inverted:
        return pair[1], pair[0]
    return pair
def _limit_signed_rational(
    val: IFDRational, max_val: int, min_val: int
) -> tuple[IntegralLike, IntegralLike]:
    """Approximate *val* as a (numerator, denominator) pair whose terms
    fit the signed range [min_val, max_val]."""
    frac = Fraction(val)
    n_d: tuple[IntegralLike, IntegralLike] = frac.numerator, frac.denominator

    # If either term underflows the signed minimum, shrink via the
    # magnitude limiter.
    if min(float(i) for i in n_d) < min_val:
        n_d = _limit_rational(val, abs(min_val))

    # If either term still overflows the maximum, limit again on the
    # float value of the current approximation.
    n_d_float = tuple(float(i) for i in n_d)
    if max(n_d_float) > max_val:
        n_d = _limit_rational(n_d_float[0] / n_d_float[1], max_val)

    return n_d
##
# Wrapper for TIFF IFDs.

# Registries mapping a TIFF field-type id to its (unit size, loader) pair
# and its writer callable.  Populated by the _register_* helpers below,
# then attached to ImageFileDirectory_v2 and deleted from module scope.
_load_dispatch = {}
_write_dispatch = {}
def _delegate(op: str) -> Any:
def delegate(
self: IFDRational, *args: tuple[float, ...]
) -> bool | float | Fraction:
return getattr(self._val, op)(*args)
return delegate
class IFDRational(Rational):
    """Implements a rational class where 0/0 is a legal value to match
    the in the wild use of exif rationals.

    e.g., DigitalZoomRatio - 0.00/0.00  indicates that no digital zoom was used
    """

    """ If the denominator is 0, store this as a float('nan'), otherwise store
    as a fractions.Fraction(). Delegate as appropriate

    """

    # __slots__ keeps the many per-tag instances small and is required for
    # the explicit __getstate__/__setstate__ pickling below.
    __slots__ = ("_numerator", "_denominator", "_val")

    def __init__(
        self, value: float | Fraction | IFDRational, denominator: int = 1
    ) -> None:
        """
        :param value: either an integer numerator, a
        float/rational/other number, or an IFDRational
        :param denominator: Optional integer denominator
        """
        self._val: Fraction | float
        if isinstance(value, IFDRational):
            # Copy construction: reuse the already-normalized state.
            self._numerator = value.numerator
            self._denominator = value.denominator
            self._val = value._val
            return

        if isinstance(value, Fraction):
            self._numerator = value.numerator
            self._denominator = value.denominator
        else:
            if TYPE_CHECKING:
                self._numerator = cast(IntegralLike, value)
            else:
                self._numerator = value
            self._denominator = denominator

        if denominator == 0:
            # EXIF allows x/0; represent it as NaN rather than raising.
            self._val = float("nan")
        elif denominator == 1:
            self._val = Fraction(value)
        elif int(value) == value:
            self._val = Fraction(int(value), denominator)
        else:
            self._val = Fraction(value / denominator)

    @property
    def numerator(self) -> IntegralLike:
        # As originally supplied, not reduced.
        return self._numerator

    @property
    def denominator(self) -> int:
        # As originally supplied, not reduced.
        return self._denominator

    def limit_rational(self, max_denominator: int) -> tuple[IntegralLike, int]:
        """

        :param max_denominator: Integer, the maximum denominator value
        :returns: Tuple of (numerator, denominator)
        """

        if self.denominator == 0:
            # NaN case: return the stored pair untouched.
            return self.numerator, self.denominator

        assert isinstance(self._val, Fraction)
        f = self._val.limit_denominator(max_denominator)
        return f.numerator, f.denominator

    def __repr__(self) -> str:
        return str(float(self._val))

    def __hash__(self) -> int:  # type: ignore[override]
        # Hash like the wrapped value so equal Fractions/floats collide.
        return self._val.__hash__()

    def __eq__(self, other: object) -> bool:
        val = self._val
        if isinstance(other, IFDRational):
            other = other._val
        if isinstance(other, float):
            # Compare float-to-float when the other side is a float.
            val = float(val)
        return val == other

    def __getstate__(self) -> list[float | Fraction | IntegralLike]:
        # Needed because __slots__ removes the default __dict__ pickling.
        return [self._val, self._numerator, self._denominator]

    def __setstate__(self, state: list[float | Fraction | IntegralLike]) -> None:
        IFDRational.__init__(self, 0)
        _val, _numerator, _denominator = state
        assert isinstance(_val, (float, Fraction))
        self._val = _val
        if TYPE_CHECKING:
            self._numerator = cast(IntegralLike, _numerator)
        else:
            self._numerator = _numerator
        assert isinstance(_denominator, int)
        self._denominator = _denominator

    """ a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul',
             'truediv', 'rtruediv', 'floordiv', 'rfloordiv',
             'mod','rmod', 'pow','rpow', 'pos', 'neg',
             'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool',
             'ceil', 'floor', 'round']
        print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a))
        """

    # All arithmetic and comparison operators forward to self._val
    # (Fraction, or NaN float) via _delegate.
    __add__ = _delegate("__add__")
    __radd__ = _delegate("__radd__")
    __sub__ = _delegate("__sub__")
    __rsub__ = _delegate("__rsub__")
    __mul__ = _delegate("__mul__")
    __rmul__ = _delegate("__rmul__")
    __truediv__ = _delegate("__truediv__")
    __rtruediv__ = _delegate("__rtruediv__")
    __floordiv__ = _delegate("__floordiv__")
    __rfloordiv__ = _delegate("__rfloordiv__")
    __mod__ = _delegate("__mod__")
    __rmod__ = _delegate("__rmod__")
    __pow__ = _delegate("__pow__")
    __rpow__ = _delegate("__rpow__")
    __pos__ = _delegate("__pos__")
    __neg__ = _delegate("__neg__")
    __abs__ = _delegate("__abs__")
    __trunc__ = _delegate("__trunc__")
    __lt__ = _delegate("__lt__")
    __gt__ = _delegate("__gt__")
    __le__ = _delegate("__le__")
    __ge__ = _delegate("__ge__")
    __bool__ = _delegate("__bool__")
    __ceil__ = _delegate("__ceil__")
    __floor__ = _delegate("__floor__")
    __round__ = _delegate("__round__")
    # Python >= 3.11
    if hasattr(Fraction, "__int__"):
        __int__ = _delegate("__int__")
# Signature shared by all tag loaders: (ifd, raw bytes, legacy_api) -> value.
_LoaderFunc = Callable[["ImageFileDirectory_v2", bytes, bool], Any]


def _register_loader(idx: int, size: int) -> Callable[[_LoaderFunc], _LoaderFunc]:
    """Decorator: register *func* as the loader for TIFF field type *idx*
    with per-value *size* in bytes."""

    def decorator(func: _LoaderFunc) -> _LoaderFunc:
        from .TiffTags import TYPES

        # Derive the human-readable type name from the function name,
        # e.g. load_signed_rational -> "signed rational".
        if func.__name__.startswith("load_"):
            TYPES[idx] = func.__name__[5:].replace("_", " ")
        _load_dispatch[idx] = size, func  # noqa: F821
        return func

    return decorator
def _register_writer(idx: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
_write_dispatch[idx] = func # noqa: F821
return func
return decorator
def _register_basic(idx_fmt_name: tuple[int, str, str]) -> None:
    """Register load and write handlers for a fixed-size numeric field type.

    *idx_fmt_name* is (field type id, struct format char, display name).
    """
    from .TiffTags import TYPES

    idx, fmt, name = idx_fmt_name
    TYPES[idx] = name
    # "=" pins standard sizes regardless of platform alignment.
    size = struct.calcsize(f"={fmt}")

    def basic_handler(
        self: ImageFileDirectory_v2, data: bytes, legacy_api: bool = True
    ) -> tuple[Any, ...]:
        # Unpack as many values as fit in the payload.
        return self._unpack(f"{len(data) // size}{fmt}", data)

    _load_dispatch[idx] = size, basic_handler  # noqa: F821
    _write_dispatch[idx] = lambda self, *values: (  # noqa: F821
        b"".join(self._pack(fmt, value) for value in values)
    )
# Give MutableMapping its [int, Any] parameters for type checkers only;
# at runtime stay with the plain (unsubscripted) class.
if TYPE_CHECKING:
    _IFDv2Base = MutableMapping[int, Any]
else:
    _IFDv2Base = MutableMapping
class ImageFileDirectory_v2(_IFDv2Base):
    """This class represents a TIFF tag directory.  To speed things up, we
    don't decode tags unless they're asked for.

    Exposes a dictionary interface of the tags in the directory::

        ifd = ImageFileDirectory_v2()
        ifd[key] = 'Some Data'
        ifd.tagtype[key] = TiffTags.ASCII
        print(ifd[key])
        'Some Data'

    Individual values are returned as the strings or numbers, sequences are
    returned as tuples of the values.

    The tiff metadata type of each item is stored in a dictionary of
    tag types in
    :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. The types
    are read from a tiff file, guessed from the type added, or added
    manually.

    Data Structures:

        * ``self.tagtype = {}``

          * Key: numerical TIFF tag number
          * Value: integer corresponding to the data type from
            :py:data:`.TiffTags.TYPES`

    .. versionadded:: 3.0.0

    'Internal' data structures:

        * ``self._tags_v2 = {}``

          * Key: numerical TIFF tag number
          * Value: decoded data, as tuple for multiple values

        * ``self._tagdata = {}``

          * Key: numerical TIFF tag number
          * Value: undecoded byte string from file

        * ``self._tags_v1 = {}``

          * Key: numerical TIFF tag number
          * Value: decoded data in the v1 format

    Tags will be found in the private attributes ``self._tagdata``, and in
    ``self._tags_v2`` once decoded.

    ``self.legacy_api`` is a value for internal use, and shouldn't be changed
    from outside code. In cooperation with
    :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`, if ``legacy_api``
    is true, then decoded tags will be populated into both ``_tags_v1`` and
    ``_tags_v2``. ``_tags_v2`` will be used if this IFD is used in the TIFF
    save routine. Tags should be read from ``_tags_v1`` if
    ``legacy_api == true``.

    """

    # Per-class copies of the dispatch registries; filled in at module
    # bottom from the module-level _load_dispatch/_write_dispatch tables.
    _load_dispatch: dict[int, tuple[int, _LoaderFunc]] = {}
    _write_dispatch: dict[int, Callable[..., Any]] = {}
def __init__(
    self,
    ifh: bytes = b"II\x2a\x00\x00\x00\x00\x00",
    prefix: bytes | None = None,
    group: int | None = None,
) -> None:
    """Initialize an ImageFileDirectory.

    To construct an ImageFileDirectory from a real file, pass the 8-byte
    magic header to the constructor.  To only set the endianness, pass it
    as the 'prefix' keyword argument.

    :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets
      endianness.
    :param prefix: Override the endianness of the file.
    :param group: TIFF tag group this IFD belongs to (for tag name lookup).
    :raises SyntaxError: if the header or prefix is not recognized.
    """
    if not _accept(ifh):
        msg = f"not a TIFF file (header {repr(ifh)} not valid)"
        raise SyntaxError(msg)
    self._prefix = prefix if prefix is not None else ifh[:2]
    if self._prefix == MM:
        self._endian = ">"
    elif self._prefix == II:
        self._endian = "<"
    else:
        msg = "not a TIFF IFD"
        raise SyntaxError(msg)
    # Version byte 43 marks BigTIFF (8-byte counts and offsets).
    self._bigtiff = ifh[2] == 43
    self.group = group
    self.tagtype: dict[int, int] = {}
    """ Dictionary of tag types """
    self.reset()
    # Offset of the first IFD: 8 bytes at position 8 for BigTIFF,
    # 4 bytes at position 4 for classic TIFF.
    self.next = (
        self._unpack("Q", ifh[8:])[0]
        if self._bigtiff
        else self._unpack("L", ifh[4:])[0]
    )
    self._legacy_api = False
# Read-only views of the byte-order prefix and the file offset this IFD
# was loaded from.
prefix = property(lambda self: self._prefix)
offset = property(lambda self: self._offset)

@property
def legacy_api(self) -> bool:
    """True when decoded tags are mirrored into the v1 structures."""
    return self._legacy_api

@legacy_api.setter
def legacy_api(self, value: bool) -> NoReturn:
    # v1/v2 mirroring is fixed at construction time; reject assignment.
    # AttributeError is the idiomatic error for a read-only attribute and,
    # being an Exception subclass, stays compatible with existing
    # `except Exception` handlers (previously a bare Exception was raised).
    msg = "Not allowing setting of legacy api"
    raise AttributeError(msg)
def reset(self) -> None:
    """Drop all tag state, returning the directory to empty."""
    self.tagtype = {}  # added 2008-06-05 by Florian Hoech
    self._tagdata: dict[int, bytes] = {}  # raw, not-yet-decoded tag payloads
    self._tags_v2: dict[int, Any] = {}  # main tag storage
    self._tags_v1: dict[int, Any] = {}  # will remain empty if legacy_api is false
    self._offset: int | None = None  # file offset this IFD was loaded from
    self._next = None  # offset of the following IFD, once read
def __str__(self) -> str:
return str(dict(self))
def named(self) -> dict[str, Any]:
"""
:returns: dict of name|key: value
Returns the complete tag dictionary, with named tags where possible.
"""
return {
TiffTags.lookup(code, self.group).name: value
for code, value in self.items()
}
def __len__(self) -> int:
return len(set(self._tagdata) | set(self._tags_v2))
def __getitem__(self, tag: int) -> Any:
    """Return the decoded value for *tag*, decoding raw bytes lazily on
    first access."""
    if tag not in self._tags_v2:  # unpack on the fly
        data = self._tagdata[tag]
        typ = self.tagtype[tag]
        size, handler = self._load_dispatch[typ]
        # Store through __setitem__ so type guessing/normalization runs.
        self[tag] = handler(self, data, self.legacy_api)  # check type
    val = self._tags_v2[tag]
    # Legacy API always exposes sequences (tuples), except raw bytes.
    if self.legacy_api and not isinstance(val, (tuple, bytes)):
        val = (val,)
    return val
def __contains__(self, tag: object) -> bool:
return tag in self._tags_v2 or tag in self._tagdata
def __setitem__(self, tag: int, value: Any) -> None:
self._setitem(tag, value, self.legacy_api)
def _setitem(self, tag: int, value: Any, legacy_api: bool) -> None:
    """Store *value* under *tag*, guessing a TIFF field type when the tag
    has none yet, normalizing values, and writing into the v1 or v2 store
    depending on *legacy_api*."""
    basetypes = (Number, bytes, str)

    info = TiffTags.lookup(tag, self.group)
    # Scalars are wrapped in a list; sequences pass through as-is.
    values = [value] if isinstance(value, basetypes) else value

    if tag not in self.tagtype:
        if info.type:
            # Known tag: take the spec'd type.
            self.tagtype[tag] = info.type
        else:
            # Unknown tag: guess the narrowest type that fits the values.
            self.tagtype[tag] = TiffTags.UNDEFINED
            if all(isinstance(v, IFDRational) for v in values):
                for v in values:
                    assert isinstance(v, IFDRational)
                    if v < 0:
                        self.tagtype[tag] = TiffTags.SIGNED_RATIONAL
                        break
                else:
                    self.tagtype[tag] = TiffTags.RATIONAL
            elif all(isinstance(v, int) for v in values):
                # Track which integer widths still fit all values.
                short = True
                signed_short = True
                long = True
                for v in values:
                    assert isinstance(v, int)
                    if short and not (0 <= v < 2**16):
                        short = False
                    if signed_short and not (-(2**15) < v < 2**15):
                        signed_short = False
                    if long and v < 0:
                        long = False
                if short:
                    self.tagtype[tag] = TiffTags.SHORT
                elif signed_short:
                    self.tagtype[tag] = TiffTags.SIGNED_SHORT
                elif long:
                    self.tagtype[tag] = TiffTags.LONG
                else:
                    self.tagtype[tag] = TiffTags.SIGNED_LONG
            elif all(isinstance(v, float) for v in values):
                self.tagtype[tag] = TiffTags.DOUBLE
            elif all(isinstance(v, str) for v in values):
                self.tagtype[tag] = TiffTags.ASCII
            elif all(isinstance(v, bytes) for v in values):
                self.tagtype[tag] = TiffTags.BYTE

    # Normalize values to match the chosen field type.
    if self.tagtype[tag] == TiffTags.UNDEFINED:
        values = [
            v.encode("ascii", "replace") if isinstance(v, str) else v
            for v in values
        ]
    elif self.tagtype[tag] == TiffTags.RATIONAL:
        values = [float(v) if isinstance(v, int) else v for v in values]

    # A dict stored under a LONG tag is a nested sub-IFD, kept as a dict.
    is_ifd = self.tagtype[tag] == TiffTags.LONG and isinstance(values, dict)
    if not is_ifd:
        values = tuple(
            info.cvt_enum(value) if isinstance(value, str) else value
            for value in values
        )

    dest = self._tags_v1 if legacy_api else self._tags_v2

    # Three branches:
    # Spec'd length == 1, Actual length 1, store as element
    # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed.
    # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple.
    # Don't mess with the legacy api, since it's frozen.
    if not is_ifd and (
        (info.length == 1)
        or self.tagtype[tag] == TiffTags.BYTE
        or (info.length is None and len(values) == 1 and not legacy_api)
    ):
        # Don't mess with the legacy api, since it's frozen.
        if legacy_api and self.tagtype[tag] in [
            TiffTags.RATIONAL,
            TiffTags.SIGNED_RATIONAL,
        ]:  # rationals
            values = (values,)
        try:
            (dest[tag],) = values
        except ValueError:
            # We've got a builtin tag with 1 expected entry
            warnings.warn(
                f"Metadata Warning, tag {tag} had too many entries: "
                f"{len(values)}, expected 1"
            )
            dest[tag] = values[0]
    else:
        # Spec'd length > 1 or undefined
        # Unspec'd, and length > 1
        dest[tag] = values
def __delitem__(self, tag: int) -> None:
self._tags_v2.pop(tag, None)
self._tags_v1.pop(tag, None)
self._tagdata.pop(tag, None)
def __iter__(self) -> Iterator[int]:
return iter(set(self._tagdata) | set(self._tags_v2))
def _unpack(self, fmt: str, data: bytes) -> tuple[Any, ...]:
return struct.unpack(self._endian + fmt, data)
def _pack(self, fmt: str, *values: Any) -> bytes:
return struct.pack(self._endian + fmt, *values)
# Register all fixed-size numeric field types in one sweep.  IFD and LONG
# share the 4-byte unsigned layout; LONG8 is BigTIFF's 8-byte count type.
list(
    map(
        _register_basic,
        [
            (TiffTags.SHORT, "H", "short"),
            (TiffTags.LONG, "L", "long"),
            (TiffTags.SIGNED_BYTE, "b", "signed byte"),
            (TiffTags.SIGNED_SHORT, "h", "signed short"),
            (TiffTags.SIGNED_LONG, "l", "signed long"),
            (TiffTags.FLOAT, "f", "float"),
            (TiffTags.DOUBLE, "d", "double"),
            (TiffTags.IFD, "L", "long"),
            (TiffTags.LONG8, "Q", "long8"),
        ],
    )
)
@_register_loader(1, 1)  # Basic type, except for the legacy API.
def load_byte(self, data: bytes, legacy_api: bool = True) -> bytes:
    """BYTE fields pass through as raw bytes."""
    return data


@_register_writer(1)  # Basic type, except for the legacy API.
def write_byte(self, data: bytes | int | IFDRational) -> bytes:
    """Serialize a BYTE field; a single number becomes a one-byte string."""
    if isinstance(data, IFDRational):
        data = int(data)
    if isinstance(data, int):
        data = bytes((data,))
    return data
@_register_loader(2, 1)
def load_string(self, data: bytes, legacy_api: bool = True) -> str:
    """Decode an ASCII field, dropping the trailing NUL terminator."""
    if data[-1:] == b"\0":
        data = data[:-1]
    return data.decode("latin-1", "replace")


@_register_writer(2)
def write_string(self, value: str | bytes | int) -> bytes:
    """Encode *value* as a NUL-terminated ASCII field."""
    # remerge of https://github.com/python-pillow/Pillow/pull/1416
    if isinstance(value, int):
        value = str(value)
    if not isinstance(value, bytes):
        value = value.encode("ascii", "replace")
    terminated = value + b"\0"
    return terminated
@_register_loader(5, 8)
def load_rational(
    self, data: bytes, legacy_api: bool = True
) -> tuple[tuple[int, int] | IFDRational, ...]:
    """Decode RATIONAL fields: pairs of unsigned 32-bit (numerator,
    denominator) values."""
    vals = self._unpack(f"{len(data) // 4}L", data)

    def combine(a: int, b: int) -> tuple[int, int] | IFDRational:
        # Legacy API exposes bare (num, denom) tuples.
        return (a, b) if legacy_api else IFDRational(a, b)

    return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2]))


@_register_writer(5)
def write_rational(self, *values: IFDRational) -> bytes:
    """Encode RATIONAL fields, limiting terms to the unsigned 32-bit range."""
    return b"".join(
        self._pack("2L", *_limit_rational(frac, 2**32 - 1)) for frac in values
    )
@_register_loader(7, 1)
def load_undefined(self, data: bytes, legacy_api: bool = True) -> bytes:
    """UNDEFINED fields pass through as raw bytes."""
    return data


@_register_writer(7)
def write_undefined(self, value: bytes | int | IFDRational) -> bytes:
    """Serialize an UNDEFINED field; numeric input becomes its ASCII digits."""
    if isinstance(value, IFDRational):
        value = int(value)
    if isinstance(value, int):
        value = str(value).encode("ascii", "replace")
    return value
@_register_loader(10, 8)
def load_signed_rational(
    self, data: bytes, legacy_api: bool = True
) -> tuple[tuple[int, int] | IFDRational, ...]:
    """Decode SRATIONAL fields: pairs of signed 32-bit (numerator,
    denominator) values."""
    vals = self._unpack(f"{len(data) // 4}l", data)

    def combine(a: int, b: int) -> tuple[int, int] | IFDRational:
        # Legacy API exposes bare (num, denom) tuples.
        return (a, b) if legacy_api else IFDRational(a, b)

    return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2]))


@_register_writer(10)
def write_signed_rational(self, *values: IFDRational) -> bytes:
    """Encode SRATIONAL fields, limiting terms to the signed 32-bit range."""
    return b"".join(
        self._pack("2l", *_limit_signed_rational(frac, 2**31 - 1, -(2**31)))
        for frac in values
    )
def _ensure_read(self, fp: IO[bytes], size: int) -> bytes:
ret = fp.read(size)
if len(ret) != size:
msg = (
"Corrupt EXIF data. "
f"Expecting to read {size} bytes but only got {len(ret)}. "
)
raise OSError(msg)
return ret
def load(self, fp: IO[bytes]) -> None:
    """Read one IFD from *fp* at its current position, storing raw tag
    payloads (decoded lazily later) and the offset of the next IFD.

    Corrupt or truncated data produces warnings rather than exceptions.
    """
    self.reset()
    self._offset = fp.tell()

    try:
        # Entry count: 8 bytes for BigTIFF, 2 for classic TIFF.
        tag_count = (
            self._unpack("Q", self._ensure_read(fp, 8))
            if self._bigtiff
            else self._unpack("H", self._ensure_read(fp, 2))
        )[0]
        for i in range(tag_count):
            # Each entry: tag id, field type, value count, inline payload
            # (20 bytes BigTIFF / 12 bytes classic).
            tag, typ, count, data = (
                self._unpack("HHQ8s", self._ensure_read(fp, 20))
                if self._bigtiff
                else self._unpack("HHL4s", self._ensure_read(fp, 12))
            )

            tagname = TiffTags.lookup(tag, self.group).name
            typname = TYPES.get(typ, "unknown")
            msg = f"tag: {tagname} ({tag}) - type: {typname} ({typ})"

            try:
                unit_size, handler = self._load_dispatch[typ]
            except KeyError:
                logger.debug("%s - unsupported type %s", msg, typ)
                continue  # ignore unsupported type
            size = count * unit_size
            if size > (8 if self._bigtiff else 4):
                # Payload does not fit inline: the entry holds an offset.
                here = fp.tell()
                (offset,) = self._unpack("Q" if self._bigtiff else "L", data)
                msg += f" Tag Location: {here} - Data Location: {offset}"
                fp.seek(offset)
                data = ImageFile._safe_read(fp, size)
                fp.seek(here)
            else:
                data = data[:size]

            if len(data) != size:
                warnings.warn(
                    "Possibly corrupt EXIF data. "
                    f"Expecting to read {size} bytes but only got {len(data)}."
                    f" Skipping tag {tag}"
                )
                logger.debug(msg)
                continue

            if not data:
                logger.debug(msg)
                continue

            self._tagdata[tag] = data
            self.tagtype[tag] = typ

            msg += " - value: "
            # NOTE(review): the f"" below looks like extraction-mangled text;
            # upstream Pillow logs f"<table: {size} bytes>" here — confirm.
            msg += f"" if size > 32 else repr(data)

            logger.debug(msg)

        # Trailing pointer to the next IFD (0 means last).
        (self.next,) = (
            self._unpack("Q", self._ensure_read(fp, 8))
            if self._bigtiff
            else self._unpack("L", self._ensure_read(fp, 4))
        )
    except OSError as msg:
        # Best-effort: keep whatever tags were read before the failure.
        warnings.warn(str(msg))
        return
def _get_ifh(self) -> bytes:
ifh = self._prefix + self._pack("H", 43 if self._bigtiff else 42)
if self._bigtiff:
ifh += self._pack("HH", 8, 0)
ifh += self._pack("Q", 16) if self._bigtiff else self._pack("L", 8)
return ifh
def tobytes(self, offset: int = 0) -> bytes:
    """Serialize this IFD's v2 tags, assuming the directory will start at
    file position *offset* (needed to compute out-of-line data offsets).

    Returns entry table + next-IFD terminator + auxiliary (out-of-line)
    data as one byte string.
    """
    # FIXME What about tagdata?
    result = self._pack("Q" if self._bigtiff else "H", len(self._tags_v2))

    # (tag, type, count, inline-value-or-offset, out-of-line data)
    entries: list[tuple[int, int, int, bytes, bytes]] = []

    fmt = "Q" if self._bigtiff else "L"
    fmt_size = 8 if self._bigtiff else 4
    # First out-of-line byte: count field + entry table + next-IFD pointer.
    offset += (
        len(result) + len(self._tags_v2) * (20 if self._bigtiff else 12) + fmt_size
    )
    stripoffsets = None

    # pass 1: convert tags to binary format
    # always write tags in ascending order
    for tag, value in sorted(self._tags_v2.items()):
        if tag == STRIPOFFSETS:
            # Remember the entry index for fixup after offsets are final.
            stripoffsets = len(entries)
        typ = self.tagtype[tag]
        logger.debug("Tag %s, Type: %s, Value: %s", tag, typ, repr(value))
        is_ifd = typ == TiffTags.LONG and isinstance(value, dict)
        if is_ifd:
            # Nested sub-IFD: serialize recursively at the current offset.
            ifd = ImageFileDirectory_v2(self._get_ifh(), group=tag)
            values = self._tags_v2[tag]
            for ifd_tag, ifd_value in values.items():
                ifd[ifd_tag] = ifd_value
            data = ifd.tobytes(offset)
        else:
            values = value if isinstance(value, tuple) else (value,)
            data = self._write_dispatch[typ](self, *values)

        tagname = TiffTags.lookup(tag, self.group).name
        typname = "ifd" if is_ifd else TYPES.get(typ, "unknown")
        msg = f"save: {tagname} ({tag}) - type: {typname} ({typ}) - value: "
        # NOTE(review): the f"" below looks like extraction-mangled text;
        # upstream Pillow logs f"<table: {len(data)} bytes>" here — confirm.
        msg += f"" if len(data) >= 16 else str(values)
        logger.debug(msg)

        # count is sum of lengths for string and arbitrary data
        if is_ifd:
            count = 1
        elif typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]:
            count = len(data)
        else:
            count = len(values)
        # figure out if data fits into the entry
        if len(data) <= fmt_size:
            entries.append((tag, typ, count, data.ljust(fmt_size, b"\0"), b""))
        else:
            entries.append((tag, typ, count, self._pack(fmt, offset), data))
            offset += (len(data) + 1) // 2 * 2  # pad to word

    # update strip offset data to point beyond auxiliary data
    if stripoffsets is not None:
        tag, typ, count, value, data = entries[stripoffsets]
        if data:
            # Offsets live out-of-line: decode, shift, re-encode.
            size, handler = self._load_dispatch[typ]
            values = [val + offset for val in handler(self, data, self.legacy_api)]
            data = self._write_dispatch[typ](self, *values)
        else:
            # Single inline offset: shift in place.
            value = self._pack(fmt, self._unpack(fmt, value)[0] + offset)
        entries[stripoffsets] = tag, typ, count, value, data

    # pass 2: write entries to file
    for tag, typ, count, value, data in entries:
        logger.debug("%s %s %s %s %s", tag, typ, count, repr(value), repr(data))
        result += self._pack(
            "HHQ8s" if self._bigtiff else "HHL4s", tag, typ, count, value
        )

    # -- overwrite here for multi-page --
    result += self._pack(fmt, 0)  # end of entries

    # pass 3: write auxiliary data to file
    for tag, typ, count, value, data in entries:
        result += data
        if len(data) & 1:
            result += b"\0"  # pad to word boundary

    return result
def save(self, fp: IO[bytes]) -> int:
    """Write this IFD to *fp*, emitting the TIFF file header first when
    *fp* is at position 0; returns the offset just past the written data."""
    if fp.tell() == 0:  # skip TIFF header on subsequent pages
        fp.write(self._get_ifh())

    offset = fp.tell()
    payload = self.tobytes(offset)
    fp.write(payload)
    return offset + len(payload)
# Attach the populated dispatch registries to the class, expose each
# handler as a load_<type>/write_<type> method, then drop the module-level
# names so the registries are only reachable through the class.
ImageFileDirectory_v2._load_dispatch = _load_dispatch
ImageFileDirectory_v2._write_dispatch = _write_dispatch
for idx, name in TYPES.items():
    name = name.replace(" ", "_")
    setattr(ImageFileDirectory_v2, f"load_{name}", _load_dispatch[idx][1])
    setattr(ImageFileDirectory_v2, f"write_{name}", _write_dispatch[idx])
del _load_dispatch, _write_dispatch, idx, name
# Legacy ImageFileDirectory support.
class ImageFileDirectory_v1(ImageFileDirectory_v2):
"""This class represents the **legacy** interface to a TIFF tag directory.
Exposes a dictionary interface of the tags in the directory::
ifd = ImageFileDirectory_v1()
ifd[key] = 'Some Data'
ifd.tagtype[key] = TiffTags.ASCII
print(ifd[key])
('Some Data',)
Also contains a dictionary of tag types as read from the tiff image file,
:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`.
Values are returned as a tuple.
.. deprecated:: 3.0.0
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._legacy_api = True
tags = property(lambda self: self._tags_v1)
tagdata = property(lambda self: self._tagdata)
# defined in ImageFileDirectory_v2
tagtype: dict[int, int]
"""Dictionary of tag types"""
@classmethod
def from_v2(cls, original: ImageFileDirectory_v2) -> ImageFileDirectory_v1:
"""Returns an
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
instance with the same data as is contained in the original
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
instance.
:returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
"""
ifd = cls(prefix=original.prefix)
ifd._tagdata = original._tagdata
ifd.tagtype = original.tagtype
ifd.next = original.next # an indicator for multipage tiffs
return ifd
def to_v2(self) -> ImageFileDirectory_v2:
"""Returns an
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
instance with the same data as is contained in the original
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
instance.
:returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
"""
ifd = ImageFileDirectory_v2(prefix=self.prefix)
ifd._tagdata = dict(self._tagdata)
ifd.tagtype = dict(self.tagtype)
ifd._tags_v2 = dict(self._tags_v2)
return ifd
def __contains__(self, tag: object) -> bool:
return tag in self._tags_v1 or tag in self._tagdata
def __len__(self) -> int:
return len(set(self._tagdata) | set(self._tags_v1))
def __iter__(self) -> Iterator[int]:
return iter(set(self._tagdata) | set(self._tags_v1))
def __setitem__(self, tag: int, value: Any) -> None:
for legacy_api in (False, True):
self._setitem(tag, value, legacy_api)
def __getitem__(self, tag: int) -> Any:
    """Return the value for *tag*, decoding raw tag data on first access.

    Values are returned v1-style: wrapped in a tuple unless they are
    already a tuple or bytes.
    """
    if tag not in self._tags_v1:  # unpack on the fly
        data = self._tagdata[tag]
        typ = self.tagtype[tag]
        size, handler = self._load_dispatch[typ]
        # populate both the v2 and legacy caches from the raw bytes
        for legacy in (False, True):
            self._setitem(tag, handler(self, data, legacy), legacy)
    val = self._tags_v1[tag]
    if not isinstance(val, (tuple, bytes)):
        val = (val,)
    return val
# undone -- switch this pointer
# Public alias kept for backward compatibility: the plain
# ``ImageFileDirectory`` name refers to the legacy v1 implementation.
ImageFileDirectory = ImageFileDirectory_v1
##
# Image plugin for TIFF files.


class TiffImageFile(ImageFile.ImageFile):
    format = "TIFF"
    format_description = "Adobe TIFF"
    # Frames share one underlying file pointer, so it must stay open
    # across load() for multipage files.
    _close_exclusive_fp_after_loading = False

    def __init__(
        self,
        fp: StrOrBytesPath | IO[bytes],
        filename: str | bytes | None = None,
    ) -> None:
        self.tag_v2: ImageFileDirectory_v2
        """ Image file directory (tag dictionary) """

        self.tag: ImageFileDirectory_v1
        """ Legacy tag entries """

        super().__init__(fp, filename)

    def _open(self) -> None:
        """Open the first image in a TIFF file"""
        # Header
        ifh = self.fp.read(8)
        if ifh[2] == 43:
            # version 43 marks BigTIFF; its header is 16 bytes long
            ifh += self.fp.read(8)

        self.tag_v2 = ImageFileDirectory_v2(ifh)

        # setup frame pointers
        self.__first = self.__next = self.tag_v2.next
        self.__frame = -1
        self._fp = self.fp
        self._frame_pos: list[int] = []
        self._n_frames: int | None = None

        logger.debug("*** TiffImageFile._open ***")
        logger.debug("- __first: %s", self.__first)
        logger.debug("- ifh: %s", repr(ifh))  # Use repr to avoid str(bytes)

        # and load the first frame
        self._seek(0)

    @property
    def n_frames(self) -> int:
        # Count frames lazily: keep seeking forward until _seek reaches
        # the last IFD (next == 0) and records _n_frames.
        current_n_frames = self._n_frames
        if current_n_frames is None:
            current = self.tell()
            self._seek(len(self._frame_pos))
            while self._n_frames is None:
                self._seek(self.tell() + 1)
            self.seek(current)
        assert self._n_frames is not None
        return self._n_frames

    def seek(self, frame: int) -> None:
        """Select a given frame as current image"""
        if not self._seek_check(frame):
            return
        self._seek(frame)
        # Discard the cached core image if the new frame's geometry,
        # mode or writability no longer matches it.
        if self._im is not None and (
            self.im.size != self._tile_size
            or self.im.mode != self.mode
            or self.readonly
        ):
            self._im = None

    def _seek(self, frame: int) -> None:
        """Walk the IFD chain until *frame* is reached, then load its tags."""
        if isinstance(self._fp, DeferredError):
            raise self._fp.ex
        self.fp = self._fp

        while len(self._frame_pos) <= frame:
            if not self.__next:
                msg = "no more images in TIFF file"
                raise EOFError(msg)
            logger.debug(
                "Seeking to frame %s, on frame %s, __next %s, location: %s",
                frame,
                self.__frame,
                self.__next,
                self.fp.tell(),
            )
            if self.__next >= 2**63:
                msg = "Unable to seek to frame"
                raise ValueError(msg)
            self.fp.seek(self.__next)
            self._frame_pos.append(self.__next)
            logger.debug("Loading tags, location: %s", self.fp.tell())
            self.tag_v2.load(self.fp)
            if self.tag_v2.next in self._frame_pos:
                # This IFD has already been processed
                # Declare this to be the end of the image
                self.__next = 0
            else:
                self.__next = self.tag_v2.next
            if self.__next == 0:
                self._n_frames = frame + 1
            if len(self._frame_pos) == 1:
                self.is_animated = self.__next != 0
            self.__frame += 1
        self.fp.seek(self._frame_pos[frame])
        self.tag_v2.load(self.fp)
        if XMP in self.tag_v2:
            xmp = self.tag_v2[XMP]
            if isinstance(xmp, tuple) and len(xmp) == 1:
                xmp = xmp[0]
            self.info["xmp"] = xmp
        elif "xmp" in self.info:
            del self.info["xmp"]
        self._reload_exif()
        # fill the legacy tag/ifd entries
        self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2)
        self.__frame = frame
        self._setup()

    def tell(self) -> int:
        """Return the current frame number"""
        return self.__frame

    def get_photoshop_blocks(self) -> dict[int, dict[str, bytes]]:
        """
        Returns a dictionary of Photoshop "Image Resource Blocks".
        The keys are the image resource ID. For more information, see
        https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577409_pgfId-1037727

        :returns: Photoshop "Image Resource Blocks" in a dictionary.
        """
        blocks = {}
        val = self.tag_v2.get(ExifTags.Base.ImageResources)
        if val:
            while val.startswith(b"8BIM"):
                id = i16(val[4:6])
                # n: length of the name field, rounded up to an even size
                n = math.ceil((val[6] + 1) / 2) * 2
                size = i32(val[6 + n : 10 + n])
                data = val[10 + n : 10 + n + size]
                blocks[id] = {"data": data}

                # advance to the next (even-aligned) resource block
                val = val[math.ceil((10 + n + size) / 2) * 2 :]
        return blocks

    def load(self) -> Image.core.PixelAccess | None:
        # Compressed tiles are routed through the libtiff decoder.
        if self.tile and self.use_load_libtiff:
            return self._load_libtiff()
        return super().load()

    def load_prepare(self) -> None:
        if self._im is None:
            Image._decompression_bomb_check(self._tile_size)
            # allocate the buffer at the on-disk tile size, which may
            # differ from .size for transposed orientations (see _setup)
            self.im = Image.core.new(self.mode, self._tile_size)
        ImageFile.ImageFile.load_prepare(self)

    def load_end(self) -> None:
        # allow closing if we're on the first frame, there's no next
        # This is the ImageFile.load path only, libtiff specific below.
        if not self.is_animated:
            self._close_exclusive_fp_after_loading = True

            # load IFD data from fp before it is closed
            exif = self.getexif()
            for key in TiffTags.TAGS_V2_GROUPS:
                if key not in exif:
                    continue
                exif.get_ifd(key)

        ImageOps.exif_transpose(self, in_place=True)
        if ExifTags.Base.Orientation in self.tag_v2:
            del self.tag_v2[ExifTags.Base.Orientation]

    def _load_libtiff(self) -> Image.core.PixelAccess | None:
        """Overload method triggered when we detect a compressed tiff
        Calls out to libtiff"""

        Image.Image.load(self)

        self.load_prepare()

        if not len(self.tile) == 1:
            msg = "Not exactly one tile"
            raise OSError(msg)

        # (self._compression, (extents tuple),
        #    0, (rawmode, self._compression, fp))
        extents = self.tile[0][1]
        args = self.tile[0][3]

        # To be nice on memory footprint, if there's a
        # file descriptor, use that instead of reading
        # into a string in python.
        try:
            fp = hasattr(self.fp, "fileno") and self.fp.fileno()
            # flush the file descriptor, prevents error on pypy 2.4+
            # should also eliminate the need for fp.tell
            # in _seek
            if hasattr(self.fp, "flush"):
                self.fp.flush()
        except OSError:
            # io.BytesIO have a fileno, but returns an OSError if
            # it doesn't use a file descriptor.
            fp = False

        if fp:
            assert isinstance(args, tuple)
            args_list = list(args)
            args_list[2] = fp
            args = tuple(args_list)

        decoder = Image._getdecoder(self.mode, "libtiff", args, self.decoderconfig)
        try:
            decoder.setimage(self.im, extents)
        except ValueError as e:
            msg = "Couldn't set the image"
            raise OSError(msg) from e

        close_self_fp = self._exclusive_fp and not self.is_animated
        if hasattr(self.fp, "getvalue"):
            # We've got a stringio like thing passed in. Yay for all in memory.
            # The decoder needs the entire file in one shot, so there's not
            # a lot we can do here other than give it the entire file.
            # unless we could do something like get the address of the
            # underlying string for stringio.
            #
            # Rearranging for supporting byteio items, since they have a fileno
            # that returns an OSError if there's no underlying fp. Easier to
            # deal with here by reordering.
            logger.debug("have getvalue. just sending in a string from getvalue")
            n, err = decoder.decode(self.fp.getvalue())
        elif fp:
            # we've got a actual file on disk, pass in the fp.
            logger.debug("have fileno, calling fileno version of the decoder.")
            if not close_self_fp:
                self.fp.seek(0)
            # Save and restore the file position, because libtiff will move it
            # outside of the Python runtime, and that will confuse
            # io.BufferedReader and possible others.
            # NOTE: This must use os.lseek(), and not fp.tell()/fp.seek(),
            # because the buffer read head already may not equal the actual
            # file position, and fp.seek() may just adjust it's internal
            # pointer and not actually seek the OS file handle.
            pos = os.lseek(fp, 0, os.SEEK_CUR)
            # 4 bytes, otherwise the trace might error out
            n, err = decoder.decode(b"fpfp")
            os.lseek(fp, pos, os.SEEK_SET)
        else:
            # we have something else.
            logger.debug("don't have fileno or getvalue. just reading")
            self.fp.seek(0)
            # UNDONE -- so much for that buffer size thing.
            n, err = decoder.decode(self.fp.read())

        self.tile = []
        self.readonly = 0

        self.load_end()

        if close_self_fp:
            self.fp.close()
            self.fp = None  # might be shared

        if err < 0:
            msg = f"decoder error {err}"
            raise OSError(msg)

        return Image.Image.load(self)

    def _setup(self) -> None:
        """Setup this image object based on current tags"""
        if 0xBC01 in self.tag_v2:
            msg = "Windows Media Photo files not yet supported"
            raise OSError(msg)

        # extract relevant tags
        self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)]
        self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1)

        # photometric is a required tag, but not everyone is reading
        # the specification
        photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0)

        # old style jpeg compression images most certainly are YCbCr
        if self._compression == "tiff_jpeg":
            photo = 6

        fillorder = self.tag_v2.get(FILLORDER, 1)

        logger.debug("*** Summary ***")
        logger.debug("- compression: %s", self._compression)
        logger.debug("- photometric_interpretation: %s", photo)
        logger.debug("- planar_configuration: %s", self._planar_configuration)
        logger.debug("- fill_order: %s", fillorder)
        logger.debug("- YCbCr subsampling: %s", self.tag_v2.get(YCBCRSUBSAMPLING))

        # size
        try:
            xsize = self.tag_v2[IMAGEWIDTH]
            ysize = self.tag_v2[IMAGELENGTH]
        except KeyError as e:
            msg = "Missing dimensions"
            raise TypeError(msg) from e
        if not isinstance(xsize, int) or not isinstance(ysize, int):
            msg = "Invalid dimensions"
            raise ValueError(msg)
        self._tile_size = xsize, ysize
        orientation = self.tag_v2.get(ExifTags.Base.Orientation)
        if orientation in (5, 6, 7, 8):
            # transposed orientations: the reported size swaps axes
            self._size = ysize, xsize
        else:
            self._size = xsize, ysize

        logger.debug("- size: %s", self.size)

        sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,))
        if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1:
            # SAMPLEFORMAT is properly per band, so an RGB image will
            # be (1,1,1).  But, we don't support per band pixel types,
            # and anything more than one band is a uint8. So, just
            # take the first element. Revisit this if adding support
            # for more exotic images.
            sample_format = (1,)

        bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
        extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
        if photo in (2, 6, 8):  # RGB, YCbCr, LAB
            bps_count = 3
        elif photo == 5:  # CMYK
            bps_count = 4
        else:
            bps_count = 1
        bps_count += len(extra_tuple)
        bps_actual_count = len(bps_tuple)
        samples_per_pixel = self.tag_v2.get(
            SAMPLESPERPIXEL,
            3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1,
        )

        if samples_per_pixel > MAX_SAMPLESPERPIXEL:
            # DOS check, samples_per_pixel can be a Long, and we extend the tuple below
            logger.error(
                "More samples per pixel than can be decoded: %s", samples_per_pixel
            )
            msg = "Invalid value for samples per pixel"
            raise SyntaxError(msg)

        if samples_per_pixel < bps_actual_count:
            # If a file has more values in bps_tuple than expected,
            # remove the excess.
            bps_tuple = bps_tuple[:samples_per_pixel]
        elif samples_per_pixel > bps_actual_count and bps_actual_count == 1:
            # If a file has only one value in bps_tuple, when it should have more,
            # presume it is the same number of bits for all of the samples.
            bps_tuple = bps_tuple * samples_per_pixel

        if len(bps_tuple) != samples_per_pixel:
            msg = "unknown data organization"
            raise SyntaxError(msg)

        # mode: check photometric interpretation and bits per pixel
        key = (
            self.tag_v2.prefix,
            photo,
            sample_format,
            fillorder,
            bps_tuple,
            extra_tuple,
        )
        logger.debug("format key: %s", key)
        try:
            self._mode, rawmode = OPEN_INFO[key]
        except KeyError as e:
            logger.debug("- unsupported format")
            msg = "unknown pixel mode"
            raise SyntaxError(msg) from e

        logger.debug("- raw mode: %s", rawmode)
        logger.debug("- pil mode: %s", self.mode)

        self.info["compression"] = self._compression

        xres = self.tag_v2.get(X_RESOLUTION, 1)
        yres = self.tag_v2.get(Y_RESOLUTION, 1)

        if xres and yres:
            resunit = self.tag_v2.get(RESOLUTION_UNIT)
            if resunit == 2:  # dots per inch
                self.info["dpi"] = (xres, yres)
            elif resunit == 3:  # dots per centimeter. convert to dpi
                self.info["dpi"] = (xres * 2.54, yres * 2.54)
            elif resunit is None:  # used to default to 1, but now 2)
                self.info["dpi"] = (xres, yres)
                # For backward compatibility,
                # we also preserve the old behavior
                self.info["resolution"] = xres, yres
            else:  # No absolute unit of measurement
                self.info["resolution"] = xres, yres

        # build tile descriptors
        x = y = layer = 0
        self.tile = []
        self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw"
        if self.use_load_libtiff:
            # Decoder expects entire file as one tile.
            # There's a buffer size limit in load (64k)
            # so large g4 images will fail if we use that
            # function.
            #
            # Setup the one tile for the whole image, then
            # use the _load_libtiff function.

            # libtiff handles the fillmode for us, so 1;IR should
            # actually be 1;I. Including the R double reverses the
            # bits, so stripes of the image are reversed.  See
            # https://github.com/python-pillow/Pillow/issues/279
            if fillorder == 2:
                # Replace fillorder with fillorder=1
                key = key[:3] + (1,) + key[4:]
                logger.debug("format key: %s", key)
                # this should always work, since all the
                # fillorder==2 modes have a corresponding
                # fillorder=1 mode
                self._mode, rawmode = OPEN_INFO[key]
            # YCbCr images with new jpeg compression with pixels in one plane
            # unpacked straight into RGB values
            if (
                photo == 6
                and self._compression == "jpeg"
                and self._planar_configuration == 1
            ):
                rawmode = "RGB"
            # libtiff always returns the bytes in native order.
            # we're expecting image byte order. So, if the rawmode
            # contains I;16, we need to convert from native to image
            # byte order.
            elif rawmode == "I;16":
                rawmode = "I;16N"
            elif rawmode.endswith((";16B", ";16L")):
                rawmode = rawmode[:-1] + "N"

            # Offset in the tile tuple is 0, we go from 0,0 to
            # w,h, and we only do this once -- eds
            a = (rawmode, self._compression, False, self.tag_v2.offset)
            self.tile.append(ImageFile._Tile("libtiff", (0, 0, xsize, ysize), 0, a))

        elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2:
            # striped image
            if STRIPOFFSETS in self.tag_v2:
                offsets = self.tag_v2[STRIPOFFSETS]
                h = self.tag_v2.get(ROWSPERSTRIP, ysize)
                w = xsize
            else:
                # tiled image
                offsets = self.tag_v2[TILEOFFSETS]
                tilewidth = self.tag_v2.get(TILEWIDTH)
                h = self.tag_v2.get(TILELENGTH)
                if not isinstance(tilewidth, int) or not isinstance(h, int):
                    msg = "Invalid tile dimensions"
                    raise ValueError(msg)
                w = tilewidth

            if w == xsize and h == ysize and self._planar_configuration != 2:
                # Every tile covers the image. Only use the last offset
                offsets = offsets[-1:]

            for offset in offsets:
                if x + w > xsize:
                    stride = w * sum(bps_tuple) / 8  # bytes per line
                else:
                    stride = 0

                tile_rawmode = rawmode
                if self._planar_configuration == 2:
                    # each band on it's own layer
                    tile_rawmode = rawmode[layer]
                    # adjust stride width accordingly
                    stride /= bps_count

                args = (tile_rawmode, int(stride), 1)
                self.tile.append(
                    ImageFile._Tile(
                        self._compression,
                        (x, y, min(x + w, xsize), min(y + h, ysize)),
                        offset,
                        args,
                    )
                )
                x += w
                if x >= xsize:
                    x, y = 0, y + h
                    if y >= ysize:
                        y = 0
                        layer += 1
        else:
            logger.debug("- unsupported data organization")
            msg = "unknown data organization"
            raise SyntaxError(msg)

        # Fix up info.
        if ICCPROFILE in self.tag_v2:
            self.info["icc_profile"] = self.tag_v2[ICCPROFILE]

        # fixup palette descriptor

        if self.mode in ["P", "PA"]:
            palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]]
            self.palette = ImagePalette.raw("RGB;L", b"".join(palette))
#
# --------------------------------------------------------------------
# Write TIFF files

# little endian is default except for image modes with
# explicit big endian byte-order

SAVE_INFO = {
    # mode => rawmode, byteorder, photometrics,
    #           sampleformat, bitspersample, extra
    "1": ("1", II, 1, 1, (1,), None),
    "L": ("L", II, 1, 1, (8,), None),
    "LA": ("LA", II, 1, 1, (8, 8), 2),
    "P": ("P", II, 3, 1, (8,), None),
    "PA": ("PA", II, 3, 1, (8, 8), 2),
    "I": ("I;32S", II, 1, 2, (32,), None),
    "I;16": ("I;16", II, 1, 1, (16,), None),
    "I;16L": ("I;16L", II, 1, 1, (16,), None),
    "F": ("F;32F", II, 1, 3, (32,), None),
    "RGB": ("RGB", II, 2, 1, (8, 8, 8), None),
    "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0),
    "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2),
    "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None),
    "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None),
    "LAB": ("LAB", II, 8, 1, (8, 8, 8), None),
    "I;16B": ("I;16B", MM, 1, 1, (16,), None),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write a single frame of *im* to *fp* as TIFF.

    Builds an ImageFileDirectory_v2 from the image, its info and any
    ``tiffinfo``/``exif`` encoder options, then writes either through
    the libtiff encoder (for compressed output) or the raw path.
    """
    try:
        rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as TIFF"
        raise OSError(msg) from e

    encoderinfo = im.encoderinfo
    encoderconfig = im.encoderconfig

    ifd = ImageFileDirectory_v2(prefix=prefix)
    if encoderinfo.get("big_tiff"):
        ifd._bigtiff = True

    try:
        compression = encoderinfo["compression"]
    except KeyError:
        compression = im.info.get("compression")
        if isinstance(compression, int):
            # compression value may be from BMP. Ignore it
            compression = None
    if compression is None:
        compression = "raw"
    elif compression == "tiff_jpeg":
        # OJPEG is obsolete, so use new-style JPEG compression instead
        compression = "jpeg"
    elif compression == "tiff_deflate":
        compression = "tiff_adobe_deflate"

    libtiff = WRITE_LIBTIFF or compression != "raw"

    # required for color libtiff images
    ifd[PLANAR_CONFIGURATION] = 1

    ifd[IMAGEWIDTH] = im.size[0]
    ifd[IMAGELENGTH] = im.size[1]

    # write any arbitrary tags passed in as an ImageFileDirectory
    if "tiffinfo" in encoderinfo:
        info = encoderinfo["tiffinfo"]
    elif "exif" in encoderinfo:
        info = encoderinfo["exif"]
        if isinstance(info, bytes):
            exif = Image.Exif()
            exif.load(info)
            info = exif
    else:
        info = {}
    logger.debug("Tiffinfo Keys: %s", list(info))
    if isinstance(info, ImageFileDirectory_v1):
        info = info.to_v2()
    for key in info:
        if isinstance(info, Image.Exif) and key in TiffTags.TAGS_V2_GROUPS:
            ifd[key] = info.get_ifd(key)
        else:
            ifd[key] = info.get(key)
        try:
            ifd.tagtype[key] = info.tagtype[key]
        except Exception:
            pass  # might not be an IFD. Might not have populated type

    legacy_ifd = {}
    if hasattr(im, "tag"):
        legacy_ifd = im.tag.to_v2()

    supplied_tags = {**legacy_ifd, **getattr(im, "tag_v2", {})}
    for tag in (
        # IFD offset that may not be correct in the saved image
        EXIFIFD,
        # Determined by the image format and should not be copied from legacy_ifd.
        SAMPLEFORMAT,
    ):
        if tag in supplied_tags:
            del supplied_tags[tag]

    # additions written by Greg Couch, gregc@cgl.ucsf.edu
    # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com
    if hasattr(im, "tag_v2"):
        # preserve tags from original TIFF image file
        for key in (
            RESOLUTION_UNIT,
            X_RESOLUTION,
            Y_RESOLUTION,
            IPTC_NAA_CHUNK,
            PHOTOSHOP_CHUNK,
            XMP,
        ):
            if key in im.tag_v2:
                if key == IPTC_NAA_CHUNK and im.tag_v2.tagtype[key] not in (
                    TiffTags.BYTE,
                    TiffTags.UNDEFINED,
                ):
                    del supplied_tags[key]
                else:
                    ifd[key] = im.tag_v2[key]
                    ifd.tagtype[key] = im.tag_v2.tagtype[key]

    # preserve ICC profile (should also work when saving other formats
    # which support profiles as TIFF) -- 2008-06-06 Florian Hoech
    icc = encoderinfo.get("icc_profile", im.info.get("icc_profile"))
    if icc:
        ifd[ICCPROFILE] = icc

    for key, name in [
        (IMAGEDESCRIPTION, "description"),
        (X_RESOLUTION, "resolution"),
        (Y_RESOLUTION, "resolution"),
        (X_RESOLUTION, "x_resolution"),
        (Y_RESOLUTION, "y_resolution"),
        (RESOLUTION_UNIT, "resolution_unit"),
        (SOFTWARE, "software"),
        (DATE_TIME, "date_time"),
        (ARTIST, "artist"),
        (COPYRIGHT, "copyright"),
    ]:
        if name in encoderinfo:
            ifd[key] = encoderinfo[name]

    dpi = encoderinfo.get("dpi")
    if dpi:
        ifd[RESOLUTION_UNIT] = 2
        ifd[X_RESOLUTION] = dpi[0]
        ifd[Y_RESOLUTION] = dpi[1]

    if bits != (1,):
        ifd[BITSPERSAMPLE] = bits
        if len(bits) != 1:
            ifd[SAMPLESPERPIXEL] = len(bits)
    if extra is not None:
        ifd[EXTRASAMPLES] = extra
    if format != 1:
        ifd[SAMPLEFORMAT] = format

    if PHOTOMETRIC_INTERPRETATION not in ifd:
        ifd[PHOTOMETRIC_INTERPRETATION] = photo
    elif im.mode in ("1", "L") and ifd[PHOTOMETRIC_INTERPRETATION] == 0:
        # photometric 0 means "white is zero": invert the pixel data so
        # the stored bytes match the declared interpretation
        if im.mode == "1":
            inverted_im = im.copy()
            px = inverted_im.load()
            if px is not None:
                for y in range(inverted_im.height):
                    for x in range(inverted_im.width):
                        px[x, y] = 0 if px[x, y] == 255 else 255
                im = inverted_im
        else:
            im = ImageOps.invert(im)

    if im.mode in ["P", "PA"]:
        lut = im.im.getpalette("RGB", "RGB;L")
        colormap = []
        colors = len(lut) // 3
        for i in range(3):
            colormap += [v * 256 for v in lut[colors * i : colors * (i + 1)]]
            colormap += [0] * (256 - colors)
        ifd[COLORMAP] = colormap

    # data orientation
    w, h = ifd[IMAGEWIDTH], ifd[IMAGELENGTH]
    stride = len(bits) * ((w * bits[0] + 7) // 8)

    if ROWSPERSTRIP not in ifd:
        # aim for given strip size (64 KB by default) when using libtiff writer
        if libtiff:
            im_strip_size = encoderinfo.get("strip_size", STRIP_SIZE)
            rows_per_strip = 1 if stride == 0 else min(im_strip_size // stride, h)
            # JPEG encoder expects multiple of 8 rows
            if compression == "jpeg":
                rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, h)
        else:
            rows_per_strip = h
        if rows_per_strip == 0:
            rows_per_strip = 1
        ifd[ROWSPERSTRIP] = rows_per_strip

    strip_byte_counts = 1 if stride == 0 else stride * ifd[ROWSPERSTRIP]
    strips_per_image = (h + ifd[ROWSPERSTRIP] - 1) // ifd[ROWSPERSTRIP]
    if strip_byte_counts >= 2**16:
        ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
    # last strip may be shorter than the others
    ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + (
        stride * h - strip_byte_counts * (strips_per_image - 1),
    )
    ifd[STRIPOFFSETS] = tuple(
        range(0, strip_byte_counts * strips_per_image, strip_byte_counts)
    )  # this is adjusted by IFD writer

    # no compression by default:
    ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)

    if im.mode == "YCbCr":
        for tag, default_value in {
            YCBCRSUBSAMPLING: (1, 1),
            REFERENCEBLACKWHITE: (0, 255, 128, 255, 128, 255),
        }.items():
            ifd.setdefault(tag, default_value)

    blocklist = [TILEWIDTH, TILELENGTH, TILEOFFSETS, TILEBYTECOUNTS]
    if libtiff:
        if "quality" in encoderinfo:
            quality = encoderinfo["quality"]
            if not isinstance(quality, int) or quality < 0 or quality > 100:
                msg = "Invalid quality setting"
                raise ValueError(msg)
            if compression != "jpeg":
                msg = "quality setting only supported for 'jpeg' compression"
                raise ValueError(msg)
            ifd[JPEGQUALITY] = quality

        logger.debug("Saving using libtiff encoder")
        logger.debug("Items: %s", sorted(ifd.items()))
        _fp = 0
        if hasattr(fp, "fileno"):
            try:
                fp.seek(0)
                _fp = fp.fileno()
            except io.UnsupportedOperation:
                pass

        # optional types for non core tags
        types = {}
        # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
        # based on the data in the strip.
        # OSUBFILETYPE is deprecated.
        # The other tags expect arrays with a certain length (fixed or depending on
        # BITSPERSAMPLE, etc), passing arrays with a different length will result in
        # segfaults. Block these tags until we add extra validation.
        # SUBIFD may also cause a segfault.
        blocklist += [
            OSUBFILETYPE,
            REFERENCEBLACKWHITE,
            STRIPBYTECOUNTS,
            STRIPOFFSETS,
            TRANSFERFUNCTION,
            SUBIFD,
        ]

        # bits per sample is a single short in the tiff directory, not a list.
        atts: dict[int, Any] = {BITSPERSAMPLE: bits[0]}
        # Merge the ones that we have with (optional) more bits from
        # the original file, e.g x,y resolution so that we can
        # save(load('')) == original file.
        for tag, value in itertools.chain(ifd.items(), supplied_tags.items()):
            # Libtiff can only process certain core items without adding
            # them to the custom dictionary.
            # Custom items are supported for int, float, unicode, string and byte
            # values. Other types and tuples require a tagtype.
            if tag not in TiffTags.LIBTIFF_CORE:
                if not getattr(Image.core, "libtiff_support_custom_tags", False):
                    continue

                if tag in TiffTags.TAGS_V2_GROUPS:
                    types[tag] = TiffTags.LONG8
                elif tag in ifd.tagtype:
                    types[tag] = ifd.tagtype[tag]
                elif not (isinstance(value, (int, float, str, bytes))):
                    continue
                else:
                    type = TiffTags.lookup(tag).type
                    if type:
                        types[tag] = type
            if tag not in atts and tag not in blocklist:
                if isinstance(value, str):
                    atts[tag] = value.encode("ascii", "replace") + b"\0"
                elif isinstance(value, IFDRational):
                    atts[tag] = float(value)
                else:
                    atts[tag] = value
        if SAMPLEFORMAT in atts and len(atts[SAMPLEFORMAT]) == 1:
            atts[SAMPLEFORMAT] = atts[SAMPLEFORMAT][0]

        logger.debug("Converted items: %s", sorted(atts.items()))

        # libtiff always expects the bytes in native order.
        # we're storing image byte order. So, if the rawmode
        # contains I;16, we need to convert from native to image
        # byte order.
        if im.mode in ("I;16", "I;16B", "I;16L"):
            rawmode = "I;16N"

        # Pass tags as sorted list so that the tags are set in a fixed order.
        # This is required by libtiff for some tags. For example, the JPEGQUALITY
        # pseudo tag requires that the COMPRESS tag was already set.
        tags = list(atts.items())
        tags.sort()
        a = (rawmode, compression, _fp, filename, tags, types)
        encoder = Image._getencoder(im.mode, "libtiff", a, encoderconfig)
        encoder.setimage(im.im, (0, 0) + im.size)
        while True:
            errcode, data = encoder.encode(ImageFile.MAXBLOCK)[1:]
            if not _fp:
                fp.write(data)
            if errcode:
                break
        if errcode < 0:
            msg = f"encoder error {errcode} when writing image file"
            raise OSError(msg)

    else:
        # raw path: write the IFD ourselves, then the pixel data
        for tag in blocklist:
            del ifd[tag]
        offset = ifd.save(fp)

        ImageFile._save(
            im,
            fp,
            [ImageFile._Tile("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))],
        )

    # -- helper for multi-page save --
    if "_debug_multipage" in encoderinfo:
        # just to access o32 and o16 (using correct byte order)
        setattr(im, "_debug_multipage", ifd)
class AppendingTiffWriter(io.BytesIO):
    """File-like writer that appends frames (IFDs) to a TIFF file.

    After each appended frame, offsets inside the new page's IFD are
    rewritten so they are relative to the start of the whole file
    rather than the start of the page.
    """

    # Byte size of each TIFF field type, indexed by field-type id.
    fieldSizes = [
        0,  # None
        1,  # byte
        1,  # ascii
        2,  # short
        4,  # long
        8,  # rational
        1,  # sbyte
        1,  # undefined
        2,  # sshort
        4,  # slong
        8,  # srational
        4,  # float
        8,  # double
        4,  # ifd
        2,  # unicode
        4,  # complex
        8,  # long8
    ]

    # Tags whose values are themselves file offsets and therefore must
    # be adjusted when a page is relocated.
    Tags = {
        273,  # StripOffsets
        288,  # FreeOffsets
        324,  # TileOffsets
        519,  # JPEGQTables
        520,  # JPEGDCTables
        521,  # JPEGACTables
    }

    def __init__(self, fn: StrOrBytesPath | IO[bytes], new: bool = False) -> None:
        self.f: IO[bytes]
        if is_path(fn):
            self.name = fn
            self.close_fp = True
            try:
                self.f = open(fn, "w+b" if new else "r+b")
            except OSError:
                self.f = open(fn, "w+b")
        else:
            self.f = cast(IO[bytes], fn)
            self.close_fp = False
        self.beginning = self.f.tell()
        self.setup()

    def setup(self) -> None:
        # Reset everything.
        self.f.seek(self.beginning, os.SEEK_SET)

        self.whereToWriteNewIFDOffset: int | None = None
        self.offsetOfNewPage = 0

        self.IIMM = iimm = self.f.read(4)
        # BigTIFF headers carry version 43 (0x2b) instead of classic 42
        self._bigtiff = b"\x2b" in iimm
        if not iimm:
            # empty file - first page
            self.isFirst = True
            return

        self.isFirst = False
        if iimm not in PREFIXES:
            msg = "Invalid TIFF file header"
            raise RuntimeError(msg)

        self.setEndian("<" if iimm.startswith(II) else ">")

        if self._bigtiff:
            self.f.seek(4, os.SEEK_CUR)
        self.skipIFDs()
        self.goToEnd()

    def finalize(self) -> None:
        """Link the just-written page into the previous IFD chain."""
        if self.isFirst:
            return

        # fix offsets
        self.f.seek(self.offsetOfNewPage)

        iimm = self.f.read(4)
        if not iimm:
            # Make it easy to finish a frame without committing to a new one.
            return

        if iimm != self.IIMM:
            msg = "IIMM of new page doesn't match IIMM of first page"
            raise RuntimeError(msg)

        if self._bigtiff:
            self.f.seek(4, os.SEEK_CUR)
        ifd_offset = self._read(8 if self._bigtiff else 4)
        ifd_offset += self.offsetOfNewPage
        assert self.whereToWriteNewIFDOffset is not None
        self.f.seek(self.whereToWriteNewIFDOffset)
        self._write(ifd_offset, 8 if self._bigtiff else 4)
        self.f.seek(ifd_offset)
        self.fixIFD()

    def newFrame(self) -> None:
        # Call this to finish a frame.
        self.finalize()
        self.setup()

    def __enter__(self) -> AppendingTiffWriter:
        return self

    def __exit__(self, *args: object) -> None:
        if self.close_fp:
            self.close()

    def tell(self) -> int:
        # Position relative to the current page, not the whole file.
        return self.f.tell() - self.offsetOfNewPage

    def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
        """
        :param offset: Distance to seek.
        :param whence: Whether the distance is relative to the start,
                       end or current position.
        :returns: The resulting position, relative to the start.
        """
        if whence == os.SEEK_SET:
            offset += self.offsetOfNewPage

        self.f.seek(offset, whence)
        return self.tell()

    def goToEnd(self) -> None:
        self.f.seek(0, os.SEEK_END)
        pos = self.f.tell()

        # pad to 16 byte boundary
        pad_bytes = 16 - pos % 16
        if 0 < pad_bytes < 16:
            self.f.write(bytes(pad_bytes))
        self.offsetOfNewPage = self.f.tell()

    def setEndian(self, endian: str) -> None:
        self.endian = endian
        self.longFmt = f"{self.endian}L"
        self.shortFmt = f"{self.endian}H"
        # tag entry layout: tag id (H), type (H), count (Q for BigTIFF, else L)
        self.tagFormat = f"{self.endian}HH" + ("Q" if self._bigtiff else "L")

    def skipIFDs(self) -> None:
        """Advance past all existing IFDs, recording where the next-IFD
        pointer of the last one lives."""
        while True:
            ifd_offset = self._read(8 if self._bigtiff else 4)
            if ifd_offset == 0:
                self.whereToWriteNewIFDOffset = self.f.tell() - (
                    8 if self._bigtiff else 4
                )
                break

            self.f.seek(ifd_offset)
            num_tags = self._read(8 if self._bigtiff else 2)
            self.f.seek(num_tags * (20 if self._bigtiff else 12), os.SEEK_CUR)

    def write(self, data: Buffer, /) -> int:
        return self.f.write(data)

    def _fmt(self, field_size: int) -> str:
        # struct format character for an unsigned field of this size
        try:
            return {2: "H", 4: "L", 8: "Q"}[field_size]
        except KeyError:
            msg = "offset is not supported"
            raise RuntimeError(msg)

    def _read(self, field_size: int) -> int:
        (value,) = struct.unpack(
            self.endian + self._fmt(field_size), self.f.read(field_size)
        )
        return value

    def readShort(self) -> int:
        return self._read(2)

    def readLong(self) -> int:
        return self._read(4)

    @staticmethod
    def _verify_bytes_written(bytes_written: int | None, expected: int) -> None:
        if bytes_written is not None and bytes_written != expected:
            msg = f"wrote only {bytes_written} bytes but wanted {expected}"
            raise RuntimeError(msg)

    def _rewriteLast(
        self, value: int, field_size: int, new_field_size: int = 0
    ) -> None:
        # Overwrite the field that was just read, optionally widening it.
        self.f.seek(-field_size, os.SEEK_CUR)
        if not new_field_size:
            new_field_size = field_size
        bytes_written = self.f.write(
            struct.pack(self.endian + self._fmt(new_field_size), value)
        )
        self._verify_bytes_written(bytes_written, new_field_size)

    def rewriteLastShortToLong(self, value: int) -> None:
        self._rewriteLast(value, 2, 4)

    def rewriteLastShort(self, value: int) -> None:
        return self._rewriteLast(value, 2)

    def rewriteLastLong(self, value: int) -> None:
        return self._rewriteLast(value, 4)

    def _write(self, value: int, field_size: int) -> None:
        bytes_written = self.f.write(
            struct.pack(self.endian + self._fmt(field_size), value)
        )
        self._verify_bytes_written(bytes_written, field_size)

    def writeShort(self, value: int) -> None:
        self._write(value, 2)

    def writeLong(self, value: int) -> None:
        self._write(value, 4)

    def close(self) -> None:
        self.finalize()
        if self.close_fp:
            self.f.close()

    def fixIFD(self) -> None:
        """Rewrite every offset in the current IFD to be file-relative."""
        num_tags = self._read(8 if self._bigtiff else 2)

        for i in range(num_tags):
            tag, field_type, count = struct.unpack(
                self.tagFormat, self.f.read(12 if self._bigtiff else 8)
            )

            field_size = self.fieldSizes[field_type]
            total_size = field_size * count
            fmt_size = 8 if self._bigtiff else 4
            # values small enough live inline in the entry itself
            is_local = total_size <= fmt_size
            if not is_local:
                offset = self._read(fmt_size) + self.offsetOfNewPage
                self._rewriteLast(offset, fmt_size)

            if tag in self.Tags:
                cur_pos = self.f.tell()

                logger.debug(
                    "fixIFD: %s (%d) - type: %s (%d) - type size: %d - count: %d",
                    TiffTags.lookup(tag).name,
                    tag,
                    TYPES.get(field_type, "unknown"),
                    field_type,
                    field_size,
                    count,
                )

                if is_local:
                    self._fixOffsets(count, field_size)
                    self.f.seek(cur_pos + fmt_size)
                else:
                    self.f.seek(offset)
                    self._fixOffsets(count, field_size)
                    self.f.seek(cur_pos)

            elif is_local:
                # skip the locally stored value that is not an offset
                self.f.seek(fmt_size, os.SEEK_CUR)

    def _fixOffsets(self, count: int, field_size: int) -> None:
        for i in range(count):
            offset = self._read(field_size)
            offset += self.offsetOfNewPage

            new_field_size = 0
            if self._bigtiff and field_size in (2, 4) and offset >= 2**32:
                # offset is now too large - we must convert long to long8
                new_field_size = 8
            elif field_size == 2 and offset >= 2**16:
                # offset is now too large - we must convert short to long
                new_field_size = 4
            if new_field_size:
                if count != 1:
                    msg = "not implemented"
                    raise RuntimeError(msg)  # XXX TODO

                # simple case - the offset is just one and therefore it is
                # local (not referenced with another offset)
                self._rewriteLast(offset, field_size, new_field_size)
                # Move back past the new offset, past 'count', and before 'field_type'
                rewind = -new_field_size - 4 - 2
                self.f.seek(rewind, os.SEEK_CUR)
                self.writeShort(new_field_size)  # rewrite the type
                self.f.seek(2 - rewind, os.SEEK_CUR)
            else:
                self._rewriteLast(offset, field_size)

    def fixOffsets(
        self, count: int, isShort: bool = False, isLong: bool = False
    ) -> None:
        # Legacy public wrapper around _fixOffsets.
        if isShort:
            field_size = 2
        elif isLong:
            field_size = 4
        else:
            field_size = 0
        return self._fixOffsets(count, field_size)
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* plus any ``encoderinfo["append_images"]`` as a multi-page
    TIFF, appending one page per frame of each source image."""
    extra = list(im.encoderinfo.get("append_images", []))
    if not extra and not hasattr(im, "n_frames"):
        # Nothing multi-frame about this save: plain single-page path.
        return _save(im, fp, filename)

    start_frame = im.tell()
    try:
        with AppendingTiffWriter(fp) as writer:
            for seq in [im] + extra:
                saved_encoderinfo = seq._attach_default_encoderinfo(im)
                if not hasattr(seq, "encoderconfig"):
                    seq.encoderconfig = ()
                for frame_no in range(getattr(seq, "n_frames", 1)):
                    seq.seek(frame_no)
                    seq.load()
                    _save(seq, writer, filename)
                    writer.newFrame()
                # Restore whatever encoderinfo the image carried before.
                seq.encoderinfo = saved_encoderinfo
    finally:
        # Leave the primary image positioned where the caller had it.
        im.seek(start_frame)
#
# --------------------------------------------------------------------
# Register
# Hook the TIFF reader/writer into PIL's plugin registry and associate
# the usual file extensions and MIME type with the format.
Image.register_open(TiffImageFile.format, TiffImageFile, _accept)
Image.register_save(TiffImageFile.format, _save)
Image.register_save_all(TiffImageFile.format, _save_all)
Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"])
Image.register_mime(TiffImageFile.format, "image/tiff")
venv\Lib\site-packages\PIL\TiffTags.py
#
# The Python Imaging Library.
# $Id$
#
# TIFF tags
#
# This module provides clear-text names for various well-known
# TIFF tags. the TIFF codec works just fine without it.
#
# Copyright (c) Secret Labs AB 1999.
#
# See the README file for information on usage and redistribution.
#
##
# This module provides constants and clear-text names for various
# well-known TIFF tags.
##
from __future__ import annotations
from typing import NamedTuple
class _TagInfo(NamedTuple):
    # Raw record shape: (tag id, clear-text name, field type code,
    # expected value count, enum label -> value mapping).
    value: int | None
    name: str
    type: int | None
    length: int | None
    enum: dict[str, int]


class TagInfo(_TagInfo):
    """Named-tuple record describing one TIFF tag, with defaulting
    construction and enum-label conversion."""

    __slots__: list[str] = []

    def __new__(
        cls,
        value: int | None = None,
        name: str = "unknown",
        type: int | None = None,
        length: int | None = None,
        enum: dict[str, int] | None = None,
    ) -> TagInfo:
        # Normalize a missing enum to an empty dict so cvt_enum can rely on it.
        fields = (value, name, type, length, {} if enum is None else enum)
        return super().__new__(cls, *fields)

    def cvt_enum(self, value: str) -> int | str:
        """Map an enum label to its numeric value; unknown labels pass through."""
        # hash(value) can be expensive for some types (e.g. Fraction) and
        # most tags carry no enum, so test for a non-empty mapping first.
        if not self.enum:
            return value
        return self.enum.get(value, value)
def lookup(tag: int, group: int | None = None) -> TagInfo:
    """
    :param tag: Integer tag number
    :param group: Which :py:data:`~PIL.TiffTags.TAGS_V2_GROUPS` to look in

    .. versionadded:: 8.3.0

    :returns: Taginfo namedtuple, From the ``TAGS_V2`` info if possible,
        otherwise just populating the value and name from ``TAGS``.
        If the tag is not recognized, "unknown" is returned for the name
    """
    if group is None:
        info = TAGS_V2.get(tag)
    else:
        members = TAGS_V2_GROUPS.get(group)
        info = members.get(tag) if members is not None else None
    if info:
        return info
    # Fall back to the legacy name table (or "unknown").
    return TagInfo(tag, TAGS.get(tag, "unknown"))
##
# Map tag numbers to tag info.
#
# id: (Name, Type, Length[, enum_values])
#
# The length here differs from the length in the tiff spec. For
# numbers, the tiff spec is for the number of fields returned. We
# agree here. For string-like types, the tiff spec uses the length of
# field in bytes. In Pillow, we are using the number of expected
# fields, in general 1 for string-like types.
# TIFF field type codes, as used in the "type" field of an IFD entry
# (TIFF 6.0 spec, plus LONG8 from BigTIFF).
BYTE = 1  # 8-bit unsigned integer
ASCII = 2  # NUL-terminated string
SHORT = 3  # 16-bit unsigned integer
LONG = 4  # 32-bit unsigned integer
RATIONAL = 5  # two LONGs: numerator / denominator
SIGNED_BYTE = 6
UNDEFINED = 7  # opaque bytes
SIGNED_SHORT = 8
SIGNED_LONG = 9
SIGNED_RATIONAL = 10  # two SIGNED_LONGs
FLOAT = 11  # 32-bit IEEE float
DOUBLE = 12  # 64-bit IEEE float
IFD = 13  # LONG offset to a sub-IFD
LONG8 = 16  # 64-bit unsigned integer (BigTIFF)
# Well-known TIFF tag definitions, consumed by _populate() to build TAGS_V2.
_tags_v2: dict[int, tuple[str, int, int] | tuple[str, int, int, dict[str, int]]] = {
    254: ("NewSubfileType", LONG, 1),
    255: ("SubfileType", SHORT, 1),
    256: ("ImageWidth", LONG, 1),
    257: ("ImageLength", LONG, 1),
    258: ("BitsPerSample", SHORT, 0),
    259: (
        "Compression",
        SHORT,
        1,
        {
            "Uncompressed": 1,
            "CCITT 1d": 2,
            "Group 3 Fax": 3,
            "Group 4 Fax": 4,
            "LZW": 5,
            "JPEG": 6,
            "PackBits": 32773,
        },
    ),
    262: (
        "PhotometricInterpretation",
        SHORT,
        1,
        {
            "WhiteIsZero": 0,
            "BlackIsZero": 1,
            "RGB": 2,
            "RGB Palette": 3,
            "Transparency Mask": 4,
            "CMYK": 5,
            "YCbCr": 6,
            "CieLAB": 8,
            "CFA": 32803,  # TIFF/EP, Adobe DNG
            "LinearRaw": 32892,  # Adobe DNG
        },
    ),
    263: ("Threshholding", SHORT, 1),
    264: ("CellWidth", SHORT, 1),
    265: ("CellLength", SHORT, 1),
    266: ("FillOrder", SHORT, 1),
    269: ("DocumentName", ASCII, 1),
    270: ("ImageDescription", ASCII, 1),
    271: ("Make", ASCII, 1),
    272: ("Model", ASCII, 1),
    273: ("StripOffsets", LONG, 0),
    274: ("Orientation", SHORT, 1),
    277: ("SamplesPerPixel", SHORT, 1),
    278: ("RowsPerStrip", LONG, 1),
    279: ("StripByteCounts", LONG, 0),
    280: ("MinSampleValue", SHORT, 0),
    281: ("MaxSampleValue", SHORT, 0),
    282: ("XResolution", RATIONAL, 1),
    283: ("YResolution", RATIONAL, 1),
    284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}),
    285: ("PageName", ASCII, 1),
    286: ("XPosition", RATIONAL, 1),
    287: ("YPosition", RATIONAL, 1),
    288: ("FreeOffsets", LONG, 1),
    289: ("FreeByteCounts", LONG, 1),
    290: ("GrayResponseUnit", SHORT, 1),
    291: ("GrayResponseCurve", SHORT, 0),
    292: ("T4Options", LONG, 1),
    293: ("T6Options", LONG, 1),
    296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}),
    297: ("PageNumber", SHORT, 2),
    301: ("TransferFunction", SHORT, 0),
    305: ("Software", ASCII, 1),
    306: ("DateTime", ASCII, 1),
    315: ("Artist", ASCII, 1),
    316: ("HostComputer", ASCII, 1),
    317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}),
    318: ("WhitePoint", RATIONAL, 2),
    319: ("PrimaryChromaticities", RATIONAL, 6),
    320: ("ColorMap", SHORT, 0),
    321: ("HalftoneHints", SHORT, 2),
    322: ("TileWidth", LONG, 1),
    323: ("TileLength", LONG, 1),
    324: ("TileOffsets", LONG, 0),
    325: ("TileByteCounts", LONG, 0),
    330: ("SubIFDs", LONG, 0),
    332: ("InkSet", SHORT, 1),
    333: ("InkNames", ASCII, 1),
    334: ("NumberOfInks", SHORT, 1),
    336: ("DotRange", SHORT, 0),
    337: ("TargetPrinter", ASCII, 1),
    338: ("ExtraSamples", SHORT, 0),
    339: ("SampleFormat", SHORT, 0),
    340: ("SMinSampleValue", DOUBLE, 0),
    341: ("SMaxSampleValue", DOUBLE, 0),
    342: ("TransferRange", SHORT, 6),
    347: ("JPEGTables", UNDEFINED, 1),
    # obsolete JPEG tags
    512: ("JPEGProc", SHORT, 1),
    513: ("JPEGInterchangeFormat", LONG, 1),
    514: ("JPEGInterchangeFormatLength", LONG, 1),
    515: ("JPEGRestartInterval", SHORT, 1),
    517: ("JPEGLosslessPredictors", SHORT, 0),
    518: ("JPEGPointTransforms", SHORT, 0),
    519: ("JPEGQTables", LONG, 0),
    520: ("JPEGDCTables", LONG, 0),
    521: ("JPEGACTables", LONG, 0),
    529: ("YCbCrCoefficients", RATIONAL, 3),
    530: ("YCbCrSubSampling", SHORT, 2),
    531: ("YCbCrPositioning", SHORT, 1),
    532: ("ReferenceBlackWhite", RATIONAL, 6),
    700: ("XMP", BYTE, 0),
    33432: ("Copyright", ASCII, 1),
    33723: ("IptcNaaInfo", UNDEFINED, 1),
    34377: ("PhotoshopInfo", BYTE, 0),
    # FIXME add more tags here
    34665: ("ExifIFD", LONG, 1),
    34675: ("ICCProfile", UNDEFINED, 1),
    34853: ("GPSInfoIFD", LONG, 1),
    36864: ("ExifVersion", UNDEFINED, 1),
    37724: ("ImageSourceData", UNDEFINED, 1),
    40965: ("InteroperabilityIFD", LONG, 1),
    41730: ("CFAPattern", UNDEFINED, 1),
    # MPInfo
    45056: ("MPFVersion", UNDEFINED, 1),
    45057: ("NumberOfImages", LONG, 1),
    45058: ("MPEntry", UNDEFINED, 1),
    45059: ("ImageUIDList", UNDEFINED, 0),  # UNDONE, check
    45060: ("TotalFrames", LONG, 1),
    45313: ("MPIndividualNum", LONG, 1),
    45569: ("PanOrientation", LONG, 1),
    45570: ("PanOverlap_H", RATIONAL, 1),
    45571: ("PanOverlap_V", RATIONAL, 1),
    45572: ("BaseViewpointNum", LONG, 1),
    45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1),
    45574: ("BaselineLength", RATIONAL, 1),
    45575: ("VerticalDivergence", SIGNED_RATIONAL, 1),
    45576: ("AxisDistance_X", SIGNED_RATIONAL, 1),
    45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1),
    45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1),
    45579: ("YawAngle", SIGNED_RATIONAL, 1),
    45580: ("PitchAngle", SIGNED_RATIONAL, 1),
    45581: ("RollAngle", SIGNED_RATIONAL, 1),
    40960: ("FlashPixVersion", UNDEFINED, 1),
    50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}),
    50780: ("BestQualityScale", RATIONAL, 1),
    50838: ("ImageJMetaDataByteCounts", LONG, 0),  # Can be more than one
    50839: ("ImageJMetaData", UNDEFINED, 1),  # see Issue #2006
}
# Tag definitions that only apply inside specific sub-IFDs, keyed by the
# parent tag id that points at the sub-IFD.
_tags_v2_groups = {
    # ExifIFD
    34665: {
        36864: ("ExifVersion", UNDEFINED, 1),
        40960: ("FlashPixVersion", UNDEFINED, 1),
        40965: ("InteroperabilityIFD", LONG, 1),
        41730: ("CFAPattern", UNDEFINED, 1),
    },
    # GPSInfoIFD
    34853: {
        0: ("GPSVersionID", BYTE, 4),
        1: ("GPSLatitudeRef", ASCII, 2),
        2: ("GPSLatitude", RATIONAL, 3),
        3: ("GPSLongitudeRef", ASCII, 2),
        4: ("GPSLongitude", RATIONAL, 3),
        5: ("GPSAltitudeRef", BYTE, 1),
        6: ("GPSAltitude", RATIONAL, 1),
        7: ("GPSTimeStamp", RATIONAL, 3),
        8: ("GPSSatellites", ASCII, 0),
        9: ("GPSStatus", ASCII, 2),
        10: ("GPSMeasureMode", ASCII, 2),
        11: ("GPSDOP", RATIONAL, 1),
        12: ("GPSSpeedRef", ASCII, 2),
        13: ("GPSSpeed", RATIONAL, 1),
        14: ("GPSTrackRef", ASCII, 2),
        15: ("GPSTrack", RATIONAL, 1),
        16: ("GPSImgDirectionRef", ASCII, 2),
        17: ("GPSImgDirection", RATIONAL, 1),
        18: ("GPSMapDatum", ASCII, 0),
        19: ("GPSDestLatitudeRef", ASCII, 2),
        20: ("GPSDestLatitude", RATIONAL, 3),
        21: ("GPSDestLongitudeRef", ASCII, 2),
        22: ("GPSDestLongitude", RATIONAL, 3),
        23: ("GPSDestBearingRef", ASCII, 2),
        24: ("GPSDestBearing", RATIONAL, 1),
        25: ("GPSDestDistanceRef", ASCII, 2),
        26: ("GPSDestDistance", RATIONAL, 1),
        27: ("GPSProcessingMethod", UNDEFINED, 0),
        28: ("GPSAreaInformation", UNDEFINED, 0),
        29: ("GPSDateStamp", ASCII, 11),
        30: ("GPSDifferential", SHORT, 1),
    },
    # InteroperabilityIFD
    40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)},
}
# Legacy Tags structure
# these tags aren't included above, but were in the previous versions
# (name-only; _populate() adds the _tags_v2 names into this dict as well)
TAGS: dict[int | tuple[int, int], str] = {
    347: "JPEGTables",
    700: "XMP",
    # Additional Exif Info
    32932: "Wang Annotation",
    33434: "ExposureTime",
    33437: "FNumber",
    33445: "MD FileTag",
    33446: "MD ScalePixel",
    33447: "MD ColorTable",
    33448: "MD LabName",
    33449: "MD SampleInfo",
    33450: "MD PrepDate",
    33451: "MD PrepTime",
    33452: "MD FileUnits",
    33550: "ModelPixelScaleTag",
    33723: "IptcNaaInfo",
    33918: "INGR Packet Data Tag",
    33919: "INGR Flag Registers",
    33920: "IrasB Transformation Matrix",
    33922: "ModelTiepointTag",
    34264: "ModelTransformationTag",
    34377: "PhotoshopInfo",
    34735: "GeoKeyDirectoryTag",
    34736: "GeoDoubleParamsTag",
    34737: "GeoAsciiParamsTag",
    34850: "ExposureProgram",
    34852: "SpectralSensitivity",
    34855: "ISOSpeedRatings",
    34856: "OECF",
    34864: "SensitivityType",
    34865: "StandardOutputSensitivity",
    34866: "RecommendedExposureIndex",
    34867: "ISOSpeed",
    34868: "ISOSpeedLatitudeyyy",
    34869: "ISOSpeedLatitudezzz",
    34908: "HylaFAX FaxRecvParams",
    34909: "HylaFAX FaxSubAddress",
    34910: "HylaFAX FaxRecvTime",
    36864: "ExifVersion",
    36867: "DateTimeOriginal",
    36868: "DateTimeDigitized",
    37121: "ComponentsConfiguration",
    37122: "CompressedBitsPerPixel",
    37724: "ImageSourceData",
    37377: "ShutterSpeedValue",
    37378: "ApertureValue",
    37379: "BrightnessValue",
    37380: "ExposureBiasValue",
    37381: "MaxApertureValue",
    37382: "SubjectDistance",
    37383: "MeteringMode",
    37384: "LightSource",
    37385: "Flash",
    37386: "FocalLength",
    37396: "SubjectArea",
    37500: "MakerNote",
    37510: "UserComment",
    37520: "SubSec",
    37521: "SubSecTimeOriginal",
    37522: "SubsecTimeDigitized",
    40960: "FlashPixVersion",
    40961: "ColorSpace",
    40962: "PixelXDimension",
    40963: "PixelYDimension",
    40964: "RelatedSoundFile",
    40965: "InteroperabilityIFD",
    41483: "FlashEnergy",
    41484: "SpatialFrequencyResponse",
    41486: "FocalPlaneXResolution",
    41487: "FocalPlaneYResolution",
    41488: "FocalPlaneResolutionUnit",
    41492: "SubjectLocation",
    41493: "ExposureIndex",
    41495: "SensingMethod",
    41728: "FileSource",
    41729: "SceneType",
    41730: "CFAPattern",
    41985: "CustomRendered",
    41986: "ExposureMode",
    41987: "WhiteBalance",
    41988: "DigitalZoomRatio",
    41989: "FocalLengthIn35mmFilm",
    41990: "SceneCaptureType",
    41991: "GainControl",
    41992: "Contrast",
    41993: "Saturation",
    41994: "Sharpness",
    41995: "DeviceSettingDescription",
    41996: "SubjectDistanceRange",
    42016: "ImageUniqueID",
    42032: "CameraOwnerName",
    42033: "BodySerialNumber",
    42034: "LensSpecification",
    42035: "LensMake",
    42036: "LensModel",
    42037: "LensSerialNumber",
    42112: "GDAL_METADATA",
    42113: "GDAL_NODATA",
    42240: "Gamma",
    50215: "Oce Scanjob Description",
    50216: "Oce Application Selector",
    50217: "Oce Identification Number",
    50218: "Oce ImageLogic Characteristics",
    # Adobe DNG
    50706: "DNGVersion",
    50707: "DNGBackwardVersion",
    50708: "UniqueCameraModel",
    50709: "LocalizedCameraModel",
    50710: "CFAPlaneColor",
    50711: "CFALayout",
    50712: "LinearizationTable",
    50713: "BlackLevelRepeatDim",
    50714: "BlackLevel",
    50715: "BlackLevelDeltaH",
    50716: "BlackLevelDeltaV",
    50717: "WhiteLevel",
    50718: "DefaultScale",
    50719: "DefaultCropOrigin",
    50720: "DefaultCropSize",
    50721: "ColorMatrix1",
    50722: "ColorMatrix2",
    50723: "CameraCalibration1",
    50724: "CameraCalibration2",
    50725: "ReductionMatrix1",
    50726: "ReductionMatrix2",
    50727: "AnalogBalance",
    50728: "AsShotNeutral",
    50729: "AsShotWhiteXY",
    50730: "BaselineExposure",
    50731: "BaselineNoise",
    50732: "BaselineSharpness",
    50733: "BayerGreenSplit",
    50734: "LinearResponseLimit",
    50735: "CameraSerialNumber",
    50736: "LensInfo",
    50737: "ChromaBlurRadius",
    50738: "AntiAliasStrength",
    50740: "DNGPrivateData",
    50778: "CalibrationIlluminant1",
    50779: "CalibrationIlluminant2",
    50784: "Alias Layer Metadata",
}
# Public tag-info tables; filled in by _populate() from the private
# _tags_v2 / _tags_v2_groups literals above.
TAGS_V2: dict[int, TagInfo] = {}
TAGS_V2_GROUPS: dict[int, dict[int, TagInfo]] = {}
def _populate() -> None:
    """Build TAGS_V2/TAGS_V2_GROUPS (and extend legacy TAGS) from the
    private _tags_v2 / _tags_v2_groups literals."""
    for tag_id, entry in _tags_v2.items():
        # Populate legacy structure: plain name, plus (tag, enum-value)
        # reverse lookups for enum labels.
        TAGS[tag_id] = entry[0]
        if len(entry) == 4:
            for enum_name, enum_value in entry[3].items():
                TAGS[(tag_id, enum_value)] = enum_name
        TAGS_V2[tag_id] = TagInfo(tag_id, *entry)
    for group_id, members in _tags_v2_groups.items():
        TAGS_V2_GROUPS[group_id] = {
            tag_id: TagInfo(tag_id, *entry) for tag_id, entry in members.items()
        }


_populate()
##
# Map type numbers to type names -- defined in ImageFileDirectory.
# NOTE(review): intentionally left empty here; per the comment above it is
# filled in elsewhere (ImageFileDirectory) — confirm against TiffImagePlugin.
TYPES: dict[int, str] = {}
#
# These tags are handled by default in libtiff, without
# adding to the custom dictionary. From tif_dir.c, searching for
# case TIFFTAG in the _TIFFVSetField function:
# Line: item.
# 148: case TIFFTAG_SUBFILETYPE:
# 151: case TIFFTAG_IMAGEWIDTH:
# 154: case TIFFTAG_IMAGELENGTH:
# 157: case TIFFTAG_BITSPERSAMPLE:
# 181: case TIFFTAG_COMPRESSION:
# 202: case TIFFTAG_PHOTOMETRIC:
# 205: case TIFFTAG_THRESHHOLDING:
# 208: case TIFFTAG_FILLORDER:
# 214: case TIFFTAG_ORIENTATION:
# 221: case TIFFTAG_SAMPLESPERPIXEL:
# 228: case TIFFTAG_ROWSPERSTRIP:
# 238: case TIFFTAG_MINSAMPLEVALUE:
# 241: case TIFFTAG_MAXSAMPLEVALUE:
# 244: case TIFFTAG_SMINSAMPLEVALUE:
# 247: case TIFFTAG_SMAXSAMPLEVALUE:
# 250: case TIFFTAG_XRESOLUTION:
# 256: case TIFFTAG_YRESOLUTION:
# 262: case TIFFTAG_PLANARCONFIG:
# 268: case TIFFTAG_XPOSITION:
# 271: case TIFFTAG_YPOSITION:
# 274: case TIFFTAG_RESOLUTIONUNIT:
# 280: case TIFFTAG_PAGENUMBER:
# 284: case TIFFTAG_HALFTONEHINTS:
# 288: case TIFFTAG_COLORMAP:
# 294: case TIFFTAG_EXTRASAMPLES:
# 298: case TIFFTAG_MATTEING:
# 305: case TIFFTAG_TILEWIDTH:
# 316: case TIFFTAG_TILELENGTH:
# 327: case TIFFTAG_TILEDEPTH:
# 333: case TIFFTAG_DATATYPE:
# 344: case TIFFTAG_SAMPLEFORMAT:
# 361: case TIFFTAG_IMAGEDEPTH:
# 364: case TIFFTAG_SUBIFD:
# 376: case TIFFTAG_YCBCRPOSITIONING:
# 379: case TIFFTAG_YCBCRSUBSAMPLING:
# 383: case TIFFTAG_TRANSFERFUNCTION:
# 389: case TIFFTAG_REFERENCEBLACKWHITE:
# 393: case TIFFTAG_INKNAMES:
# Following pseudo-tags are also handled by default in libtiff:
# TIFFTAG_JPEGQUALITY 65537
# some of these are not in our TAGS_V2 dict and were included from tiff.h
# This list also exists in encode.c
# Tags libtiff handles natively, so they must not be passed through the
# custom tag dictionary (see the tif_dir.c case list quoted above).
LIBTIFF_CORE = {
    255,
    256,
    257,
    258,
    259,
    262,
    263,
    266,
    274,
    277,
    278,
    280,
    281,
    340,
    341,
    282,
    283,
    284,
    286,
    287,
    296,
    297,
    321,
    320,
    338,
    32995,
    322,
    323,
    32998,
    32996,
    339,
    32997,
    330,
    531,
    530,
    301,
    532,
    333,
    # as above
    269,  # this has been in our tests forever, and works
    65537,
}

# Pillow-specific exclusions from the core set:
LIBTIFF_CORE.remove(255)  # We don't have support for subfiletypes
LIBTIFF_CORE.remove(322)  # We don't have support for writing tiled images with libtiff
LIBTIFF_CORE.remove(323)  # Tiled images
LIBTIFF_CORE.remove(333)  # Ink Names either
# Note to advanced users: There may be combinations of these
# parameters and values that when added properly, will work and
# produce valid tiff images that may work in your application.
# It is safe to add and remove tags from this set from Pillow's point
# of view so long as you test against libtiff.
venv\Lib\site-packages\PIL\WalImageFile.py
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
"""
This reader is based on the specification available from:
https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
and has been tested with a few sample files found using google.
.. note::
This format cannot be automatically recognized, so the reader
is not registered for use with :py:func:`PIL.Image.open()`.
To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead.
"""
from __future__ import annotations
from typing import IO
from . import Image, ImageFile
from ._binary import i32le as i32
from ._typing import StrOrBytesPath
class WalImageFile(ImageFile.ImageFile):
    """Image file handler for Quake2 WAL textures (8-bit paletted)."""

    format = "WAL"
    format_description = "Quake2 Texture"

    def _open(self) -> None:
        self._mode = "P"

        # read header fields
        # 100-byte header: 32-byte name, then width/height and offsets
        # (24 bytes — presumably the 4 mipmap offsets per the spec linked in
        # the module docstring), a 32-byte next-frame name, and 12 more bytes.
        header = self.fp.read(32 + 24 + 32 + 12)
        self._size = i32(header, 32), i32(header, 36)
        Image._decompression_bomb_check(self.size)

        # load pixel data
        # offset at byte 40 points at the full-size image data
        offset = i32(header, 40)
        self.fp.seek(offset)

        # strings are null-terminated
        self.info["name"] = header[:32].split(b"\0", 1)[0]
        next_name = header[56 : 56 + 32].split(b"\0", 1)[0]
        if next_name:
            self.info["next_name"] = next_name

    def load(self) -> Image.core.PixelAccess | None:
        # Lazy decode: one byte per pixel, with the default Quake2 palette
        # attached on first load.
        if self._im is None:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self.size[0] * self.size[1]))
            self.putpalette(quake2palette)
        return Image.Image.load(self)
def open(filename: StrOrBytesPath | IO[bytes]) -> WalImageFile:
    """
    Load texture from a Quake2 WAL texture file.

    By default, a Quake2 standard palette is attached to the texture.
    To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method.

    :param filename: WAL file name, or an opened file handle.
    :returns: An image instance.
    """
    # WAL has no magic number (see module docstring), so this helper is the
    # entry point instead of Image.open().
    return WalImageFile(filename)
# 256-entry RGB palette (48 lines x 16 bytes = 768 bytes); component values
# stay in the 0x00-0x3f range (6-bit).
quake2palette = (
    # default palette taken from piffo 0.93 by Hans Häggström
    b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
    b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
    b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
    b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
    b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
    b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
    b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
    b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
    b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
    b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
    b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
    b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
    b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
    b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
    b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
    b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
    b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
    b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
    b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
    b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
    b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
    b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
    b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
    b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
    b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
    b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
    b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
    b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
    b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
    b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
    b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
    b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
    b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
    b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
    b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
    b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
    b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
    b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
    b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
    b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
    b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
    b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
    b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
    b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
    b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
    b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
    b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
    b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
venv\Lib\site-packages\PIL\WebPImagePlugin.py
from __future__ import annotations
from io import BytesIO
from typing import IO, Any
from . import Image, ImageFile
try:
from . import _webp
SUPPORTED = True
except ImportError:
SUPPORTED = False
_VP8_MODES_BY_IDENTIFIER = {
b"VP8 ": "RGB",
b"VP8X": "RGBA",
b"VP8L": "RGBA", # lossless
}
def _accept(prefix: bytes) -> bool | str:
is_riff_file_format = prefix.startswith(b"RIFF")
is_webp_file = prefix[8:12] == b"WEBP"
is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
if is_riff_file_format and is_webp_file and is_valid_vp8_mode:
if not SUPPORTED:
return (
"image file could not be identified because WEBP support not installed"
)
return True
return False
class WebPImageFile(ImageFile.ImageFile):
    """Image plugin for (possibly animated) WebP files, decoded through
    libwebp's AnimDecoder API via the _webp extension."""

    format = "WEBP"
    format_description = "WebP image"
    # Physical frame currently materialized into self.fp/self.tile.
    __loaded = 0
    # Frame requested via seek(); decoding is deferred until load().
    __logical_frame = 0

    def _open(self) -> None:
        # Use the newer AnimDecoder API to parse the (possibly) animated file,
        # and access muxed chunks like ICC/EXIF/XMP.
        self._decoder = _webp.WebPAnimDecoder(self.fp.read())

        # Get info from decoder
        self._size, loop_count, bgcolor, frame_count, mode = self._decoder.get_info()
        self.info["loop"] = loop_count
        # bgcolor is packed with A in the top byte (0xAARRGGBB); expose it
        # to callers as an (R, G, B, A) tuple.
        bg_a, bg_r, bg_g, bg_b = (
            (bgcolor >> 24) & 0xFF,
            (bgcolor >> 16) & 0xFF,
            (bgcolor >> 8) & 0xFF,
            bgcolor & 0xFF,
        )
        self.info["background"] = (bg_r, bg_g, bg_b, bg_a)
        self.n_frames = frame_count
        self.is_animated = self.n_frames > 1
        self._mode = "RGB" if mode == "RGBX" else mode
        self.rawmode = mode

        # Attempt to read ICC / EXIF / XMP chunks from file
        icc_profile = self._decoder.get_chunk("ICCP")
        exif = self._decoder.get_chunk("EXIF")
        xmp = self._decoder.get_chunk("XMP ")
        if icc_profile:
            self.info["icc_profile"] = icc_profile
        if exif:
            self.info["exif"] = exif
        if xmp:
            self.info["xmp"] = xmp

        # Initialize seek state
        self._reset(reset=False)

    def _getexif(self) -> dict[int, Any] | None:
        """Return the merged EXIF dict, or None if no EXIF chunk was found."""
        if "exif" not in self.info:
            return None
        return self.getexif()._get_merged_dict()

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return

        # Set logical frame to requested position; actual decoding is
        # deferred to load().
        self.__logical_frame = frame

    def _reset(self, reset: bool = True) -> None:
        # Rewind bookkeeping; reset=False skips rewinding the underlying
        # decoder (used from _open, where it is already at the start).
        if reset:
            self._decoder.reset()
        self.__physical_frame = 0
        self.__loaded = -1
        self.__timestamp = 0

    def _get_next(self) -> tuple[bytes, int, int]:
        """Decode the next physical frame; return (data, timestamp, duration)."""
        # Get next frame
        ret = self._decoder.get_next()
        self.__physical_frame += 1

        # Check if an error occurred
        if ret is None:
            self._reset()  # Reset just to be safe
            self.seek(0)
            msg = "failed to decode next frame in WebP file"
            raise EOFError(msg)

        # Compute duration
        data, timestamp = ret
        duration = timestamp - self.__timestamp
        self.__timestamp = timestamp

        # libwebp gives frame end, adjust to start of frame
        timestamp -= duration
        return data, timestamp, duration

    def _seek(self, frame: int) -> None:
        # Advance (rewinding first if needed) the physical decoder to `frame`.
        if self.__physical_frame == frame:
            return  # Nothing to do
        if frame < self.__physical_frame:
            self._reset()  # Rewind to beginning
        while self.__physical_frame < frame:
            self._get_next()  # Advance to the requested frame

    def load(self) -> Image.core.PixelAccess | None:
        if self.__loaded != self.__logical_frame:
            self._seek(self.__logical_frame)

            # We need to load the image data for this frame
            data, timestamp, duration = self._get_next()
            self.info["timestamp"] = timestamp
            self.info["duration"] = duration
            self.__loaded = self.__logical_frame

            # Set tile: replace the file pointer with the decoded raw bytes.
            if self.fp and self._exclusive_fp:
                self.fp.close()
            self.fp = BytesIO(data)
            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.rawmode)]

        return super().load()

    def load_seek(self, pos: int) -> None:
        # Frames are decoded wholesale into memory; nothing to seek.
        pass

    def tell(self) -> int:
        return self.__logical_frame
def _convert_frame(im: Image.Image) -> Image.Image:
    """Return *im* in a WebP-encodable mode, converting only when needed."""
    if im.mode in ("RGBX", "RGBA", "RGB"):
        return im
    # Keep alpha when the source carries transparency, otherwise flatten.
    target = "RGBA" if im.has_transparency_data else "RGB"
    return im.convert(target)
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* plus ``encoderinfo["append_images"]`` as an animated WebP.

    Falls back to the single-frame encoder when only one frame exists in
    total, which preserves non-alpha modes.
    """
    encoderinfo = im.encoderinfo.copy()
    append_images = list(encoderinfo.get("append_images", []))

    # If total frame count is 1, then save using the legacy API, which
    # will preserve non-alpha modes
    total = 0
    for ims in [im] + append_images:
        total += getattr(ims, "n_frames", 1)
    if total == 1:
        _save(im, fp, filename)
        return

    background: int | tuple[int, ...] = (0, 0, 0, 0)
    if "background" in encoderinfo:
        background = encoderinfo["background"]
    elif "background" in im.info:
        background = im.info["background"]
        if isinstance(background, int):
            # GifImagePlugin stores a global color table index in
            # info["background"]. So it must be converted to an RGBA value
            palette = im.getpalette()
            if palette:
                r, g, b = palette[background * 3 : (background + 1) * 3]
                background = (r, g, b, 255)
            else:
                background = (background, background, background, 255)

    # Encoding options, from encoderinfo with image-info/static fallbacks.
    duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
    loop = im.encoderinfo.get("loop", 0)
    minimize_size = im.encoderinfo.get("minimize_size", False)
    kmin = im.encoderinfo.get("kmin", None)
    kmax = im.encoderinfo.get("kmax", None)
    allow_mixed = im.encoderinfo.get("allow_mixed", False)
    verbose = False
    lossless = im.encoderinfo.get("lossless", False)
    quality = im.encoderinfo.get("quality", 80)
    alpha_quality = im.encoderinfo.get("alpha_quality", 100)
    method = im.encoderinfo.get("method", 0)
    icc_profile = im.encoderinfo.get("icc_profile") or ""
    exif = im.encoderinfo.get("exif", "")
    if isinstance(exif, Image.Exif):
        exif = exif.tobytes()
    xmp = im.encoderinfo.get("xmp", "")
    if allow_mixed:
        lossless = False

    # Sensible keyframe defaults are from gif2webp.c script
    if kmin is None:
        kmin = 9 if lossless else 3
    if kmax is None:
        kmax = 17 if lossless else 5

    # Validate background color
    if (
        not isinstance(background, (list, tuple))
        or len(background) != 4
        or not all(0 <= v < 256 for v in background)
    ):
        msg = f"Background color is not an RGBA tuple clamped to (0-255): {background}"
        raise OSError(msg)

    # Convert to packed uint
    bg_r, bg_g, bg_b, bg_a = background
    background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0)

    # Setup the WebP animation encoder
    enc = _webp.WebPAnimEncoder(
        im.size,
        background,
        loop,
        minimize_size,
        kmin,
        kmax,
        allow_mixed,
        verbose,
    )

    # Add each frame
    frame_idx = 0
    timestamp = 0
    cur_idx = im.tell()
    try:
        for ims in [im] + append_images:
            # Get number of frames in this image
            nfr = getattr(ims, "n_frames", 1)

            for idx in range(nfr):
                ims.seek(idx)

                frame = _convert_frame(ims)

                # Append the frame to the animation encoder
                enc.add(
                    frame.getim(),
                    round(timestamp),
                    lossless,
                    quality,
                    alpha_quality,
                    method,
                )

                # Update timestamp and frame index
                if isinstance(duration, (list, tuple)):
                    timestamp += duration[frame_idx]
                else:
                    timestamp += duration
                frame_idx += 1
    finally:
        # Restore the caller's frame position on the primary image.
        im.seek(cur_idx)

    # Force encoder to flush frames
    enc.add(None, round(timestamp), lossless, quality, alpha_quality, 0)

    # Get the final output from the encoder
    data = enc.assemble(icc_profile, exif, xmp)
    if data is None:
        msg = "cannot write file as WebP (encoder returned None)"
        raise OSError(msg)

    fp.write(data)
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write a single-frame WebP image to *fp*, taking encoding options
    from ``im.encoderinfo``."""
    opts = im.encoderinfo
    lossless = opts.get("lossless", False)
    quality = opts.get("quality", 80)
    alpha_quality = opts.get("alpha_quality", 100)
    icc_profile = opts.get("icc_profile") or ""
    exif = opts.get("exif", b"")
    if isinstance(exif, Image.Exif):
        exif = exif.tobytes()
    # Strip the "Exif\0\0" APP1 prefix when present.
    if exif.startswith(b"Exif\x00\x00"):
        exif = exif[6:]
    xmp = opts.get("xmp", "")
    method = opts.get("method", 4)
    exact = 1 if opts.get("exact") else 0

    frame = _convert_frame(im)

    data = _webp.WebPEncode(
        frame.getim(),
        lossless,
        float(quality),
        float(alpha_quality),
        icc_profile,
        method,
        exact,
        exif,
        xmp,
    )
    if data is None:
        raise OSError("cannot write file as WebP (encoder returned None)")
    fp.write(data)
# Reading is always registered (an unsupported build still gets a clear
# message via _accept); writing requires the _webp extension.
Image.register_open(WebPImageFile.format, WebPImageFile, _accept)
if SUPPORTED:
    Image.register_save(WebPImageFile.format, _save)
    Image.register_save_all(WebPImageFile.format, _save_all)
Image.register_extension(WebPImageFile.format, ".webp")
Image.register_mime(WebPImageFile.format, "image/webp")
venv\Lib\site-packages\PIL\WmfImagePlugin.py
#
# The Python Imaging Library
# $Id$
#
# WMF stub codec
#
# history:
# 1996-12-14 fl Created
# 2004-02-22 fl Turned into a stub driver
# 2004-02-23 fl Added EMF support
#
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# WMF/EMF reference documentation:
# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf
# http://wvware.sourceforge.net/caolan/index.html
# http://wvware.sourceforge.net/caolan/ora-wmf.html
from __future__ import annotations
from typing import IO
from . import Image, ImageFile
from ._binary import i16le as word
from ._binary import si16le as short
from ._binary import si32le as _long
# Module-level stub handler; installed via register_handler() below.
_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific WMF image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
if hasattr(Image.core, "drawwmf"):
    # install default handler (windows only)

    class WmfHandler(ImageFile.StubHandler):
        # Default rasterizer backed by the C-level Image.core.drawwmf.
        def open(self, im: ImageFile.StubImageFile) -> None:
            im._mode = "RGB"
            self.bbox = im.info["wmf_bbox"]

        def load(self, im: ImageFile.StubImageFile) -> Image.Image:
            im.fp.seek(0)  # rewind
            return Image.frombytes(
                "RGB",
                im.size,
                Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
                "raw",
                "BGR",
                (im.size[0] * 3 + 3) & -4,  # row stride rounded up to a multiple of 4
                -1,  # negative orientation: rows stored bottom-up
            )

    register_handler(WmfHandler())
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"\xd7\xcd\xc6\x9a\x00\x00", b"\x01\x00\x00\x00"))
##
# Image plugin for Windows metafiles.
class WmfStubImageFile(ImageFile.StubImageFile):
    """Stub plugin for Windows (enhanced) metafiles: parses the header for
    size/dpi and defers rasterization to the registered handler."""

    format = "WMF"
    format_description = "Windows Metafile"

    def _open(self) -> None:
        # check placable header
        s = self.fp.read(44)

        if s.startswith(b"\xd7\xcd\xc6\x9a\x00\x00"):
            # placeable windows metafile

            # get units per inch
            inch = word(s, 14)
            if inch == 0:
                msg = "Invalid inch"
                raise ValueError(msg)
            self._inch: tuple[float, float] = inch, inch

            # get bounding box
            x0 = short(s, 6)
            y0 = short(s, 8)
            x1 = short(s, 10)
            y1 = short(s, 12)

            # normalize size to 72 dots per inch
            self.info["dpi"] = 72
            size = (
                (x1 - x0) * self.info["dpi"] // inch,
                (y1 - y0) * self.info["dpi"] // inch,
            )

            self.info["wmf_bbox"] = x0, y0, x1, y1

            # sanity check (standard metafile header)
            if s[22:26] != b"\x01\x00\t\x00":
                msg = "Unsupported WMF file format"
                raise SyntaxError(msg)

        elif s.startswith(b"\x01\x00\x00\x00") and s[40:44] == b" EMF":
            # enhanced metafile

            # get bounding box
            x0 = _long(s, 8)
            y0 = _long(s, 12)
            x1 = _long(s, 16)
            y1 = _long(s, 20)

            # get frame (in 0.01 millimeter units)
            frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)

            size = x1 - x0, y1 - y0

            # calculate dots per inch from bbox and frame
            # (2540 hundredths of a millimeter per inch)
            xdpi = 2540.0 * (x1 - x0) / (frame[2] - frame[0])
            ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1])

            self.info["wmf_bbox"] = x0, y0, x1, y1

            if xdpi == ydpi:
                self.info["dpi"] = xdpi
            else:
                self.info["dpi"] = xdpi, ydpi
            self._inch = xdpi, ydpi

        else:
            msg = "Unsupported file format"
            raise SyntaxError(msg)

        self._mode = "RGB"
        self._size = size

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        # The stub delegates decoding to whatever handler is registered.
        return _handler

    def load(
        self, dpi: float | tuple[float, float] | None = None
    ) -> Image.core.PixelAccess | None:
        # Optionally rescale the reported size for a caller-supplied dpi
        # before delegating to the stub handler.
        if dpi is not None:
            self.info["dpi"] = dpi
            x0, y0, x1, y1 = self.info["wmf_bbox"]
            if not isinstance(dpi, tuple):
                dpi = dpi, dpi
            self._size = (
                int((x1 - x0) * dpi[0] / self._inch[0]),
                int((y1 - y0) * dpi[1] / self._inch[1]),
            )
        return super().load()
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Delegate saving to the registered WMF handler; error out when absent."""
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        raise OSError("WMF save handler not installed")
    handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff

# Register opener, saver and both metafile extensions with the PIL registry.
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"])
venv\Lib\site-packages\PIL\XbmImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# XBM File handling
#
# History:
# 1995-09-08 fl Created
# 1996-11-01 fl Added save support
# 1997-07-07 fl Made header parser more tolerant
# 1997-07-22 fl Fixed yet another parser bug
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)
# 2004-02-24 fl Allow some whitespace before first #define
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import re
from typing import IO
from . import Image, ImageFile
# XBM header: width/height #defines, optional x/y hotspot #defines, then
# anything up to the "_bits[]" array declaration.
# NOTE(review): the named groups (<width>, <height>, <hotspot>, <xhot>, <yhot>)
# were missing in the previous revision (lost markup); without them the pattern
# is a re syntax error, and XbmImageFile._open reads m.group("width") etc.
xbm_head = re.compile(
    rb"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
    b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
    b"(?P<hotspot>"
    b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
    b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
    b")?"
    rb"[\000-\377]*_bits\[]"
)
def _accept(prefix: bytes) -> bool:
return prefix.lstrip().startswith(b"#define")
##
# Image plugin for X11 bitmaps.
class XbmImageFile(ImageFile.ImageFile):
    """Image plugin for X11 bitmaps (XBM)."""

    format = "XBM"
    format_description = "X11 Bitmap"

    def _open(self) -> None:
        """Parse the C-header preamble and set up a single 1-bit tile."""
        assert self.fp is not None
        header = xbm_head.match(self.fp.read(512))
        if header is None:
            raise SyntaxError("not a XBM file")

        # Optional cursor hotspot coordinates.
        if header.group("hotspot"):
            self.info["hotspot"] = (
                int(header.group("xhot")),
                int(header.group("yhot")),
            )

        self._mode = "1"
        self._size = int(header.group("width")), int(header.group("height"))

        # Pixel data starts right where the header match ended.
        self.tile = [ImageFile._Tile("xbm", (0, 0) + self.size, header.end())]
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* (mode "1" only) to *fp* as XBM C source."""
    if im.mode != "1":
        raise OSError(f"cannot write mode {im.mode} as XBM")

    # Header #defines, batched into one ASCII write.
    header = [
        f"#define im_width {im.size[0]}\n",
        f"#define im_height {im.size[1]}\n",
    ]
    hotspot = im.encoderinfo.get("hotspot")
    if hotspot:
        header.append(f"#define im_x_hot {hotspot[0]}\n")
        header.append(f"#define im_y_hot {hotspot[1]}\n")
    fp.write("".join(header).encode("ascii"))

    # Pixel array body, encoded by the "xbm" tile codec.
    fp.write(b"static char im_bits[] = {\n")
    ImageFile._save(im, fp, [ImageFile._Tile("xbm", (0, 0) + im.size)])
    fp.write(b"};\n")
# Register reader, writer, extension and MIME type with the PIL registry.
Image.register_open(XbmImageFile.format, XbmImageFile, _accept)
Image.register_save(XbmImageFile.format, _save)
Image.register_extension(XbmImageFile.format, ".xbm")
Image.register_mime(XbmImageFile.format, "image/xbm")
venv\Lib\site-packages\PIL\XpmImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# XPM File handling
#
# History:
# 1996-12-29 fl Created
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations
import re
from . import Image, ImageFile, ImagePalette
from ._binary import o8
# XPM header values line: '"width height ncolors chars-per-pixel'
# (groups 1-4 are read positionally in XpmImageFile._open)
xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)')
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"/* XPM */")
##
# Image plugin for X11 pixel maps.
class XpmImageFile(ImageFile.ImageFile):
    """Image plugin for X11 pixel maps (XPM)."""

    format = "XPM"
    format_description = "X11 Pixel Map"

    def _open(self) -> None:
        """Parse the XPM magic, values line and colour table.

        Raises SyntaxError for a malformed file and ValueError for a
        colour table entry that cannot be interpreted.
        """
        assert self.fp is not None
        if not _accept(self.fp.read(9)):
            msg = "not an XPM file"
            raise SyntaxError(msg)

        # skip forward to next string
        while True:
            line = self.fp.readline()
            if not line:
                msg = "broken XPM file"
                raise SyntaxError(msg)
            m = xpm_head.match(line)
            if m:
                break

        self._size = int(m.group(1)), int(m.group(2))

        palette_length = int(m.group(3))
        bpp = int(m.group(4))  # characters per pixel

        #
        # load palette description

        palette = {}

        for _ in range(palette_length):
            line = self.fp.readline().rstrip()

            c = line[1 : bpp + 1]  # pixel key: bpp chars after the opening quote
            s = line[bpp + 1 : -2].split()  # remaining tokens: key/value pairs

            for i in range(0, len(s), 2):
                if s[i] == b"c":
                    # process colour key
                    rgb = s[i + 1]
                    if rgb == b"None":
                        self.info["transparency"] = c
                    elif rgb.startswith(b"#"):
                        # hex colour like "#RRGGBB"; split into R, G, B bytes
                        rgb_int = int(rgb[1:], 16)
                        palette[c] = (
                            o8((rgb_int >> 16) & 255)
                            + o8((rgb_int >> 8) & 255)
                            + o8(rgb_int & 255)
                        )
                    else:
                        # unknown colour (e.g. symbolic names are unsupported)
                        msg = "cannot read this XPM file"
                        raise ValueError(msg)
                    break

            else:
                # missing colour key ("c" token never found on this entry)
                msg = "cannot read this XPM file"
                raise ValueError(msg)

        args: tuple[int, dict[bytes, bytes] | tuple[bytes, ...]]
        if palette_length > 256:
            # too many colours for a palettized image; decode straight to RGB
            self._mode = "RGB"
            args = (bpp, palette)
        else:
            self._mode = "P"
            self.palette = ImagePalette.raw("RGB", b"".join(palette.values()))
            args = (bpp, tuple(palette.keys()))

        self.tile = [ImageFile._Tile("xpm", (0, 0) + self.size, self.fp.tell(), args)]

    def load_read(self, read_bytes: int) -> bytes:
        #
        # load all image data in one chunk

        xsize, ysize = self.size

        assert self.fp is not None
        # drop the leading quote of each row, pad short rows to full width
        s = [self.fp.readline()[1 : xsize + 1].ljust(xsize) for i in range(ysize)]

        return b"".join(s)
class XpmDecoder(ImageFile.PyDecoder):
    """Pure-Python decoder implementing the "xpm" tile codec registered below."""

    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        """Read pixel rows from the underlying fd and emit raw image data.

        ``self.args`` carries (chars-per-pixel, palette) as set up by
        XpmImageFile._open: a key->RGB-bytes dict for "RGB" mode, or a
        key tuple (indexed by position) for "P" mode.
        """
        assert self.fd is not None

        data = bytearray()
        bpp, palette = self.args
        dest_length = self.state.xsize * self.state.ysize
        if self.mode == "RGB":
            dest_length *= 3  # three bytes per pixel
        pixel_header = False
        while len(data) < dest_length:
            line = self.fd.readline()
            if not line:
                break
            # skip the first "/* pixels */" marker line, if present
            if line.rstrip() == b"/* pixels */" and not pixel_header:
                pixel_header = True
                continue
            # keep only the quoted portion of the row
            line = b'"'.join(line.split(b'"')[1:-1])
            for i in range(0, len(line), bpp):
                key = line[i : i + bpp]
                if self.mode == "RGB":
                    data += palette[key]  # direct RGB triple lookup
                else:
                    data += o8(palette.index(key))  # palette index for mode "P"
        self.set_as_raw(bytes(data))
        return -1, 0
#
# Registry

Image.register_open(XpmImageFile.format, XpmImageFile, _accept)
Image.register_decoder("xpm", XpmDecoder)  # tile codec used by XpmImageFile._open
Image.register_extension(XpmImageFile.format, ".xpm")
Image.register_mime(XpmImageFile.format, "image/xpm")
venv\Lib\site-packages\PIL\XVThumbImagePlugin.py
#
# The Python Imaging Library.
# $Id$
#
# XV Thumbnail file handler by Charles E. "Gene" Cash
# (gcash@magicnet.net)
#
# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV,
# available from ftp://ftp.cis.upenn.edu/pub/xv/
#
# history:
# 98-08-15 cec created (b/w only)
# 98-12-09 cec added color palette
# 98-12-28 fl added to PIL (with only a few very minor modifications)
#
# To do:
# FIXME: make save work (this requires quantization support)
#
from __future__ import annotations
from . import Image, ImageFile, ImagePalette
from ._binary import o8
_MAGIC = b"P7 332"
# standard color palette for thumbnails (RGB332)
PALETTE = b""
for r in range(8):
for g in range(8):
for b in range(4):
PALETTE = PALETTE + (
o8((r * 255) // 7) + o8((g * 255) // 7) + o8((b * 255) // 3)
)
def _accept(prefix: bytes) -> bool:
    """Check for the XV thumbnail magic string ("P7 332")."""
    return prefix[: len(_MAGIC)] == _MAGIC
##
# Image plugin for XV thumbnail images.
class XVThumbImageFile(ImageFile.ImageFile):
    """Image plugin for XV thumbnail images ("P7 332" format, read-only)."""

    format = "XVThumb"
    format_description = "XV thumbnail image"

    def _open(self) -> None:
        # check magic
        assert self.fp is not None
        if not _accept(self.fp.read(6)):
            msg = "not an XV thumbnail file"
            raise SyntaxError(msg)

        # Skip to beginning of next line
        self.fp.readline()

        # skip info comments
        while True:
            s = self.fp.readline()
            if not s:
                msg = "Unexpected EOF reading XV thumbnail file"
                raise SyntaxError(msg)
            if s[0] != 35:  # ie. when not a comment: '#'
                break

        # parse header line (already read); fields: width, height, [maxval]
        s = s.strip().split()

        self._mode = "P"
        self._size = int(s[0]), int(s[1])

        # fixed RGB332 palette defined at module level
        self.palette = ImagePalette.raw("RGB", PALETTE)

        self.tile = [
            ImageFile._Tile("raw", (0, 0) + self.size, self.fp.tell(), self.mode)
        ]
# --------------------------------------------------------------------

# Read-only format: only an opener is registered (no save support).
Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept)
venv\Lib\site-packages\PIL\_binary.py
#
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
"""Binary input/output support routines."""
from __future__ import annotations
from struct import pack, unpack_from
# NOTE(review): the struct format strings ("<H", "<h", "<I", "<i") and the
# intervening "def" lines were lost in the previous revision (markup
# stripping fused several functions); reconstructed below.
def i8(c: bytes) -> int:
    """Return the first byte of *c* as an unsigned integer."""
    return c[0]


def o8(i: int) -> bytes:
    """Pack the low 8 bits of *i* into a single byte."""
    return bytes((i & 255,))


# Input, le = little endian, be = big endian
def i16le(c: bytes, o: int = 0) -> int:
    """
    Converts a 2-bytes (16 bits) string to an unsigned integer.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from("<H", c, o)[0]


def si16le(c: bytes, o: int = 0) -> int:
    """
    Converts a 2-bytes (16 bits) string to a signed integer.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from("<h", c, o)[0]


def si16be(c: bytes, o: int = 0) -> int:
    """
    Converts a 2-bytes (16 bits) string to a signed integer, big endian.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from(">h", c, o)[0]


def i32le(c: bytes, o: int = 0) -> int:
    """
    Converts a 4-bytes (32 bits) string to an unsigned integer.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from("<I", c, o)[0]


def si32le(c: bytes, o: int = 0) -> int:
    """
    Converts a 4-bytes (32 bits) string to a signed integer.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from("<i", c, o)[0]


def si32be(c: bytes, o: int = 0) -> int:
    """
    Converts a 4-bytes (32 bits) string to a signed integer, big endian.

    :param c: string containing bytes to convert
    :param o: offset of bytes to convert in string
    """
    return unpack_from(">i", c, o)[0]


def i16be(c: bytes, o: int = 0) -> int:
    """Converts a 2-bytes (16 bits) string to an unsigned integer, big endian."""
    return unpack_from(">H", c, o)[0]


def i32be(c: bytes, o: int = 0) -> int:
    """Converts a 4-bytes (32 bits) string to an unsigned integer, big endian."""
    return unpack_from(">I", c, o)[0]


# Output, le = little endian, be = big endian
def o16le(i: int) -> bytes:
    return pack("<H", i)


def o32le(i: int) -> bytes:
    return pack("<I", i)


def o16be(i: int) -> bytes:
    return pack(">H", i)


def o32be(i: int) -> bytes:
    return pack(">I", i)
venv\Lib\site-packages\PIL\_deprecate.py
from __future__ import annotations
import warnings
from . import __version__
def deprecate(
    deprecated: str,
    when: int | None,
    replacement: str | None = None,
    *,
    action: str | None = None,
    plural: bool = False,
    stacklevel: int = 3,
) -> None:
    """Emit a :class:`DeprecationWarning` for *deprecated*.

    :param deprecated: Name of thing to be deprecated.
    :param when: Pillow major version to be removed in (``None`` for
        "a future version").
    :param replacement: Name of replacement, appended as
        "Use <replacement> instead.".
    :param action: Custom call to action, appended instead of *replacement*
        (at most one of the two may be given).
    :param plural: if the deprecated thing is plural, needing "are"
        instead of "is".
    :param stacklevel: passed through to :func:`warnings.warn`.

    Raises ``RuntimeError`` when *when* is at or below the current major
    version (the deprecation is overdue), and ``ValueError`` for an
    unknown removal version or conflicting *replacement*/*action*.
    """
    verb = "are" if plural else "is"

    if when is None:
        removed = "a future version"
    elif when <= int(__version__.split(".")[0]):
        raise RuntimeError(f"{deprecated} {verb} deprecated and should be removed.")
    elif when == 12:
        removed = "Pillow 12 (2025-10-15)"
    elif when == 13:
        removed = "Pillow 13 (2026-10-15)"
    else:
        raise ValueError(f"Unknown removal version: {when}. Update {__name__}?")

    if replacement and action:
        raise ValueError("Use only one of 'replacement' and 'action'")

    if replacement:
        suffix = f". Use {replacement} instead."
    elif action:
        suffix = f". {action.rstrip('.')}."
    else:
        suffix = ""

    warnings.warn(
        f"{deprecated} {verb} deprecated and will be removed in {removed}{suffix}",
        DeprecationWarning,
        stacklevel=stacklevel,
    )
venv\Lib\site-packages\PIL\_tkinter_finder.py
"""Find compiled module linking to Tcl / Tk libraries"""
from __future__ import annotations
import sys
import tkinter
# The private C extension module backing tkinter.
tk = getattr(tkinter, "_tkinter")
try:
    if hasattr(sys, "pypy_find_executable"):
        # PyPy exposes the Tcl/Tk bindings through a cffi module.
        TKINTER_LIB = tk.tklib_cffi.__file__
    else:
        TKINTER_LIB = tk.__file__
except AttributeError:
    # _tkinter may be compiled directly into Python, in which case __file__ is
    # not available. load_tkinter_funcs will check the binary first in any case.
    TKINTER_LIB = None

tk_version = str(tkinter.TkVersion)
venv\Lib\site-packages\PIL\_typing.py
from __future__ import annotations
import os
import sys
from collections.abc import Sequence
from typing import Any, Protocol, TypeVar, Union
# False at runtime; guards imports that exist only for static type checkers.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from numbers import _IntegralLike as IntegralLike

    try:
        import numpy.typing as npt

        NumpyArray = npt.NDArray[Any]  # requires numpy>=1.21
    except (ImportError, AttributeError):
        pass

# Version-dependent fallbacks so annotations work on older Pythons.
if sys.version_info >= (3, 13):
    from types import CapsuleType
else:
    CapsuleType = object
if sys.version_info >= (3, 12):
    from collections.abc import Buffer
else:
    Buffer = Any
if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:
    try:
        from typing_extensions import TypeGuard
    except ImportError:

        # Minimal stand-in: subscripting yields plain bool.
        class TypeGuard:  # type: ignore[no-redef]
            def __class_getitem__(cls, item: Any) -> type[bool]:
                return bool


# Either a flat sequence of coordinates or a sequence of point sequences.
Coords = Union[Sequence[float], Sequence[Sequence[float]]]


_T_co = TypeVar("_T_co", covariant=True)


class SupportsRead(Protocol[_T_co]):
    """Structural type for any object exposing a file-like ``read()``."""

    def read(self, length: int = ..., /) -> _T_co: ...


StrOrBytesPath = Union[str, bytes, os.PathLike[str], os.PathLike[bytes]]


__all__ = ["Buffer", "IntegralLike", "StrOrBytesPath", "SupportsRead", "TypeGuard"]
venv\Lib\site-packages\PIL\_util.py
from __future__ import annotations
import os
from typing import Any, NoReturn
from ._typing import StrOrBytesPath, TypeGuard
def is_path(f: Any) -> TypeGuard[StrOrBytesPath]:
    """True when *f* is a filesystem-path-like value (str, bytes or PathLike)."""
    path_types = (bytes, str, os.PathLike)
    return isinstance(f, path_types)
class DeferredError:
    """Proxy that stores an exception and re-raises it on attribute access."""

    def __init__(self, ex: BaseException):
        # Kept under the public name ``ex`` so callers can still inspect it.
        self.ex = ex

    def __getattr__(self, elt: str) -> NoReturn:
        # Any lookup that misses the instance dict re-raises the stored error.
        raise self.ex

    @staticmethod
    def new(ex: BaseException) -> Any:
        """
        Creates an object that raises the wrapped exception ``ex`` when used,
        and casts it to :py:obj:`~typing.Any` type.
        """
        return DeferredError(ex)
venv\Lib\site-packages\PIL\_version.py
# Master version for Pillow
from __future__ import annotations
__version__ = "11.3.0"
venv\Lib\site-packages\PIL\__init__.py
"""Pillow (Fork of the Python Imaging Library)
Pillow is the friendly PIL fork by Jeffrey A. Clark and contributors.
https://github.com/python-pillow/Pillow/
Pillow is forked from PIL 1.1.7.
PIL is the Python Imaging Library by Fredrik Lundh and contributors.
Copyright (c) 1999 by Secret Labs AB.
Use PIL.__version__ for this Pillow version.
;-)
"""
from __future__ import annotations
from . import _version
# VERSION was removed in Pillow 6.0.0.
# PILLOW_VERSION was removed in Pillow 9.0.0.
# Use __version__ instead.
__version__ = _version.__version__
del _version
# Module names of the image plugins bundled with Pillow; presumably imported
# lazily by the Image module during plugin initialisation (not visible here).
_plugins = [
    "AvifImagePlugin",
    "BlpImagePlugin",
    "BmpImagePlugin",
    "BufrStubImagePlugin",
    "CurImagePlugin",
    "DcxImagePlugin",
    "DdsImagePlugin",
    "EpsImagePlugin",
    "FitsImagePlugin",
    "FliImagePlugin",
    "FpxImagePlugin",
    "FtexImagePlugin",
    "GbrImagePlugin",
    "GifImagePlugin",
    "GribStubImagePlugin",
    "Hdf5StubImagePlugin",
    "IcnsImagePlugin",
    "IcoImagePlugin",
    "ImImagePlugin",
    "ImtImagePlugin",
    "IptcImagePlugin",
    "JpegImagePlugin",
    "Jpeg2KImagePlugin",
    "McIdasImagePlugin",
    "MicImagePlugin",
    "MpegImagePlugin",
    "MpoImagePlugin",
    "MspImagePlugin",
    "PalmImagePlugin",
    "PcdImagePlugin",
    "PcxImagePlugin",
    "PdfImagePlugin",
    "PixarImagePlugin",
    "PngImagePlugin",
    "PpmImagePlugin",
    "PsdImagePlugin",
    "QoiImagePlugin",
    "SgiImagePlugin",
    "SpiderImagePlugin",
    "SunImagePlugin",
    "TgaImagePlugin",
    "TiffImagePlugin",
    "WebPImagePlugin",
    "WmfImagePlugin",
    "XbmImagePlugin",
    "XpmImagePlugin",
    "XVThumbImagePlugin",
]
class UnidentifiedImageError(OSError):
    """
    Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified.

    If a PNG image raises this error, setting
    :data:`.ImageFile.LOAD_TRUNCATED_IMAGES` to true may allow the image to be
    opened after all — the setting ignores missing data and checksum failures.
    """
venv\Lib\site-packages\PIL\__main__.py
from __future__ import annotations

import sys

from .features import pilinfo

# Report Pillow's environment and features; supported_formats is False when
# "--report" is passed — presumably suppressing the per-format listing
# (confirm against features.pilinfo).
pilinfo(supported_formats="--report" not in sys.argv)
venv\Lib\site-packages\pip\__init__.py
from __future__ import annotations
__version__ = "25.2"
def main(args: list[str] | None = None) -> int:
    """This is an internal API only meant for use by pip's own console scripts.

    For additional details, see https://github.com/pypa/pip/issues/7498.
    """
    # deferred import — presumably to keep bare "import pip" lightweight
    from pip._internal.utils.entrypoints import _wrapper

    return _wrapper(args)
venv\Lib\site-packages\pip\__main__.py
import os
import sys
# Remove '' and current working directory from the first entry
# of sys.path, if present to avoid using current directory
# in pip commands check, freeze, install, list and show,
# when invoked as python -m pip
if sys.path[0] in ("", os.getcwd()):
    sys.path.pop(0)

# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == "":
    # __file__ is pip-*.whl/pip/__main__.py
    # first dirname call strips of '/__main__.py', second strips off '/pip'
    # Resulting path is the name of the wheel itself
    # Add that to sys.path so we can import pip
    path = os.path.dirname(os.path.dirname(__file__))
    sys.path.insert(0, path)

if __name__ == "__main__":
    # Import deferred until after the sys.path fix-ups above have run.
    from pip._internal.cli.main import main as _main

    sys.exit(_main())
venv\Lib\site-packages\pip\__pip-runner__.py
"""Execute exactly this copy of pip, within a different environment.
This file is named as it is, to ensure that this module can't be imported via
an import statement.
"""
# /!\ This version compatibility check section must be Python 2 compatible. /!\
import sys
# Copied from pyproject.toml
PYTHON_REQUIRES = (3, 9)
def version_str(version):  # type: ignore
    """Format a version tuple such as (3, 9) as the string '3.9'."""
    parts = [str(component) for component in version]
    return ".".join(parts)
if sys.version_info[:2] < PYTHON_REQUIRES:
raise SystemExit(
"This version of pip does not support python {} (requires >={}).".format(
version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
)
)
# From here on, we can use Python 3 features, but the syntax must remain
# Python 2 compatible.
import runpy # noqa: E402
from importlib.machinery import PathFinder # noqa: E402
from os.path import dirname # noqa: E402
PIP_SOURCES_ROOT = dirname(dirname(__file__))
class PipImportRedirectingFinder:
    """Meta-path finder that forces ``import pip`` to resolve to this copy.

    Installed on ``sys.meta_path`` so that, even inside a different
    environment, the ``pip`` package that sits next to this runner is the
    one imported.
    """

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):  # type: ignore
        # Fixed: the first parameter of this @classmethod was named "self",
        # which is misleading — it receives the class, so name it "cls".
        # Only redirect the top-level "pip" package; anything else falls
        # through to the normal import machinery by returning None.
        if fullname != "pip":
            return None

        spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
        assert spec, (PIP_SOURCES_ROOT, fullname)
        return spec
# Highest priority: consulted before every other finder on the meta path.
sys.meta_path.insert(0, PipImportRedirectingFinder())

assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
# Re-execute the redirected pip package as if run with "python -m pip".
runpy.run_module("pip", run_name="__main__", alter_sys=True)
venv\Lib\site-packages\pyparsing\actions.py
# actions.py
from __future__ import annotations
from typing import Union, Callable, Any
from .exceptions import ParseException
from .util import col, replaced_by_pep8
from .results import ParseResults
# A parse action may accept 0-3 positional arguments:
# (), (toks), (loc, toks), or (s, loc, toks).
ParseAction = Union[
    Callable[[], Any],
    Callable[[ParseResults], Any],
    Callable[[int, ParseResults], Any],
    Callable[[str, int, ParseResults], Any],
]
class OnlyOnce:
    """
    Wrapper for parse actions, to ensure they are only called once.

    Note: parse action signature must include all 3 arguments.  Call
    :meth:`reset` to allow the action to fire again.
    """

    def __init__(self, method_call: Callable[[str, int, ParseResults], Any]) -> None:
        from .core import _trim_arity

        self.callable = _trim_arity(method_call)
        self.called = False

    def __call__(self, s: str, l: int, t: ParseResults) -> ParseResults:
        # Guard clause: a second call without reset() is an error.
        if self.called:
            raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
        result = self.callable(s, l, t)
        # Mark as consumed only after a successful invocation, so a raising
        # action can be retried — matching the original semantics.
        self.called = True
        return result

    def reset(self):
        """
        Allow the associated parse action to be called once more.
        """
        self.called = False
def match_only_at_col(n: int) -> ParseAction:
    """
    Helper method for defining parse actions that require matching at
    a specific column in the input text.
    """

    def verify_col(strg: str, locn: int, toks: ParseResults) -> None:
        # col() maps the match location to its column within the line
        if col(locn, strg) != n:
            raise ParseException(strg, locn, f"matched token not at column {n}")

    return verify_col
def replace_with(repl_str: str) -> ParseAction:
    """
    Helper method for common parse actions that simply return
    a literal value. Especially useful when used with
    :class:`transform_string` ().

    Example::

        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
        term = na | num

        term[1, ...].parse_string("324 234 N/A 234")  # -> [324, 234, nan, 234]
    """

    def _replace(s, l, t):
        # ignore the match entirely; always yield the fixed value
        return [repl_str]

    return _replace
def remove_quotes(s: str, l: int, t: ParseResults) -> Any:
    """
    Helper parse action for removing quotation marks from parsed
    quoted strings: strips the first and last character of the first
    parsed token.

    Example::

        # by default, quotation marks are included in parsed results
        quoted_string.parse_string("'Now is the Winter of our Discontent'")
        # -> ["'Now is the Winter of our Discontent'"]

        # use remove_quotes to strip quotation marks from parsed results
        quoted_string.set_parse_action(remove_quotes)
        quoted_string.parse_string("'Now is the Winter of our Discontent'")
        # -> ["Now is the Winter of our Discontent"]
    """
    quoted = t[0]
    return quoted[1:-1]
def with_attribute(*args: tuple[str, str], **attr_dict) -> ParseAction:
    """
    Helper to create a validating parse action to be used with start
    tags created with :class:`make_xml_tags` or :class:`make_html_tags`.
    Use ``with_attribute`` to qualify a starting tag with a required
    attribute value, to avoid false matches on common tags.

    Call ``with_attribute`` with a series of attribute names and
    values. Specify the list of filter attributes names and values as:

    - keyword arguments, as in ``(align="right")``, or
    - as an explicit dict with ``**`` operator, when an attribute
      name is also a Python reserved word, as in
      ``**{"class":"Customer", "align":"right"}``
    - a list of name-value tuples, as in
      ``(("ns1:class", "Customer"), ("ns2:align", "right"))``

    For attribute names with a namespace prefix, you must use the second
    form. Attribute names are matched insensitive to upper/lower case.

    If just testing for ``class`` (with or without a namespace), use
    :class:`with_class`.

    To verify that the attribute exists, but without specifying a value,
    pass ``with_attribute.ANY_VALUE`` as the value.

    The returned parse action raises :class:`ParseException` when a
    required attribute is missing or has an unexpected value.
    """
    # positional tuples take precedence; otherwise use the keyword mapping
    attrs_list: list[tuple[str, str]] = []
    if args:
        attrs_list.extend(args)
    else:
        attrs_list.extend(attr_dict.items())

    def pa(s: str, l: int, tokens: ParseResults) -> None:
        for attrName, attrValue in attrs_list:
            if attrName not in tokens:
                raise ParseException(s, l, "no matching attribute " + attrName)
            if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:  # type: ignore [attr-defined]
                raise ParseException(
                    s,
                    l,
                    f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}",
                )

    return pa


# Sentinel value: require the attribute to exist, with any value.
with_attribute.ANY_VALUE = object()  # type: ignore [attr-defined]
def with_class(classname: str, namespace: str = "") -> ParseAction:
"""
Simplified version of :class:`with_attribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
'
td, td_end = make_html_tags("TD")
table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
print(table_text.parse_string(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transform_string(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, exclude_chars=",")
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
)
)
.streamline()
.set_name("commaItem")
)
comma_separated_list = DelimitedList(
Opt(quoted_string.copy() | _commasepitem, default="")
).set_name("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
"""Parse action to convert tokens to upper case."""
downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
"""Parse action to convert tokens to lower case."""
# fmt: off
url = Regex(
# https://mathiasbynens.be/demo/url-regex
# https://gist.github.com/dperini/729294
r"(?P" +
# protocol identifier (optional)
# short syntax // still required
r"(?:(?:(?Phttps?|ftp):)?\/\/)" +
# user:pass BasicAuth (optional)
r"(?:(?P\S+(?::\S*)?)@)?" +
r"(?P" +
# IP address exclusion
# private & local networks
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0
# excludes network & broadcast addresses
# (first & last IP address of each class)
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
r"|" +
# host & domain names, may end with dot
# can be replaced by a shortest alternative
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
r"(?:" +
r"(?:" +
r"[a-z0-9\u00a1-\uffff]" +
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
r")?" +
r"[a-z0-9\u00a1-\uffff]\." +
r")+" +
# TLD identifier name, may end with dot
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
r")" +
# port number (optional)
r"(:(?P\d{2,5}))?" +
# resource path (optional)
r"(?P\/[^?# ]*)?" +
# query string (optional)
r"(\?(?P[^#]*))?" +
# fragment (optional)
r"(#(?P\S*))?" +
r")"
).set_name("url")
"""URL (http/https/ftp scheme)"""
# fmt: on
# pre-PEP8 compatibility names
# fmt: off
convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer))
convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float))
convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date))
convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime))
stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags))
upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens))
downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens))
# fmt: on
_builtin_exprs = [
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
]
venv\Lib\site-packages\pyparsing\core.py
#
# core.py
#
from __future__ import annotations
import collections.abc
from collections import deque
import os
import typing
from typing import (
Any,
Callable,
Generator,
NamedTuple,
Sequence,
TextIO,
Union,
cast,
)
from abc import ABC, abstractmethod
from enum import Enum
import string
import copy
import warnings
import re
import sys
from collections.abc import Iterable
import traceback
import types
from operator import itemgetter
from functools import wraps
from threading import RLock
from pathlib import Path
from .util import (
_FifoCache,
_UnboundedCache,
__config_flags,
_collapse_string_to_ranges,
_escape_regex_range_chars,
_flatten,
LRUMemo as _LRUMemo,
UnboundedMemo as _UnboundedMemo,
replaced_by_pep8,
)
from .exceptions import *
from .actions import *
from .results import ParseResults, _ParseResultsWithOffset
from .unicode import pyparsing_unicode
_MAX_INT = sys.maxsize
str_type: tuple[type, ...] = (str, bytes)
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from functools import cached_property
class __compat__(__config_flags):
    """
    A cross-version compatibility configuration for pyparsing features that will be
    released in a future version. By setting values in this configuration to True,
    those features can be enabled in prior versions for compatibility development
    and testing.

    - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
      of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
      maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
      behavior
    """

    _type_desc = "compatibility"

    collect_all_And_tokens = True

    # Snapshot of the public flag names defined above (locals() at class-body time).
    _all_names = [__ for __ in locals() if not __.startswith("_")]
    # Flags that may no longer be changed (see note in the docstring).
    _fixed_names = """
        collect_all_And_tokens
        """.split()
class __diag__(__config_flags):
    """Global diagnostic flags; see :class:`Diagnostics` for descriptions."""

    _type_desc = "diagnostic"

    warn_multiple_tokens_in_named_alternation = False
    warn_ungrouped_named_tokens_in_collection = False
    warn_name_set_on_empty_Forward = False
    warn_on_parse_using_empty_Forward = False
    warn_on_assignment_to_Forward = False
    warn_on_multiple_string_args_to_oneof = False
    warn_on_match_first_with_lshift_operator = False
    enable_debug_on_named_expressions = False

    # Snapshots of flag names, partitioned by prefix.
    _all_names = [__ for __ in locals() if not __.startswith("_")]
    _warning_names = [name for name in _all_names if name.startswith("warn")]
    _debug_names = [name for name in _all_names if name.startswith("enable_debug")]

    @classmethod
    def enable_all_warnings(cls) -> None:
        # Turn on every "warn_*" flag at once.
        for name in cls._warning_names:
            cls.enable(name)
class Diagnostics(Enum):
    """
    Diagnostic configuration (all default to disabled)

    - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
      name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
    - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
      name is defined on a containing expression with ungrouped subexpressions that also
      have results names
    - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
      with a results name, but has no contents defined
    - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
      defined in a grammar but has never had an expression attached to it
    - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
      but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
    - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
      incorrectly called with multiple str arguments
    - ``warn_on_match_first_with_lshift_operator`` - flag to enable warnings when ``'<<'`` is used
      with a :class:`MatchFirst` (presumably ``'<<='`` or ``'|'`` was intended)
    - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
      calls to :class:`ParserElement.set_name`

    Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
    All warnings can be enabled by calling :class:`enable_all_warnings`.
    """
    warn_multiple_tokens_in_named_alternation = 0
    warn_ungrouped_named_tokens_in_collection = 1
    warn_name_set_on_empty_Forward = 2
    warn_on_parse_using_empty_Forward = 3
    warn_on_assignment_to_Forward = 4
    warn_on_multiple_string_args_to_oneof = 5
    warn_on_match_first_with_lshift_operator = 6
    enable_debug_on_named_expressions = 7
def enable_diag(diag_enum: Diagnostics) -> None:
    """
    Turn on one global pyparsing diagnostic flag, identified by a member of
    :class:`Diagnostics`.
    """
    flag_name = diag_enum.name
    __diag__.enable(flag_name)
def disable_diag(diag_enum: Diagnostics) -> None:
    """
    Turn off one global pyparsing diagnostic flag, identified by a member of
    :class:`Diagnostics`.
    """
    flag_name = diag_enum.name
    __diag__.disable(flag_name)
def enable_all_warnings() -> None:
    """
    Turn on every global pyparsing diagnostic warning flag at once
    (see :class:`Diagnostics`).
    """
    __diag__.enable_all_warnings()
# __config_flags is an internal abstract base; remove it from the module
# namespace now that the concrete __compat__/__diag__ classes are built
del __config_flags
def _should_enable_warnings(
cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
":"
)[:5]
if not w_action.lower().startswith("i") and (
not (w_message or w_category or w_module) or w_module == "pyparsing"
):
enable = True
elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
enable = False
return enable
# honor -W command-line options or the PYPARSINGENABLEALLWARNINGS
# environment variable set before pyparsing was imported
if _should_enable_warnings(
    sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
):
    enable_all_warnings()

# build list of single arg builtins, that can be used as parse actions
# fmt: off
_single_arg_builtins = {
    sum, len, sorted, reversed, list, tuple, set, any, all, min, max
}
# fmt: on

_generatorType = types.GeneratorType

# type aliases describing parse-action and debug-action callables
ParseImplReturnType = tuple[int, Any]
PostParseReturnType = Union[ParseResults, Sequence[ParseResults]]
# parse conditions may take 0-3 arguments, drawn from (instring, loc, tokens)
ParseCondition = Union[
    Callable[[], bool],
    Callable[[ParseResults], bool],
    Callable[[int, ParseResults], bool],
    Callable[[str, int, ParseResults], bool],
]
ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
DebugSuccessAction = Callable[
    [str, int, int, "ParserElement", ParseResults, bool], None
]
DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]

# commonly-used character sets for building Word expressions
alphas: str = string.ascii_uppercase + string.ascii_lowercase
identchars: str = pyparsing_unicode.Latin1.identchars
identbodychars: str = pyparsing_unicode.Latin1.identbodychars
nums: str = "0123456789"
hexnums: str = nums + "ABCDEFabcdef"
alphanums: str = alphas + nums
# every printable character except whitespace
printables: str = "".join([c for c in string.printable if c not in string.whitespace])
class _ParseActionIndexError(Exception):
"""
Internal wrapper around IndexError so that IndexErrors raised inside
parse actions aren't misinterpreted as IndexErrors raised inside
ParserElement parseImpl methods.
"""
def __init__(self, msg: str, exc: BaseException) -> None:
self.msg: str = msg
self.exc: BaseException = exc
# cached frame info used by _trim_arity to recognize TypeErrors that come
# from its own arity probing (as opposed to TypeErrors raised inside the
# user's parse action)
_trim_arity_call_line: traceback.StackSummary = None  # type: ignore[assignment]
pa_call_line_synth = ()
def _trim_arity(func, max_limit=3):
    """decorator to trim function calls to match the arity of the target"""
    global _trim_arity_call_line, pa_call_line_synth
    if func in _single_arg_builtins:
        # single-arg builtins just get the tokens argument
        return lambda s, l, t: func(t)
    # limit = number of leading (s, l, t) arguments to drop; discovered by probing
    limit = 0
    found_arity = False
    # synthesize what would be returned by traceback.extract_stack at the call to
    # user's parse action 'func', so that we don't incur call penalty at parse time
    # fmt: off
    LINE_DIFF = 9
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    _trim_arity_call_line = _trim_arity_call_line or traceback.extract_stack(limit=2)[-1]
    pa_call_line_synth = pa_call_line_synth or (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
    def wrapper(*args):
        nonlocal found_arity, limit
        if found_arity:
            return func(*args[limit:])
        while 1:
            try:
                ret = func(*args[limit:])
                found_arity = True
                return ret
            except TypeError as te:
                # re-raise TypeErrors if they did not come from our arity testing
                if found_arity:
                    raise
                else:
                    tb = te.__traceback__
                    frames = traceback.extract_tb(tb, limit=2)
                    frame_summary = frames[-1]
                    # the TypeError is ours only if it was raised at the exact
                    # (filename, lineno) synthesized above for the func call
                    trim_arity_type_error = (
                        [frame_summary[:2]][-1][:2] == pa_call_line_synth
                    )
                    del tb
                    if trim_arity_type_error:
                        if limit < max_limit:
                            # probe again with one fewer leading argument
                            limit += 1
                            continue
                        raise
            except IndexError as ie:
                # wrap IndexErrors inside a _ParseActionIndexError
                raise _ParseActionIndexError(
                    "IndexError raised in parse action", ie
                ).with_traceback(None)
    # fmt: on
    # copy func name to wrapper for sensible debug output
    # (can't use functools.wraps, since that messes with function signature)
    func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
    wrapper.__name__ = func_name
    wrapper.__doc__ = func.__doc__
    return wrapper
def condition_as_parse_action(
    fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False
) -> ParseAction:
    """
    Wrap a simple ``True``/``False`` predicate so it can be used wherever a
    parse action is required and :class:`ParserElement.add_condition` cannot
    be used (such as when adding a condition to an operator level in
    :class:`infix_notation`).

    Optional keyword arguments:

    - ``message`` - custom text for the raised exception
    - ``fatal`` - if True, raise :class:`ParseFatalException` to stop parsing
      immediately; otherwise raise :class:`ParseException`
    """
    if message is None:
        message = "failed user-defined condition"
    raise_class = ParseFatalException if fatal else ParseException
    fn = _trim_arity(fn)

    @wraps(fn)
    def pa(s, l, t):
        # raise unless the (arity-trimmed) predicate approves the match
        if not bool(fn(s, l, t)):
            raise raise_class(s, l, message)

    return pa
def _default_start_debug_action(
    instring: str, loc: int, expr: ParserElement, cache_hit: bool = False
):
    """Default debug action printed when a match of ``expr`` is attempted."""
    marker = "*" if cache_hit else ""
    out_lines = [
        f"{marker}Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})",
        f"  {line(loc, instring)}",
        f"  {'^':>{col(loc, instring)}}",
    ]
    print("\n".join(out_lines))
def _default_success_debug_action(
    instring: str,
    startloc: int,
    endloc: int,
    expr: ParserElement,
    toks: ParseResults,
    cache_hit: bool = False,
):
    """Default debug action printed after ``expr`` matches successfully."""
    marker = "*" if cache_hit else ""
    print(f"{marker}Matched {expr} -> {toks.as_list()}")
def _default_exception_debug_action(
    instring: str,
    loc: int,
    expr: ParserElement,
    exc: Exception,
    cache_hit: bool = False,
):
    """Default debug action printed when a match of ``expr`` fails."""
    marker = "*" if cache_hit else ""
    print(f"{marker}Match {expr} failed, {type(exc).__name__} raised: {exc}")
def null_debug_action(*args):
    """A no-op debug action, used to suppress debugging output during parsing."""
    return None
class ParserElement(ABC):
    """Abstract base level parser element class."""
    # characters skipped as whitespace before matching, by default
    DEFAULT_WHITE_CHARS: str = " \n\t\r"
    # if True, parse exceptions keep their full stack trace
    verbose_stacktrace: bool = False
    # class used to wrap plain string literals appearing in expressions
    # (set via inline_literals_using)
    _literalStringClass: type = None  # type: ignore[assignment]
@staticmethod
def set_default_whitespace_chars(chars: str) -> None:
    r"""
    Overrides the default whitespace chars

    Example::

        # default whitespace chars are space, tab and newline
        Word(alphas)[1, ...].parse_string("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']

        # change to just treat newline as significant
        ParserElement.set_default_whitespace_chars(" \t")
        Word(alphas)[1, ...].parse_string("abc def\nghi jkl")  # -> ['abc', 'def']
    """
    ParserElement.DEFAULT_WHITE_CHARS = chars
    # update whitespace for all parse expressions defined in this module
    for expr in _builtin_exprs:
        if expr.copyDefaultWhiteChars:
            expr.whiteChars = set(chars)
@staticmethod
def inline_literals_using(cls: type) -> None:
    """
    Set class to be used for inclusion of string literals into a parser.

    Example::

        # default literal class used is Literal
        integer = Word(nums)
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        date_str.parse_string("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

        # change to Suppress
        ParserElement.inline_literals_using(Suppress)
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

        date_str.parse_string("1999/12/31")  # -> ['1999', '12', '31']
    """
    ParserElement._literalStringClass = cls
@classmethod
def using_each(cls, seq, **class_kwargs):
    """
    Yields a sequence of ``class(obj, **class_kwargs)`` for obj in seq.

    Example::

        LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};")
    """
    for item in seq:
        yield cls(item, **class_kwargs)
class DebugActions(NamedTuple):
    # callbacks invoked at match attempt / match success / match failure
    debug_try: typing.Optional[DebugStartAction]
    debug_match: typing.Optional[DebugSuccessAction]
    debug_fail: typing.Optional[DebugExceptionAction]
def __init__(self, savelist: bool = False) -> None:
    # parse actions and optional fail action attached to this expression
    self.parseAction: list[ParseAction] = list()
    self.failAction: typing.Optional[ParseFailAction] = None
    # explicit name assigned by the user, and lazily-computed default name
    self.customName: str = None  # type: ignore[assignment]
    self._defaultName: typing.Optional[str] = None
    self.resultsName: str = None  # type: ignore[assignment]
    # if savelist is True, matched tokens are kept as a list in the results
    self.saveAsList = savelist
    self.skipWhitespace = True
    self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
    self.copyDefaultWhiteChars = True
    # used when checking for left-recursion
    self._may_return_empty = False
    self.keepTabs = False
    # expressions to skip over (registered via ignore())
    self.ignoreExprs: list[ParserElement] = list()
    self.debug = False
    self.streamlined = False
    # optimize exception handling for subclasses that don't advance parse index
    self.mayIndexError = True
    self.errmsg: Union[str, None] = ""
    # mark results names as modal (report only last) or cumulative (list all)
    self.modalResults = True
    # custom debug actions
    self.debugActions = self.DebugActions(None, None, None)
    # avoid redundant calls to preParse
    self.callPreparse = True
    self.callDuringTry = False
    # diagnostics suppressed on this expression (see suppress_warning)
    self.suppress_warnings_: list[Diagnostics] = []
    self.show_in_diagram = True
@property
def mayReturnEmpty(self):
    # True if this expression can succeed while consuming no input
    return self._may_return_empty

@mayReturnEmpty.setter
def mayReturnEmpty(self, value):
    self._may_return_empty = value
def suppress_warning(self, warning_type: Diagnostics) -> ParserElement:
    """
    Suppress warnings emitted for a particular diagnostic on this expression.

    Example::

        base = pp.Forward()
        base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)

        # statement would normally raise a warning, but is now suppressed
        print(base.parse_string("x"))
    """
    # returns self so the call can be chained
    self.suppress_warnings_.append(warning_type)
    return self
def visit_all(self):
    """General-purpose method to yield all expressions and sub-expressions
    in a grammar. Typically just for internal use.
    """
    # breadth-first walk over the expression graph
    pending = deque([self])
    visited = set()
    while pending:
        element = pending.popleft()
        if element in visited:
            # recursive grammars can reach the same element twice - skip repeats
            continue
        visited.add(element)
        pending.extend(element.recurse())
        yield element
def copy(self) -> ParserElement:
    """
    Make a copy of this :class:`ParserElement`. Useful for defining
    different parse actions for the same parsing pattern, using copies of
    the original parse element.

    Example::

        integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
        integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
        integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")

        print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))

    prints::

        [5120, 100, 655360, 268435456]

    Equivalent form of ``expr.copy()`` is just ``expr()``::

        integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
    """
    # shallow-copy the element, but give the copy its own action/ignore lists
    cpy = copy.copy(self)
    cpy.parseAction = self.parseAction[:]
    cpy.ignoreExprs = self.ignoreExprs[:]
    if self.copyDefaultWhiteChars:
        cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
    return cpy
def set_results_name(
    self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
) -> ParserElement:
    """
    Define name for referencing matching tokens as a nested attribute
    of the returned parse results.

    Normally, results names are assigned as you would assign keys in a dict:
    any existing value is overwritten by later values. If it is necessary to
    keep all values captured for a particular results name, call ``set_results_name``
    with ``list_all_matches`` = True.

    NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
    this is so that the client can define a basic element, such as an
    integer, and reference it in multiple places with different names.

    You can also set results names using the abbreviated syntax,
    ``expr("name")`` in place of ``expr.set_results_name("name")``
    - see :class:`__call__`. If ``list_all_matches`` is required, use
    ``expr("name*")``.

    Example::

        integer = Word(nums)
        date_str = (integer.set_results_name("year") + '/'
                    + integer.set_results_name("month") + '/'
                    + integer.set_results_name("day"))

        # equivalent form:
        date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    """
    # listAllMatches is the pre-PEP8 spelling of list_all_matches
    listAllMatches = listAllMatches or list_all_matches
    return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
    # internal worker for set_results_name; a trailing "*" on the name is
    # shorthand for list_all_matches=True
    if name is None:
        return self
    if name.endswith("*"):
        name = name[:-1]
        list_all_matches = True
    named_copy = self.copy()
    named_copy.resultsName = name
    named_copy.modalResults = not list_all_matches
    return named_copy
def set_break(self, break_flag: bool = True) -> ParserElement:
    """
    Method to invoke the Python pdb debugger when this element is
    about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
    disable.
    """
    if break_flag:
        # wrap the current _parse method so a debugger breakpoint fires
        # just before each parse attempt
        _parseMethod = self._parse

        def breaker(instring, loc, do_actions=True, callPreParse=True):
            # this call to breakpoint() is intentional, not a checkin error
            breakpoint()
            return _parseMethod(instring, loc, do_actions, callPreParse)

        # stash the original so set_break(False) can restore it
        breaker._originalParseMethod = _parseMethod  # type: ignore [attr-defined]
        self._parse = breaker  # type: ignore [method-assign]
    elif hasattr(self._parse, "_originalParseMethod"):
        self._parse = self._parse._originalParseMethod  # type: ignore [method-assign]
    return self
def set_parse_action(self, *fns: ParseAction, **kwargs: Any) -> ParserElement:
    """
    Define one or more actions to perform when successfully matching parse element definition.

    Parse actions can be called to perform data conversions, do extra validation,
    update external data structures, or enhance or replace the parsed tokens.
    Each parse action ``fn`` is a callable method with 0-3 arguments, called as
    ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:

    - ``s`` = the original string being parsed (see note below)
    - ``loc`` = the location of the matching substring
    - ``toks`` = a list of the matched tokens, packaged as a :class:`ParseResults` object

    The parsed tokens are passed to the parse action as ParseResults. They can be
    modified in place using list-style append, extend, and pop operations to update
    the parsed list elements; and with dictionary-style item set and del operations
    to add, update, or remove any named results. If the tokens are modified in place,
    it is not necessary to return them with a return statement.

    Parse actions can also completely replace the given tokens, with another ``ParseResults``
    object, or with some entirely different object (common for parse actions that perform data
    conversions). A convenient way to build a new parse result is to define the values
    using a dict, and then create the return value using :class:`ParseResults.from_dict`.

    If None is passed as the ``fn`` parse action, all previously added parse actions for this
    expression are cleared.

    Optional keyword arguments:

    - ``call_during_try`` = (default= ``False``) indicate if parse action should be run during
      lookaheads and alternate testing. For parse actions that have side effects, it is
      important to only call the parse action once it is determined that it is being
      called as part of a successful parse. For parse actions that perform additional
      validation, then call_during_try should be passed as True, so that the validation
      code is included in the preliminary "try" parses.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See :class:`parse_string` for more
    information on parsing strings containing ``<TAB>`` s, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.

    Example::

        # parse dates in the form YYYY/MM/DD

        # use parse action to convert toks from str to int at parse time
        def convert_to_int(toks):
            return int(toks[0])

        # use a parse action to verify that the date is a valid date
        def is_valid_date(instring, loc, toks):
            from datetime import date
            year, month, day = toks[::2]
            try:
                date(year, month, day)
            except ValueError:
                raise ParseException(instring, loc, "invalid date given")

        integer = Word(nums)
        date_str = integer + '/' + integer + '/' + integer

        # add parse actions
        integer.set_parse_action(convert_to_int)
        date_str.set_parse_action(is_valid_date)

        # note that integer fields are now ints, not strings
        date_str.run_tests('''
            # successful parse - note that integer fields were converted to ints
            1999/12/31

            # fail - invalid date
            1999/13/31
            ''')
    """
    # a single None clears all previously-set parse actions
    if list(fns) == [None]:
        self.parseAction.clear()
        return self
    if not all(callable(fn) for fn in fns):
        raise TypeError("parse actions must be callable")
    # wrap each action so it can be called with 0-3 arguments
    self.parseAction[:] = [_trim_arity(fn) for fn in fns]
    self.callDuringTry = kwargs.get(
        "call_during_try", kwargs.get("callDuringTry", False)
    )
    return self
def add_parse_action(self, *fns: ParseAction, **kwargs: Any) -> ParserElement:
    """
    Append one or more parse actions to this expression's existing list of
    parse actions. See :class:`set_parse_action` for the supported callable
    signatures and keyword arguments, and :class:`copy` for examples.
    """
    self.parseAction.extend(_trim_arity(fn) for fn in fns)
    if not self.callDuringTry:
        self.callDuringTry = kwargs.get(
            "call_during_try", kwargs.get("callDuringTry", False)
        )
    return self
def add_condition(self, *fns: ParseCondition, **kwargs: Any) -> ParserElement:
    """Add a boolean predicate function to expression's list of parse actions. See
    :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
    functions passed to ``add_condition`` need to return boolean success/fail of the condition.

    Optional keyword arguments:

    - ``message`` = define a custom message to be used in the raised exception
    - ``fatal`` = if True, will raise ParseFatalException to stop parsing immediately;
      otherwise will raise ParseException
    - ``call_during_try`` = boolean to indicate if this method should be called during
      internal tryParse calls, default=False

    Example::

        integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
        year_int = integer.copy()
        year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
        date_str = year_int + '/' + integer + '/' + integer

        result = date_str.parse_string("1999/12/31")
        # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
    """
    for fn in fns:
        self.parseAction.append(
            condition_as_parse_action(
                fn,
                # pass message through unchanged: wrapping it in str() would
                # turn a missing message (None) into the literal text "None",
                # defeating condition_as_parse_action's default message
                message=kwargs.get("message"),
                fatal=bool(kwargs.get("fatal", False)),
            )
        )
    self.callDuringTry = self.callDuringTry or kwargs.get(
        "call_during_try", kwargs.get("callDuringTry", False)
    )
    return self
def set_fail_action(self, fn: ParseFailAction) -> ParserElement:
    """
    Define action to perform if parsing fails at this expression.
    Fail action fn is a callable function that takes the arguments
    ``fn(s, loc, expr, err)`` where:

    - ``s`` = string being parsed
    - ``loc`` = location where expression match was attempted and failed
    - ``expr`` = the parse expression that failed
    - ``err`` = the exception thrown

    The function returns no value. It may throw :class:`ParseFatalException`
    if it is desired to stop parsing immediately."""
    self.failAction = fn
    return self
def _skipIgnorables(self, instring: str, loc: int) -> int:
    # advance loc past any expressions registered via ignore(), repeating
    # until no ignore expression matches any further
    if not self.ignoreExprs:
        return loc
    exprsFound = True
    ignore_expr_fns = [e._parse for e in self.ignoreExprs]
    last_loc = loc
    while exprsFound:
        exprsFound = False
        for ignore_fn in ignore_expr_fns:
            try:
                # consume repeated matches of this ignore expression
                while 1:
                    loc, dummy = ignore_fn(instring, loc)
                    exprsFound = True
            except ParseException:
                pass
        # check if all ignore exprs matched but didn't actually advance the parse location
        if loc == last_loc:
            break
        last_loc = loc
    return loc
def preParse(self, instring: str, loc: int) -> int:
    """Advance ``loc`` past ignorable expressions and leading whitespace."""
    if self.ignoreExprs:
        loc = self._skipIgnorables(instring, loc)
    if self.skipWhitespace:
        skippable = self.whiteChars
        end = len(instring)
        while loc < end and instring[loc] in skippable:
            loc += 1
    return loc
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
    # default implementation: match nothing, consume nothing
    return loc, []

def postParse(self, instring, loc, tokenlist):
    # hook for subclasses to transform tokens after a match; default is identity
    return tokenlist
# @profile
def _parseNoCache(
    self, instring, loc, do_actions=True, callPreParse=True
) -> tuple[int, ParseResults]:
    # uncached parse entry point: preParse, parseImpl, postParse, then run
    # parse actions and wrap the tokens in a ParseResults
    debugging = self.debug  # and do_actions)
    len_instring = len(instring)

    if debugging or self.failAction:
        # debug/fail-action path: same parse steps, with callbacks wired in
        # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
        try:
            if callPreParse and self.callPreparse:
                pre_loc = self.preParse(instring, loc)
            else:
                pre_loc = loc
            tokens_start = pre_loc
            if self.debugActions.debug_try:
                self.debugActions.debug_try(instring, tokens_start, self, False)
            if self.mayIndexError or pre_loc >= len_instring:
                try:
                    loc, tokens = self.parseImpl(instring, pre_loc, do_actions)
                except IndexError:
                    # treat running off the end of the string as a parse failure
                    raise ParseException(instring, len_instring, self.errmsg, self)
            else:
                loc, tokens = self.parseImpl(instring, pre_loc, do_actions)
        except Exception as err:
            # print("Exception raised:", err)
            if self.debugActions.debug_fail:
                self.debugActions.debug_fail(
                    instring, tokens_start, self, err, False
                )
            if self.failAction:
                self.failAction(instring, tokens_start, self, err)
            raise
    else:
        # fast path: no debugging callbacks, no fail action
        if callPreParse and self.callPreparse:
            pre_loc = self.preParse(instring, loc)
        else:
            pre_loc = loc
        tokens_start = pre_loc
        if self.mayIndexError or pre_loc >= len_instring:
            try:
                loc, tokens = self.parseImpl(instring, pre_loc, do_actions)
            except IndexError:
                raise ParseException(instring, len_instring, self.errmsg, self)
        else:
            loc, tokens = self.parseImpl(instring, pre_loc, do_actions)

    tokens = self.postParse(instring, loc, tokens)
    ret_tokens = ParseResults(
        tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
    )
    if self.parseAction and (do_actions or self.callDuringTry):
        if debugging:
            try:
                for fn in self.parseAction:
                    try:
                        tokens = fn(instring, tokens_start, ret_tokens)  # type: ignore [call-arg, arg-type]
                    except IndexError as parse_action_exc:
                        exc = ParseException("exception raised in parse action")
                        raise exc from parse_action_exc
                    # a parse action may replace the tokens entirely
                    if tokens is not None and tokens is not ret_tokens:
                        ret_tokens = ParseResults(
                            tokens,
                            self.resultsName,
                            asList=self.saveAsList
                            and isinstance(tokens, (ParseResults, list)),
                            modal=self.modalResults,
                        )
            except Exception as err:
                # print "Exception raised in user parse action:", err
                if self.debugActions.debug_fail:
                    self.debugActions.debug_fail(
                        instring, tokens_start, self, err, False
                    )
                raise
        else:
            for fn in self.parseAction:
                try:
                    tokens = fn(instring, tokens_start, ret_tokens)  # type: ignore [call-arg, arg-type]
                except IndexError as parse_action_exc:
                    exc = ParseException("exception raised in parse action")
                    raise exc from parse_action_exc
                # a parse action may replace the tokens entirely
                if tokens is not None and tokens is not ret_tokens:
                    ret_tokens = ParseResults(
                        tokens,
                        self.resultsName,
                        asList=self.saveAsList
                        and isinstance(tokens, (ParseResults, list)),
                        modal=self.modalResults,
                    )
    if debugging:
        # print("Matched", self, "->", ret_tokens.as_list())
        if self.debugActions.debug_match:
            self.debugActions.debug_match(
                instring, tokens_start, loc, self, ret_tokens, False
            )
    return loc, ret_tokens
def try_parse(
    self,
    instring: str,
    loc: int,
    *,
    raise_fatal: bool = False,
    do_actions: bool = False,
) -> int:
    """Attempt a parse at ``loc`` and return only the ending location.

    A :class:`ParseFatalException` is downgraded to a plain
    :class:`ParseException` unless ``raise_fatal`` is True.
    """
    try:
        end_loc, _ = self._parse(instring, loc, do_actions=do_actions)
    except ParseFatalException:
        if raise_fatal:
            raise
        raise ParseException(instring, loc, self.errmsg, self)
    return end_loc
def can_parse_next(self, instring: str, loc: int, do_actions: bool = False) -> bool:
    """Return True if this expression can match at ``loc``, False otherwise."""
    try:
        self.try_parse(instring, loc, do_actions=do_actions)
        return True
    except (ParseException, IndexError):
        return False
# cache for left-recursion in Forward references
recursion_lock = RLock()
# maps (loc, Forward, do_actions) -> (end loc, results-or-exception)
recursion_memos: collections.abc.MutableMapping[
    tuple[int, Forward, bool], tuple[int, Union[ParseResults, Exception]]
] = {}

class _CacheType(typing.Protocol):
    """
    Class to be used for packrat and left-recursion cacheing of results
    and exceptions.
    """
    # sentinel returned by get() for missing entries
    not_in_cache: bool

    def get(self, *args) -> typing.Any: ...
    def set(self, *args) -> None: ...
    def clear(self) -> None: ...

class NullCache(dict):
    """
    A null cache type for initialization of the packrat_cache class variable.
    If/when enable_packrat() is called, this null cache will be replaced by a
    proper _CacheType class instance.
    """
    # every lookup misses; get/set/clear are deliberate no-ops
    not_in_cache: bool = True

    def get(self, *args) -> typing.Any: ...
    def set(self, *args) -> None: ...
    def clear(self) -> None: ...

# class-level argument cache for optimizing repeated calls when backtracking
# through recursive expressions
packrat_cache: _CacheType = NullCache()
packrat_cache_lock = RLock()
# [hits, misses] counters, reported by reset_cache()/enable_packrat users
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(
    self, instring, loc, do_actions=True, callPreParse=True
) -> tuple[int, ParseResults]:
    # indices into packrat_cache_stats
    HIT, MISS = 0, 1
    lookup = (self, instring, loc, callPreParse, do_actions)
    with ParserElement.packrat_cache_lock:
        cache = ParserElement.packrat_cache
        value = cache.get(lookup)
        if value is cache.not_in_cache:
            # cache miss: do the real parse and memoize result or exception
            ParserElement.packrat_cache_stats[MISS] += 1
            try:
                value = self._parseNoCache(instring, loc, do_actions, callPreParse)
            except ParseBaseException as pe:
                # cache a copy of the exception, without the traceback
                cache.set(lookup, pe.__class__(*pe.args))
                raise
            else:
                cache.set(lookup, (value[0], value[1].copy(), loc))
                return value
        else:
            # cache hit: replay the memoized outcome (and debug callbacks)
            ParserElement.packrat_cache_stats[HIT] += 1
            if self.debug and self.debugActions.debug_try:
                try:
                    self.debugActions.debug_try(instring, loc, self, cache_hit=True)  # type: ignore [call-arg]
                except TypeError:
                    pass
            if isinstance(value, Exception):
                # a memoized failure - re-raise the cached exception
                if self.debug and self.debugActions.debug_fail:
                    try:
                        self.debugActions.debug_fail(
                            instring, loc, self, value, cache_hit=True  # type: ignore [call-arg]
                        )
                    except TypeError:
                        pass
                raise value

            value = cast(tuple[int, ParseResults, int], value)
            # return a copy so callers can't mutate the cached results
            loc_, result, endloc = value[0], value[1].copy(), value[2]
            if self.debug and self.debugActions.debug_match:
                try:
                    self.debugActions.debug_match(
                        instring, loc_, endloc, self, result, cache_hit=True  # type: ignore [call-arg]
                    )
                except TypeError:
                    pass

            return loc_, result
# default parse entry point - replaced by _parseCache when packrat is enabled
_parse = _parseNoCache

@staticmethod
def reset_cache() -> None:
    # clear the packrat cache, its hit/miss statistics, and any
    # left-recursion memos
    ParserElement.packrat_cache.clear()
    ParserElement.packrat_cache_stats[:] = [0] * len(
        ParserElement.packrat_cache_stats
    )
    ParserElement.recursion_memos.clear()

# module-wide memoization mode flags, toggled by enable_packrat(),
# enable_left_recursion(), and disable_memoization()
_packratEnabled = False
_left_recursion_enabled = False
@staticmethod
def disable_memoization() -> None:
    """
    Disables active Packrat or Left Recursion parsing and their memoization

    This method also works if neither Packrat nor Left Recursion are enabled.
    This makes it safe to call before activating Packrat or Left Recursion
    to clear any previous settings.
    """
    ParserElement.reset_cache()
    ParserElement._left_recursion_enabled = False
    ParserElement._packratEnabled = False
    # restore the plain, uncached parse entry point
    ParserElement._parse = ParserElement._parseNoCache
@staticmethod
def enable_left_recursion(
    cache_size_limit: typing.Optional[int] = None, *, force=False
) -> None:
    """
    Enables "bounded recursion" parsing, which allows for both direct and indirect
    left-recursion. During parsing, left-recursive :class:`Forward` elements are
    repeatedly matched with a fixed recursion depth that is gradually increased
    until finding the longest match.

    Example::

        import pyparsing as pp
        pp.ParserElement.enable_left_recursion()

        E = pp.Forward("E")
        num = pp.Word(pp.nums)
        # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
        E <<= E + '+' - num | num

        print(E.parse_string("1+2+3"))

    Recursion search naturally memoizes matches of ``Forward`` elements and may
    thus skip reevaluation of parse actions during backtracking. This may break
    programs with parse actions which rely on strict ordering of side-effects.

    Parameters:

    - ``cache_size_limit`` - (default=``None``) - memoize at most this many
      ``Forward`` elements during matching; if ``None`` (the default),
      memoize all ``Forward`` elements.

    Bounded Recursion parsing works similar but not identical to Packrat parsing,
    thus the two cannot be used together. Use ``force=True`` to disable any
    previous, conflicting settings.
    """
    if force:
        ParserElement.disable_memoization()
    elif ParserElement._packratEnabled:
        raise RuntimeError("Packrat and Bounded Recursion are not compatible")
    if cache_size_limit is None:
        ParserElement.recursion_memos = _UnboundedMemo()
    elif cache_size_limit > 0:
        ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)  # type: ignore[assignment]
    else:
        raise NotImplementedError(f"Memo size of {cache_size_limit}")
    ParserElement._left_recursion_enabled = True
@staticmethod
def enable_packrat(
    cache_size_limit: Union[int, None] = 128, *, force: bool = False
) -> None:
    """
    Enables "packrat" parsing, which adds memoizing to the parsing logic.
    Repeated parse attempts at the same string location (which happens
    often in many complex grammars) can immediately return a cached value,
    instead of re-executing parsing/validating code. Memoizing is done of
    both valid results and parsing exceptions.

    Parameters:

    - ``cache_size_limit`` - (default= ``128``) - if an integer value is provided
      will limit the size of the packrat cache; if None is passed, then
      the cache size will be unbounded; if 0 is passed, the cache will
      be effectively disabled.

    This speedup may break existing programs that use parse actions that
    have side-effects. For this reason, packrat parsing is disabled when
    you first import pyparsing. To activate the packrat feature, your
    program must call the class method :class:`ParserElement.enable_packrat`.
    For best results, call ``enable_packrat()`` immediately after
    importing pyparsing.

    Example::

        import pyparsing
        pyparsing.ParserElement.enable_packrat()

    Packrat parsing works similar but not identical to Bounded Recursion parsing,
    thus the two cannot be used together. Use ``force=True`` to disable any
    previous, conflicting settings.
    """
    if force:
        ParserElement.disable_memoization()
    elif ParserElement._left_recursion_enabled:
        raise RuntimeError("Packrat and Bounded Recursion are not compatible")

    if ParserElement._packratEnabled:
        return

    ParserElement._packratEnabled = True
    if cache_size_limit is None:
        ParserElement.packrat_cache = _UnboundedCache()
    else:
        ParserElement.packrat_cache = _FifoCache(cache_size_limit)
    # route all parsing through the caching entry point
    ParserElement._parse = ParserElement._parseCache
def parse_string(
    self, instring: str, parse_all: bool = False, *, parseAll: bool = False
) -> ParseResults:
    """
    Parse a string with respect to the parser definition. This function is intended as the primary interface to the
    client code.

    :param instring: The input string to be parsed.
    :param parse_all: If set, the entire input string must match the grammar.
    :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
    :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
    :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
      an object with attributes if the given parser includes results names.

    If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This
    is also equivalent to ending the grammar with :class:`StringEnd`\\ ().

    To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
    converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string
    contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
    being parsed, one can ensure a consistent view of the input string by doing one of the following:

    - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
    - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the
      parse action's ``s`` argument, or
    - explicitly expand the tabs in your input string before calling ``parse_string``.

    Examples:

    By default, partial matches are OK.

    >>> res = Word('a').parse_string('aaaaabaaa')
    >>> print(res)
    ['aaaaa']

    The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
    directly to see more examples.

    It raises an exception if parse_all flag is set and instring does not match the whole grammar.

    >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
    Traceback (most recent call last):
    ...
    pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
    """
    # either spelling of the flag (PEP8 or legacy) enables whole-string matching
    parseAll = parse_all or parseAll

    ParserElement.reset_cache()
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        # expand tabs so reported column numbers are consistent
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse(instring, 0)
        if parseAll:
            # skip trailing whitespace/ignorables, then require end-of-string
            loc = self.preParse(instring, loc)
            se = Empty() + StringEnd().set_debug(False)
            se._parse(instring, loc)
    except _ParseActionIndexError as pa_exc:
        # an IndexError raised inside a parse action - surface the wrapped original
        raise pa_exc.exc
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # catch and re-raise exception from here, clearing out pyparsing internal stack trace
        raise exc.with_traceback(None)
    else:
        return tokens
def scan_string(
    self,
    instring: str,
    max_matches: int = _MAX_INT,
    overlap: bool = False,
    always_skip_whitespace: bool = True,
    *,
    debug: bool = False,
    maxMatches: int = _MAX_INT,
) -> Generator[tuple[ParseResults, int, int], None, None]:
    """
    Scan the input string for expression matches. Each match will return the
    matching tokens, start location, and end location. May be called with optional
    ``max_matches`` argument, to clip scanning after 'n' matches are found. If
    ``overlap`` is specified, then overlapping matches will be reported.

    Note that the start and end locations are reported relative to the string
    being parsed. See :class:`parse_string` for more information on parsing
    strings with embedded tabs.

    Example::

        source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
        print(source)
        for tokens, start, end in Word(alphas).scan_string(source):
            print(' '*start + '^'*(end-start))
            print(' '*start + tokens[0])

    prints::

        sldjf123lsdjjkf345sldkjf879lkjsfd987
        ^^^^^
        sldjf
                ^^^^^^^
                lsdjjkf
                          ^^^^^^
                          sldkjf
                                   ^^^^^^
                                   lkjsfd
    """
    # honor whichever of the PEP8/legacy match-limit arguments is smaller
    maxMatches = min(maxMatches, max_matches)
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = str(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    if always_skip_whitespace:
        # use a throwaway Empty sharing our ignorables/whitespace so each
        # attempt starts at the next significant character
        preparser = Empty()
        preparser.ignoreExprs = self.ignoreExprs
        preparser.whiteChars = self.whiteChars
        preparseFn = preparser.preParse
    else:
        preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc: int = preparseFn(instring, loc)
                nextLoc: int
                tokens: ParseResults
                nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
            except ParseException:
                # no match at this position - advance one char past the skipped point
                loc = preloc + 1
            else:
                if nextLoc > loc:
                    matches += 1
                    if debug:
                        print(
                            {
                                "tokens": tokens.asList(),
                                "start": preloc,
                                "end": nextLoc,
                            }
                        )
                    yield tokens, preloc, nextLoc
                    if overlap:
                        # NOTE: lowercase `nextloc` (preparse of current loc) gates the
                        # step; advancing still uses `nextLoc` - matches upstream pyparsing
                        nextloc = preparseFn(instring, loc)
                        if nextloc > loc:
                            loc = nextLoc
                        else:
                            loc += 1
                    else:
                        loc = nextLoc
                else:
                    # zero-width match: step forward to avoid an infinite loop
                    loc = preloc + 1
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        raise exc.with_traceback(None)
def transform_string(self, instring: str, *, debug: bool = False) -> str:
    """
    Extension to :class:`scan_string`, to modify matching text with modified tokens that may
    be returned from a parse action. To use ``transform_string``, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking ``transform_string()`` on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action. ``transform_string()`` returns the resulting transformed string.

    Example::

        wd = Word(alphas)
        wd.set_parse_action(lambda toks: toks[0].title())

        print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))

    prints::

        Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
    """
    out: list[str] = []
    lastE = 0
    # force preservation of tabs, to minimize unwanted transformation of string, and to
    # keep string locs straight between transform_string and scan_string
    self.keepTabs = True
    try:
        for t, s, e in self.scan_string(instring, debug=debug):
            # copy over any untouched text between the previous match and this one
            if s > lastE:
                out.append(instring[lastE:s])
            lastE = e
            if not t:
                continue
            if isinstance(t, ParseResults):
                out += t.as_list()
            elif isinstance(t, Iterable) and not isinstance(t, str_type):
                out.extend(t)
            else:
                out.append(t)
        # trailing text after the final match
        out.append(instring[lastE:])
        out = [o for o in out if o]
        return "".join([str(s) for s in _flatten(out)])
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        raise exc.with_traceback(None)
def search_string(
    self,
    instring: str,
    max_matches: int = _MAX_INT,
    *,
    debug: bool = False,
    maxMatches: int = _MAX_INT,
) -> ParseResults:
    """
    Convenience wrapper around :class:`scan_string` that collects just the
    matched tokens. The optional ``max_matches`` argument clips searching
    after 'n' matches are found.

    Example::

        # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
        cap_word = Word(alphas.upper(), alphas.lower())

        print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))

        # the sum() builtin can be used to merge results into a single ParseResults object
        print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))

    prints::

        [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
        ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
    """
    maxMatches = min(maxMatches, max_matches)
    try:
        found = [
            tokens
            for tokens, _start, _end in self.scan_string(
                instring, maxMatches, always_skip_whitespace=False, debug=debug
            )
        ]
        return ParseResults(found)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # strip pyparsing-internal frames from the reported traceback
        raise exc.with_traceback(None)
def split(
    self,
    instring: str,
    maxsplit: int = _MAX_INT,
    include_separators: bool = False,
    *,
    includeSeparators=False,
) -> Generator[str, None, None]:
    """
    Generator method to split a string using the given expression as a separator.
    May be called with optional ``maxsplit`` argument, to limit the number of splits;
    and the optional ``include_separators`` argument (default= ``False``), if the separating
    matching text should be included in the split results.

    Example::

        punc = one_of(list(".,;:/-!?"))
        print(list(punc.split("This, this?, this sentence, is badly punctuated!")))

    prints::

        ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
    """
    emit_separators = includeSeparators or include_separators
    prev_end = 0
    for toks, start, end in self.scan_string(instring, max_matches=maxsplit):
        yield instring[prev_end:start]
        if emit_separators:
            yield toks[0]
        prev_end = end
    # trailing text after the final separator (possibly empty)
    yield instring[prev_end:]
def __add__(self, other) -> ParserElement:
    """
    ``+`` operator - returns an :class:`And` of the two operands. A plain
    string operand is first promoted to a :class:`Literal`.

    Example::

        greet = Word(alphas) + "," + Word(alphas) + "!"
        hello = "Hello, World!"
        print(hello, "->", greet.parse_string(hello))

    prints::

        Hello, World! -> ['Hello', ',', 'World', '!']

    ``...`` may be used as a parse expression as a short form of :class:`SkipTo`::

        Literal('start') + ... + Literal('end')

    is equivalent to::

        Literal('start') + SkipTo('end')("_skipped*") + Literal('end')

    The skipped text is returned under the '_skipped' results name, as a
    list of all skipped text (so multiple skips can coexist in one parser).
    """
    if other is Ellipsis:
        return _PendingSkip(self)

    rhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    if not isinstance(rhs, ParserElement):
        return NotImplemented
    return And([self, rhs])
def __radd__(self, other) -> ParserElement:
    """
    ``+`` operator when the left operand is not a :class:`ParserElement`.
    """
    if other is Ellipsis:
        # `... + expr` skips to expr, keeping the skipped text
        return SkipTo(self)("_skipped*") + self

    lhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    return lhs + self if isinstance(lhs, ParserElement) else NotImplemented
def __sub__(self, other) -> ParserElement:
    """
    ``-`` operator: like ``+`` but inserts an error stop, so a failure
    after this point is reported immediately rather than backtracked.
    """
    rhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    if not isinstance(rhs, ParserElement):
        return NotImplemented
    return self + And._ErrorStop() + rhs
def __rsub__(self, other) -> ParserElement:
    """
    ``-`` operator when the left operand is not a :class:`ParserElement`.
    """
    lhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    return lhs - self if isinstance(lhs, ParserElement) else NotImplemented
def __mul__(self, other) -> ParserElement:
    """
    Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
    ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
    tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
    may also include ``None`` as in:

    - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
      to ``expr*n + ZeroOrMore(expr)``
      (read as "at least n instances of ``expr``")
    - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
      (read as "0 to n instances of ``expr``")
    - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
    - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``

    Note that ``expr*(None, n)`` does not raise an exception if
    more than n exprs exist in the input stream; that is,
    ``expr*(None, n)`` does not enforce a maximum number of expr
    occurrences. If this behavior is desired, then write
    ``expr*(None, n) + ~expr``
    """
    if other is Ellipsis:
        # expr * ... -> zero or more
        other = (0, None)
    elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
        # expr * (..., n) -> (0, n)
        other = ((0,) + other[1:] + (None,))[:2]
    if not isinstance(other, (int, tuple)):
        return NotImplemented
    if isinstance(other, int):
        minElements, optElements = other, 0
    else:
        # normalize the tuple: Ellipsis -> None, then pad/clip to exactly 2 items
        other = tuple(o if o is not Ellipsis else None for o in other)
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0], int) and other[1] is None:
            # open-ended upper bound: delegate to the repetition classes
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self * other[0] + ZeroOrMore(self)
        elif isinstance(other[0], int) and isinstance(other[1], int):
            minElements, optElements = other
            # optElements becomes the count of *optional* repetitions beyond the minimum
            optElements -= minElements
        else:
            return NotImplemented
    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError(
            "second tuple value must be greater or equal to first tuple value"
        )
    if minElements == optElements == 0:
        return And([])
    if optElements:
        def makeOptionalList(n):
            # build Opt(expr + Opt(expr + ...)) nested n levels deep
            if n > 1:
                return Opt(self + makeOptionalList(n - 1))
            else:
                return Opt(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self] * minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self] * minElements)
    return ret
def __rmul__(self, other) -> ParserElement:
    # repetition is symmetric: `3 * expr` means the same as `expr * 3`
    return self * other
def __or__(self, other) -> ParserElement:
    """
    ``|`` operator - returns :class:`MatchFirst`.
    """
    if other is Ellipsis:
        return _PendingSkip(self, must_skip=True)

    if isinstance(other, str_type):
        # `expr | ""` is equivalent to `Opt(expr)`
        if other == "":
            return Opt(self)
        other = self._literalStringClass(other)

    if not isinstance(other, ParserElement):
        return NotImplemented
    return MatchFirst([self, other])
def __ror__(self, other) -> ParserElement:
    """
    ``|`` operator when the left operand is not a :class:`ParserElement`.
    """
    lhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    return lhs | self if isinstance(lhs, ParserElement) else NotImplemented
def __xor__(self, other) -> ParserElement:
    """
    ``^`` operator - returns :class:`Or`.
    """
    rhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    if not isinstance(rhs, ParserElement):
        return NotImplemented
    return Or([self, rhs])
def __rxor__(self, other) -> ParserElement:
    """
    ``^`` operator when the left operand is not a :class:`ParserElement`.
    """
    lhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    return lhs ^ self if isinstance(lhs, ParserElement) else NotImplemented
def __and__(self, other) -> ParserElement:
    """
    ``&`` operator - returns :class:`Each`.
    """
    rhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    if not isinstance(rhs, ParserElement):
        return NotImplemented
    return Each([self, rhs])
def __rand__(self, other) -> ParserElement:
    """
    ``&`` operator when the left operand is not a :class:`ParserElement`.
    """
    lhs = self._literalStringClass(other) if isinstance(other, str_type) else other
    return lhs & self if isinstance(lhs, ParserElement) else NotImplemented
def __invert__(self) -> ParserElement:
    """
    ``~`` operator - returns :class:`NotAny`.
    """
    return NotAny(self)
# Disable iteration: without this, iter() would fall back to the legacy
# sequence protocol and repeatedly call __getitem__ - which here implements
# repetition syntax (expr[n]), not element access.
__iter__ = None
def __getitem__(self, key):
    """
    use ``[]`` indexing notation as a short form for expression repetition:

    - ``expr[n]`` is equivalent to ``expr*n``
    - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
    - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
      to ``expr*n + ZeroOrMore(expr)``
      (read as "at least n instances of ``expr``")
    - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
      (read as "0 to n instances of ``expr``")
    - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
    - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``

    ``None`` may be used in place of ``...``.

    Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
    if more than ``n`` ``expr``\\ s exist in the input stream. If this behavior is
    desired, then write ``expr[..., n] + ~expr``.

    For repetition with a stop_on expression, use slice notation:

    - ``expr[...: end_expr]`` and ``expr[0, ...: end_expr]`` are equivalent to ``ZeroOrMore(expr, stop_on=end_expr)``
    - ``expr[1, ...: end_expr]`` is equivalent to ``OneOrMore(expr, stop_on=end_expr)``
    """
    stop_on_defined = False
    stop_on = NoMatch()
    if isinstance(key, slice):
        # expr[count : end_expr] - the slice stop is the stop_on expression
        key, stop_on = key.start, key.stop
        if key is None:
            key = ...
        stop_on_defined = True
    elif isinstance(key, tuple) and isinstance(key[-1], slice):
        # expr[m, n : end_expr]
        key, stop_on = (key[0], key[1].start), key[1].stop
        stop_on_defined = True
    # convert single arg keys to tuples
    if isinstance(key, str_type):
        key = (key,)
    try:
        iter(key)
    except TypeError:
        key = (key, key)
    if len(key) > 2:
        raise TypeError(
            f"only 1 or 2 index arguments supported ({key[:5]}{f'... [{len(key)}]' if len(key) > 5 else ''})"
        )
    # clip to 2 elements and delegate the repetition logic to __mul__
    ret = self * tuple(key[:2])
    ret = typing.cast(_MultipleMatch, ret)
    if stop_on_defined:
        ret.stopOn(stop_on)
    return ret
def __call__(self, name: typing.Optional[str] = None) -> ParserElement:
    """
    Shortcut for :class:`set_results_name` (with ``list_all_matches=False``);
    a trailing ``'*'`` on ``name`` turns ``list_all_matches`` on. With no
    argument, behaves like :class:`copy`.

    Example::

        # these are equivalent
        userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
        userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
    """
    return self.copy() if name is None else self._setResultsName(name)
def suppress(self) -> ParserElement:
    """
    Wrap this element in a :class:`Suppress`, so its matched text is left
    out of the returned results (useful for punctuation).
    """
    return Suppress(self)
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
    """
    Enable the skipping of whitespace before matching this element's pattern.

    :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)

    NOTE(review): the base implementation does not use ``recursive``; compound
    subclasses presumably override this method to honor it.
    """
    self.skipWhitespace = True
    return self
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
    """
    Disable the skipping of whitespace before matching this element's
    pattern. Normally only used internally by pyparsing, but may be needed
    in whitespace-sensitive grammars.

    :param recursive: If true (the default), also disable whitespace skipping in child elements (if any)

    NOTE(review): the base implementation does not use ``recursive``; compound
    subclasses presumably override this method to honor it.
    """
    self.skipWhitespace = False
    return self
def set_whitespace_chars(
    self, chars: Union[set[str], str], copy_defaults: bool = False
) -> ParserElement:
    """
    Replace the set of characters treated as skippable whitespace for this
    element (and re-enable whitespace skipping).

    :param chars: the new whitespace characters, as a string or a set of 1-character strings
    :param copy_defaults: recorded on the element as ``copyDefaultWhiteChars``
    """
    self.skipWhitespace = True
    self.whiteChars = set(chars)
    self.copyDefaultWhiteChars = copy_defaults
    return self
def parse_with_tabs(self) -> ParserElement:
    """
    Keep literal tab characters instead of expanding them to spaces before
    parsing. Must be called before ``parse_string`` when the grammar
    contains elements that match tab characters.
    """
    self.keepTabs = True
    return self
def ignore(self, other: ParserElement) -> ParserElement:
    """
    Register an expression (e.g. a comment form) to be skipped wherever it
    occurs during matching; may be called repeatedly to register several
    ignorable patterns.

    Example::

        patt = Word(alphas)[...]
        patt.parse_string('ablaj /* comment */ lskjd')
        # -> ['ablaj']

        patt.ignore(c_style_comment)
        patt.parse_string('ablaj /* comment */ lskjd')
        # -> ['ablaj', 'lskjd']
    """
    target = Suppress(other) if isinstance(other, str_type) else other

    if isinstance(target, Suppress):
        # avoid registering the same suppressed expression twice
        if target not in self.ignoreExprs:
            self.ignoreExprs.append(target)
    else:
        # wrap non-suppressed expressions in Suppress of a copy
        self.ignoreExprs.append(Suppress(target.copy()))
    return self
def set_debug_actions(
    self,
    start_action: DebugStartAction,
    success_action: DebugSuccessAction,
    exception_action: DebugExceptionAction,
) -> ParserElement:
    """
    Install custom callbacks for debug output while matching; any falsy
    action falls back to the corresponding default debug action.

    - ``start_action`` - called when an expression is about to be parsed;
      signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
    - ``success_action`` - called when an expression has successfully parsed;
      signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserELement, parsed_tokens: ParseResults, cache_hit: bool)``
    - ``exception_action`` - called when an expression fails to parse;
      signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
    """
    self.debugActions = self.DebugActions(
        start_action or _default_start_debug_action,  # type: ignore[truthy-function]
        success_action or _default_success_debug_action,  # type: ignore[truthy-function]
        exception_action or _default_exception_debug_action,  # type: ignore[truthy-function]
    )
    # installing actions implies debugging is wanted
    self.debug = True
    return self
def set_debug(self, flag: bool = True, recurse: bool = False) -> ParserElement:
    """
    Enable (``flag=True``) or disable (``flag=False``) debugging messages
    while matching this expression; with ``recurse=True``, apply the same
    setting to every sub-expression as well.

    Example::

        wd = Word(alphas).set_name("alphaword")
        integer = Word(nums).set_name("numword")
        term = wd | integer

        # turn on debugging for wd
        wd.set_debug()

        term[1, ...].parse_string("abc 123 xyz 890")

    prints::

        Match alphaword at loc 0(1,1)
        Matched alphaword -> ['abc']
        Match alphaword at loc 3(1,4)
        Exception raised:Expected alphaword (at char 4), (line:1, col:5)
        ...

    The output shown is that produced by the default debug actions - custom
    actions can be installed with :class:`set_debug_actions`. Assigning a
    readable name via :class:`set_name` makes the debug output (and
    exception messages) much easier to follow.
    """
    if recurse:
        # fan out non-recursively over every reachable sub-expression
        for expr in self.visit_all():
            expr.set_debug(flag, recurse=False)
        return self

    if not flag:
        self.debug = False
    else:
        # enabling installs the three default debug actions
        self.set_debug_actions(
            _default_start_debug_action,
            _default_success_debug_action,
            _default_exception_debug_action,
        )
    return self
@property
def default_name(self) -> str:
    """The auto-generated name for this expression (computed lazily and cached)."""
    if self._defaultName is None:
        self._defaultName = self._generateDefaultName()
    return self._defaultName
@abstractmethod
def _generateDefaultName(self) -> str:
    """
    Child classes must define this method, which defines how the ``default_name`` is set.
    (Called lazily by the ``default_name`` property; the result is cached.)
    """
def set_name(self, name: typing.Optional[str]) -> ParserElement:
    """
    Assign a name to this expression, making debugging and exception
    messages clearer. Passing ``None`` clears any custom name. If
    `__diag__.enable_debug_on_named_expressions` is set to True, naming an
    expression also toggles its debug flag accordingly.

    Example::

        integer = Word(nums)
        integer.parse_string("ABC")  # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)

        integer.set_name("integer")
        integer.parse_string("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
    """
    self.customName = name  # type: ignore[assignment]
    # refresh the error message to reflect the (possibly cleared) name
    self.errmsg = f"Expected {str(self)}"

    if __diag__.enable_debug_on_named_expressions:
        self.set_debug(name is not None)
    return self
@property
def name(self) -> str:
    """The user-assigned name if one is set, else the auto-generated default name."""
    if self.customName is not None:
        return self.customName
    return self.default_name

@name.setter
def name(self, new_name) -> None:
    # assignment routes through set_name so errmsg/debug stay in sync
    self.set_name(new_name)
def __str__(self) -> str:
    # an expression displays as its (custom or generated) name
    return self.name
def __repr__(self) -> str:
    # repr mirrors str: the expression's name
    return str(self)
def streamline(self) -> ParserElement:
    """Mark this expression as streamlined and drop its cached default name."""
    self.streamlined = True
    self._defaultName = None
    return self
def recurse(self) -> list[ParserElement]:
    # base elements have no sub-expressions; containers override this
    return []
def _checkRecursion(self, parseElementList):
    # extend the already-seen chain with this element, then descend
    seen = [*parseElementList, self]
    for child in self.recurse():
        child._checkRecursion(seen)
def validate(self, validateTrace=None) -> None:
    """
    Check defined expressions for valid structure, check for infinite recursive definitions.

    .. deprecated:: emits a ``DeprecationWarning``; do not use this to
       check for left recursion.
    """
    warnings.warn(
        "ParserElement.validate() is deprecated, and should not be used to check for left recursion",
        DeprecationWarning,
        stacklevel=2,
    )
    self._checkRecursion([])
def parse_file(
    self,
    file_or_filename: Union[str, Path, TextIO],
    encoding: str = "utf-8",
    parse_all: bool = False,
    *,
    parseAll: bool = False,
) -> ParseResults:
    """
    Execute the parse expression on the given file or filename. When a
    filename is given instead of an open file object, the file is opened
    with ``encoding``, read completely, and closed before parsing.
    """
    parseAll = parseAll or parse_all

    # EAFP: treat the argument as a file object first, fall back to a path
    try:
        file_contents = typing.cast(TextIO, file_or_filename).read()
    except AttributeError:
        with open(typing.cast(str, file_or_filename), "r", encoding=encoding) as f:
            file_contents = f.read()

    try:
        return self.parse_string(file_contents, parseAll)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # strip pyparsing-internal frames from the reported traceback
        raise exc.with_traceback(None)
def __eq__(self, other):
    if self is other:
        return True
    if isinstance(other, str_type):
        # comparing against a string asks "does this parser fully match it?"
        return self.matches(other, parse_all=True)
    if isinstance(other, ParserElement):
        return vars(self) == vars(other)
    return False
def __hash__(self):
    # identity-based hash: each expression instance hashes as itself
    return id(self)
def matches(
    self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
) -> bool:
    """
    Quick boolean test of this parser against a test string - handy for
    inline micro-tests of sub-expressions while building a larger parser.

    Parameters:

    - ``test_string`` - to test against this expression for a match
    - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests

    Example::

        expr = Word(nums)
        assert expr.matches("100")
    """
    # whole-string matching only if both the PEP8 and legacy flags allow it
    parseAll = parseAll and parse_all
    try:
        self.parse_string(str(test_string), parse_all=parseAll)
    except ParseBaseException:
        return False
    return True
def run_tests(
    self,
    tests: Union[str, list[str]],
    parse_all: bool = True,
    comment: typing.Optional[Union[ParserElement, str]] = "#",
    full_dump: bool = True,
    print_results: bool = True,
    failure_tests: bool = False,
    post_parse: typing.Optional[
        Callable[[str, ParseResults], typing.Optional[str]]
    ] = None,
    file: typing.Optional[TextIO] = None,
    with_line_numbers: bool = False,
    *,
    parseAll: bool = True,
    fullDump: bool = True,
    printResults: bool = True,
    failureTests: bool = False,
    postParse: typing.Optional[
        Callable[[str, ParseResults], typing.Optional[str]]
    ] = None,
) -> tuple[bool, list[tuple[str, Union[ParseResults, Exception]]]]:
    """
    Execute the parse expression on a series of test strings, showing each
    test, the parsed results or where the parse failed. Quick and easy way to
    run a parse expression against a list of sample strings.

    Parameters:

    - ``tests`` - a list of separate test strings, or a multiline string of test strings
    - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
    - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
      string; pass None to disable comment filtering
    - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
      if False, only dump nested list
    - ``print_results`` - (default= ``True``) prints test output to stdout
    - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
    - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
      `fn(test_string, parse_results)` and returns a string to be added to the test output
    - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
      if None, will default to ``sys.stdout``
    - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers

    Returns: a (success, results) tuple, where success indicates that all tests succeeded
    (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
    test's output

    Example::

        number_expr = pyparsing_common.number.copy()

        result = number_expr.run_tests('''
            # unsigned integer
            100
            # negative integer
            -100
            # float with scientific notation
            6.02e23
            # integer with scientific notation
            1e-12
            ''')
        print("Success" if result[0] else "Failed!")

        result = number_expr.run_tests('''
            # stray character
            100Z
            # missing leading digit before '.'
            -.100
            # too many '.'
            3.14.159
            ''', failure_tests=True)
        print("Success" if result[0] else "Failed!")

    prints::

        # unsigned integer
        100
        [100]
        # negative integer
        -100
        [-100]
        # float with scientific notation
        6.02e23
        [6.02e+23]
        # integer with scientific notation
        1e-12
        [1e-12]
        Success

        # stray character
        100Z
           ^
        FAIL: Expected end of text (at char 3), (line:1, col:4)
        # missing leading digit before '.'
        -.100
        ^
        FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
        # too many '.'
        3.14.159
            ^
        FAIL: Expected end of text (at char 4), (line:1, col:5)
        Success

    Each test string must be on a single line. If you want to test a string that spans multiple
    lines, create a test like this::

        expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")

    (Note that this is a raw string literal, you must include the leading ``'r'``.)
    """
    from .testing import pyparsing_test

    # reconcile the PEP8 and legacy spellings of each flag/callback
    parseAll = parseAll and parse_all
    fullDump = fullDump and full_dump
    printResults = printResults and print_results
    failureTests = failureTests or failure_tests
    postParse = postParse or post_parse
    if isinstance(tests, str_type):
        # split a multiline test string into individual stripped lines
        tests = typing.cast(str, tests)
        line_strip = type(tests).strip
        tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
    comment_specified = comment is not None
    if comment_specified:
        if isinstance(comment, str_type):
            comment = typing.cast(str, comment)
            comment = Literal(comment)
    comment = typing.cast(ParserElement, comment)
    if file is None:
        file = sys.stdout
    print_ = file.write
    result: Union[ParseResults, Exception]
    allResults: list[tuple[str, Union[ParseResults, Exception]]] = []
    comments: list[str] = []
    success = True
    # expression that turns literal "\n" marks in a test string into real newlines
    NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
    BOM = "\ufeff"
    nlstr = "\n"
    for t in tests:
        # collect comment lines (and blank lines following comments) to show above the next test
        if comment_specified and comment.matches(t, False) or comments and not t:
            comments.append(
                pyparsing_test.with_line_numbers(t) if with_line_numbers else t
            )
            continue
        if not t:
            continue
        out = [
            f"{nlstr}{nlstr.join(comments) if comments else ''}",
            pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
        ]
        comments.clear()
        try:
            # convert newline marks to actual newlines, and strip leading BOM if present
            t = NL.transform_string(t.lstrip(BOM))
            result = self.parse_string(t, parse_all=parseAll)
        except ParseBaseException as pe:
            fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else ""
            out.append(pe.explain())
            out.append(f"FAIL: {fatal}{pe}")
            if ParserElement.verbose_stacktrace:
                out.extend(traceback.format_tb(pe.__traceback__))
            # a parse failure only counts as success when failures are expected
            success = success and failureTests
            result = pe
        except Exception as exc:
            tag = "FAIL-EXCEPTION"
            # see if this exception was raised in a parse action
            tb = exc.__traceback__
            it = iter(traceback.walk_tb(tb))
            for f, line in it:
                if (f.f_code.co_filename, line) == pa_call_line_synth:
                    next_f = next(it)[0]
                    tag += f" (raised in parse action {next_f.f_code.co_name!r})"
                    break
            out.append(f"{tag}: {type(exc).__name__}: {exc}")
            if ParserElement.verbose_stacktrace:
                out.extend(traceback.format_tb(exc.__traceback__))
            success = success and failureTests
            result = exc
        else:
            success = success and not failureTests
            if postParse is not None:
                try:
                    pp_value = postParse(t, result)
                    if pp_value is not None:
                        if isinstance(pp_value, ParseResults):
                            out.append(pp_value.dump())
                        else:
                            out.append(str(pp_value))
                    else:
                        out.append(result.dump())
                except Exception as e:
                    # post-parse callback failed: show the dump plus the callback error
                    out.append(result.dump(full=fullDump))
                    out.append(
                        f"{postParse.__name__} failed: {type(e).__name__}: {e}"
                    )
            else:
                out.append(result.dump(full=fullDump))
        out.append("")
        if printResults:
            print_("\n".join(out))
        allResults.append((t, result))
    return success, allResults
def create_diagram(
    self,
    output_html: Union[TextIO, Path, str],
    vertical: int = 3,
    show_results_names: bool = False,
    show_groups: bool = False,
    embed: bool = False,
    show_hidden: bool = False,
    **kwargs,
) -> None:
    """
    Create a railroad diagram for the parser.

    Parameters:

    - ``output_html`` (str or file-like object) - output target for generated
      diagram HTML
    - ``vertical`` (int) - threshold for formatting multiple alternatives vertically
      instead of horizontally (default=3)
    - ``show_results_names`` - bool flag whether diagram should show annotations for
      defined results names
    - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled
      surrounding box
    - ``show_hidden`` - bool flag to show diagram elements for internal elements that
      are usually hidden
    - ``embed`` - bool flag whether generated HTML should omit ``<HTML>``, ``<HEAD>``,
      and ``<BODY>`` tags to embed the resulting HTML in an enclosing HTML source
    - ``head`` - str containing additional HTML to insert into the ``<HEAD>`` section
      of the generated code; can be used to insert custom CSS styling
    - ``body`` - str containing additional HTML to insert at the beginning of the
      ``<BODY>`` section of the generated code

    Additional diagram-formatting keyword arguments can also be included;
    see railroad.Diagram class.
    """
    try:
        # railroad diagram support is an optional extra; import lazily so the
        # core parser works without it installed
        from .diagram import to_railroad, railroad_to_html
    except ImportError as ie:
        raise Exception(
            "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
        ) from ie

    # optimize the grammar before rendering, so the diagram reflects the
    # streamlined structure that will actually be used for parsing
    self.streamline()

    railroad = to_railroad(
        self,
        vertical=vertical,
        show_results_names=show_results_names,
        show_groups=show_groups,
        show_hidden=show_hidden,
        diagram_kwargs=kwargs,
    )
    if not isinstance(output_html, (str, Path)):
        # we were passed a file-like object, just write to it
        output_html.write(railroad_to_html(railroad, embed=embed, **kwargs))
        return

    # otherwise treat output_html as a file path and write the HTML there
    with open(output_html, "w", encoding="utf-8") as diag_file:
        diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs))
# Compatibility synonyms
# Pre-PEP 8 camelCase names, retained for backward compatibility.
# Each is generated by replaced_by_pep8(), which wraps the snake_case
# replacement (presumably emitting a deprecation notice when called —
# replaced_by_pep8 is defined elsewhere in this file).
# fmt: off
inlineLiteralsUsing = staticmethod(replaced_by_pep8("inlineLiteralsUsing", inline_literals_using))
setDefaultWhitespaceChars = staticmethod(replaced_by_pep8(
    "setDefaultWhitespaceChars", set_default_whitespace_chars
))
disableMemoization = staticmethod(replaced_by_pep8("disableMemoization", disable_memoization))
enableLeftRecursion = staticmethod(replaced_by_pep8("enableLeftRecursion", enable_left_recursion))
enablePackrat = staticmethod(replaced_by_pep8("enablePackrat", enable_packrat))
resetCache = staticmethod(replaced_by_pep8("resetCache", reset_cache))
setResultsName = replaced_by_pep8("setResultsName", set_results_name)
setBreak = replaced_by_pep8("setBreak", set_break)
setParseAction = replaced_by_pep8("setParseAction", set_parse_action)
addParseAction = replaced_by_pep8("addParseAction", add_parse_action)
addCondition = replaced_by_pep8("addCondition", add_condition)
setFailAction = replaced_by_pep8("setFailAction", set_fail_action)
tryParse = replaced_by_pep8("tryParse", try_parse)
parseString = replaced_by_pep8("parseString", parse_string)
scanString = replaced_by_pep8("scanString", scan_string)
transformString = replaced_by_pep8("transformString", transform_string)
searchString = replaced_by_pep8("searchString", search_string)
ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
setWhitespaceChars = replaced_by_pep8("setWhitespaceChars", set_whitespace_chars)
parseWithTabs = replaced_by_pep8("parseWithTabs", parse_with_tabs)
setDebugActions = replaced_by_pep8("setDebugActions", set_debug_actions)
setDebug = replaced_by_pep8("setDebug", set_debug)
setName = replaced_by_pep8("setName", set_name)
parseFile = replaced_by_pep8("parseFile", parse_file)
runTests = replaced_by_pep8("runTests", run_tests)
canParseNext = replaced_by_pep8("canParseNext", can_parse_next)
defaultName = default_name
# fmt: on
class _PendingSkip(ParserElement):
    # internal placeholder class to hold a place where '...' is added to a parser element;
    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
    def __init__(self, expr: ParserElement, must_skip: bool = False) -> None:
        super().__init__()
        # the expression that precedes the '...' in the grammar
        self.anchor = expr
        # when True, at least some input must actually be skipped over
        self.must_skip = must_skip

    def _generateDefaultName(self) -> str:
        return str(self.anchor + Empty()).replace("Empty", "...")

    def __add__(self, other) -> ParserElement:
        # resolve the pending skip: '...' becomes SkipTo(other), collecting
        # the skipped-over text under the "_skipped" results name
        skipper = SkipTo(other).set_name("...")("_skipped*")
        if self.must_skip:

            def must_skip(t):
                # if nothing was actually skipped, drop the empty skip entry
                if not t._skipped or t._skipped.as_list() == [""]:
                    del t[0]
                    t.pop("_skipped", None)

            def show_skip(t):
                # anchor failed to match; report it as missing in the results
                if t._skipped.as_list()[-1:] == [""]:
                    t.pop("_skipped")
                    t["_skipped"] = f"missing <{self.anchor!r}>"

            return (
                self.anchor + skipper().add_parse_action(must_skip)
                | skipper().add_parse_action(show_skip)
            ) + other

        return self.anchor + skipper + other

    def __repr__(self):
        return self.defaultName

    def parseImpl(self, *args) -> ParseImplReturnType:
        # a _PendingSkip should never be parsed directly - it must be
        # combined with a following expression first
        raise Exception(
            "use of `...` expression without following SkipTo target expression"
        )
class Token(ParserElement):
    """
    Abstract base class for :class:`ParserElement` subclasses that match
    atomic text patterns (literals, keywords, character words, and the like).
    """

    def __init__(self) -> None:
        # tokens produce single values, never nested result lists
        super().__init__(savelist=False)

    def _generateDefaultName(self) -> str:
        # by default, a Token displays simply as its class name
        return self.__class__.__name__
class NoMatch(Token):
    """
    A token that never matches, whatever the input.
    """

    def __init__(self) -> None:
        super().__init__()
        self.errmsg = "Unmatchable token"
        self.mayIndexError = False
        self._may_return_empty = True

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # fail unconditionally
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """
    Token matching an exact string.

    Example::

        Literal('abc').parse_string('abc')  # -> ['abc']
        Literal('abc').parse_string('abcdef')  # -> ['abc']
        Literal('abc').parse_string('ab')  # -> Exception: Expected "abc"

    Use :class:`CaselessLiteral` for case-insensitive matching.

    Use :class:`Keyword` or :class:`CaselessKeyword` for keyword matching
    (forcing a word break before and after the matched string).
    """

    def __new__(cls, match_string: str = "", *, matchString: str = ""):
        # Performance tuning: route construction to a subclass whose
        # parseImpl is specialized for the given match string.
        if cls is not Literal:
            return super().__new__(cls)
        target = matchString or match_string
        if not target:
            return super().__new__(Empty)
        if len(target) == 1:
            return super().__new__(_SingleCharLiteral)
        return super().__new__(cls)

    def __getnewargs__(self):
        # keep copy.copy() working with the customized __new__ above
        return (self.match,)

    def __init__(self, match_string: str = "", *, matchString: str = "") -> None:
        super().__init__()
        self.match = matchString or match_string
        self.matchLen = len(self.match)
        self.firstMatchChar = self.match[:1]
        self.errmsg = f"Expected {self.name}"
        self._may_return_empty = False
        self.mayIndexError = False

    def _generateDefaultName(self) -> str:
        return repr(self.match)

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # cheap single-char test first, then confirm the full literal
        if instring[loc] != self.firstMatchChar or not instring.startswith(
            self.match, loc
        ):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc + self.matchLen, self.match
class Empty(Literal):
    """
    A token matching the empty string; always succeeds, consuming no input.
    """

    def __init__(self, match_string="", *, matchString="") -> None:
        # arguments are accepted but ignored, so that Literal.__new__ can
        # redirect Literal("") construction to this class
        super().__init__("")
        self.mayIndexError = False
        self._may_return_empty = True

    def _generateDefaultName(self) -> str:
        return "Empty"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # match at the current location with no tokens
        return loc, []
class _SingleCharLiteral(Literal):
    # Literal specialized for one-character match strings - a single
    # character comparison replaces the startswith() call.
    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        if instring[loc] != self.firstMatchChar:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc + 1, self.match
# Literal is the class used when a plain string is auto-promoted to a
# parser element (e.g. in ``expr + "text"``)
ParserElement._literalStringClass = Literal
class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is,
    it must be immediately preceded and followed by whitespace or
    non-keyword characters. Compare with :class:`Literal`:

    - ``Literal("if")`` will match the leading ``'if'`` in
      ``'ifAndOnlyIf'``.
    - ``Keyword("if")`` will not; it will only match the leading
      ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``

    Accepts two optional constructor arguments in addition to the
    keyword string:

    - ``ident_chars`` is a string of characters that would be valid
      identifier characters, defaulting to all alphanumerics + "_" and
      "$"
    - ``caseless`` allows case-insensitive matching, default is ``False``.

    Example::

        Keyword("start").parse_string("start")  # -> ['start']
        Keyword("start").parse_string("starting")  # -> Exception

    For case-insensitive matching, use :class:`CaselessKeyword`.
    """

    # default set of characters treated as "identifier" characters;
    # the keyword must not be adjacent to any of these
    DEFAULT_KEYWORD_CHARS = alphanums + "_$"

    def __init__(
        self,
        match_string: str = "",
        ident_chars: typing.Optional[str] = None,
        caseless: bool = False,
        *,
        matchString: str = "",
        identChars: typing.Optional[str] = None,
    ) -> None:
        super().__init__()
        # fold legacy camelCase arguments into the snake_case ones
        identChars = identChars or ident_chars
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        match_string = matchString or match_string
        self.match = match_string
        self.matchLen = len(match_string)
        self.firstMatchChar = match_string[:1]
        if not self.firstMatchChar:
            raise ValueError("null string passed to Keyword; use Empty() instead")
        self.errmsg = f"Expected {type(self).__name__} {self.name}"
        self._may_return_empty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # pre-uppercase both the keyword and identifier chars so the
            # caseless comparisons in parseImpl are single .upper() calls
            self.caselessmatch = match_string.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def _generateDefaultName(self) -> str:
        return repr(self.match)

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        errmsg = self.errmsg or ""
        errloc = loc
        if self.caseless:
            # caseless path: compare upper-cased slices
            if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
                # keyword text matched - now verify word boundaries on both sides
                if loc == 0 or instring[loc - 1].upper() not in self.identChars:
                    if (
                        # match ends at end of string, or next char is not an identifier char
                        loc >= len(instring) - self.matchLen
                        or instring[loc + self.matchLen].upper() not in self.identChars
                    ):
                        return loc + self.matchLen, self.match

                    # followed by keyword char
                    errmsg += ", was immediately followed by keyword character"
                    errloc = loc + self.matchLen
                else:
                    # preceded by keyword char
                    errmsg += ", keyword was immediately preceded by keyword character"
                    errloc = loc - 1

            # else no match just raise plain exception
        elif (
            instring[loc] == self.firstMatchChar
            and self.matchLen == 1
            or instring.startswith(self.match, loc)
        ):
            # keyword text matched - now verify word boundaries on both sides
            if loc == 0 or instring[loc - 1] not in self.identChars:
                if (
                    # match ends at end of string, or next char is not an identifier char
                    loc >= len(instring) - self.matchLen
                    or instring[loc + self.matchLen] not in self.identChars
                ):
                    return loc + self.matchLen, self.match

                # followed by keyword char
                errmsg += ", keyword was immediately followed by keyword character"
                errloc = loc + self.matchLen
            else:
                # preceded by keyword char
                errmsg += ", keyword was immediately preceded by keyword character"
                errloc = loc - 1

        # else no match just raise plain exception
        raise ParseException(instring, errloc, errmsg, self)

    @staticmethod
    def set_default_keyword_chars(chars) -> None:
        """
        Overrides the default characters used by :class:`Keyword` expressions.
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars

    # Compatibility synonyms
    setDefaultKeywordChars = staticmethod(
        replaced_by_pep8("setDefaultKeywordChars", set_default_keyword_chars)
    )
class CaselessLiteral(Literal):
    """
    Token matching a specified string, ignoring the case of letters.

    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::

        CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
        # -> ['CMD', 'CMD', 'CMD']

    (Contrast with example for :class:`CaselessKeyword`.)
    """

    def __init__(self, match_string: str = "", *, matchString: str = "") -> None:
        defining_string = matchString or match_string
        # match against the upper-cased form of the literal...
        super().__init__(defining_string.upper())
        # ...but return the literal exactly as originally given
        self.returnString = defining_string
        self.errmsg = f"Expected {self.name}"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        end = loc + self.matchLen
        if instring[loc:end].upper() == self.match:
            return end, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
    """
    Caseless version of :class:`Keyword`.

    Example::

        CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
        # -> ['CMD', 'CMD']

    (Contrast with example for :class:`CaselessLiteral`.)
    """

    def __init__(
        self,
        match_string: str = "",
        ident_chars: typing.Optional[str] = None,
        *,
        matchString: str = "",
        identChars: typing.Optional[str] = None,
    ) -> None:
        # fold legacy camelCase arguments into the PEP 8 ones, then delegate
        # to Keyword with caseless matching enabled
        super().__init__(
            matchString or match_string,
            identChars or ident_chars,
            caseless=True,
        )
class CloseMatch(Token):
    """A variation on :class:`Literal` which matches "close" matches,
    that is, strings with at most 'n' mismatching characters.
    :class:`CloseMatch` takes parameters:

    - ``match_string`` - string to be matched
    - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
    - ``max_mismatches`` - (``default=1``) maximum number of
      mismatches allowed to count as a match

    The results from a successful parse will contain the matched text
    from the input string and the following named results:

    - ``mismatches`` - a list of the positions within the
      match_string where mismatches were found
    - ``original`` - the original match_string used to compare
      against the input string

    If ``mismatches`` is an empty list, then the match was an exact
    match.

    Example::

        patt = CloseMatch("ATCATCGAATGGA")
        patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
        patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """

    def __init__(
        self,
        match_string: str,
        max_mismatches: typing.Optional[int] = None,
        *,
        maxMismatches: int = 1,
        caseless=False,
    ) -> None:
        # snake_case argument wins if given; otherwise use the legacy
        # camelCase keyword (whose default supplies the documented 1)
        maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
        super().__init__()
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = f"Expected {self.match_string!r} (with up to {self.maxMismatches} mismatches)"
        self.caseless = caseless
        self.mayIndexError = False
        self._may_return_empty = False

    def _generateDefaultName(self) -> str:
        return f"{type(self).__name__}:{self.match_string!r}"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        # only attempt a match if there is enough input left
        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare character by character, recording mismatch positions;
            # break out early once the mismatch budget is exceeded
            for match_stringloc, s_m in enumerate(
                zip(instring[loc:maxloc], match_string)
            ):
                src, mat = s_m
                if self.caseless:
                    src, mat = src.lower(), mat.lower()

                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # loop ran to completion: mismatch count stayed within budget
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results["original"] = match_string
                results["mismatches"] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
    """Token for matching words composed of allowed character sets.

    Parameters:

    - ``init_chars`` - string of all characters that should be used to
      match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
      if ``body_chars`` is also specified, then this is the string of
      initial characters
    - ``body_chars`` - string of characters that
      can be used for matching after a matched initial character as
      given in ``init_chars``; if omitted, same as the initial characters
      (default=``None``)
    - ``min`` - minimum number of characters to match (default=1)
    - ``max`` - maximum number of characters to match (default=0)
    - ``exact`` - exact number of characters to match (default=0)
    - ``as_keyword`` - match as a keyword (default=``False``)
    - ``exclude_chars`` - characters that might be
      found in the input ``body_chars`` string but which should not be
      accepted for matching; useful to define a word of all
      printables except for one or two characters, for instance
      (default=``None``)

    :class:`srange` is useful for defining custom character set strings
    for defining :class:`Word` expressions, using range notation from
    regular expression character sets.

    A common mistake is to use :class:`Word` to match a specific literal
    string, as in ``Word("Address")``. Remember that :class:`Word`
    uses the string argument to define *sets* of matchable characters.
    This expression would match "Add", "AAA", "dAred", or any other word
    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
    exact literal string, use :class:`Literal` or :class:`Keyword`.

    pyparsing includes helper strings for building Words:

    - :class:`alphas`
    - :class:`nums`
    - :class:`alphanums`
    - :class:`hexnums`
    - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
      - accented, tilded, umlauted, etc.)
    - :class:`punc8bit` (non-alphabetic characters in ASCII range
      128-255 - currency, symbols, superscripts, diacriticals, etc.)
    - :class:`printables` (any non-whitespace character)

    ``alphas``, ``nums``, and ``printables`` are also defined in several
    Unicode sets - see :class:`pyparsing_unicode`.

    Example::

        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))

        # a word with a leading capital, and zero or more lowercase
        capitalized_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums + '-')

        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")

        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, exclude_chars=",")
    """

    def __init__(
        self,
        init_chars: str = "",
        body_chars: typing.Optional[str] = None,
        min: int = 1,
        max: int = 0,
        exact: int = 0,
        as_keyword: bool = False,
        exclude_chars: typing.Optional[str] = None,
        *,
        initChars: typing.Optional[str] = None,
        bodyChars: typing.Optional[str] = None,
        asKeyword: bool = False,
        excludeChars: typing.Optional[str] = None,
    ) -> None:
        # fold legacy camelCase arguments into the snake_case ones
        initChars = initChars or init_chars
        bodyChars = bodyChars or body_chars
        asKeyword = asKeyword or as_keyword
        excludeChars = excludeChars or exclude_chars
        super().__init__()
        if not initChars:
            raise ValueError(
                f"invalid {type(self).__name__}, initChars cannot be empty string"
            )

        # remove any excluded characters from both character sets
        initChars_set = set(initChars)
        if excludeChars:
            excludeChars_set = set(excludeChars)
            initChars_set -= excludeChars_set
            if bodyChars:
                bodyChars = "".join(set(bodyChars) - excludeChars_set)
        self.initChars = initChars_set
        self.initCharsOrig = "".join(sorted(initChars_set))

        if bodyChars:
            self.bodyChars = set(bodyChars)
            self.bodyCharsOrig = "".join(sorted(bodyChars))
        else:
            # no separate body set: body characters are the initial characters
            self.bodyChars = initChars_set
            self.bodyCharsOrig = self.initCharsOrig

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError(
                "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
            )

        if self.maxSpecified and min > max:
            raise ValueError(
                f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})"
            )

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact overrides both min and max
        if exact > 0:
            min = max = exact
            self.maxLen = exact
            self.minLen = exact

        self.errmsg = f"Expected {self.name}"
        self.mayIndexError = False
        self.asKeyword = asKeyword
        if self.asKeyword:
            self.errmsg += " as a keyword"

        # see if we can make a regex for this Word
        if " " not in (self.initChars | self.bodyChars):
            if len(self.initChars) == 1:
                re_leading_fragment = re.escape(self.initCharsOrig)
            else:
                re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]"

            if self.bodyChars == self.initChars:
                # single character class; build quantifier for min/max length
                if max == 0 and self.minLen == 1:
                    repeat = "+"
                elif max == 1:
                    repeat = ""
                else:
                    if self.minLen != self.maxLen:
                        repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}"
                    else:
                        repeat = f"{{{self.minLen}}}"
                self.reString = f"{re_leading_fragment}{repeat}"
            else:
                # distinct leading/body classes; quantifier applies to the body
                # fragment, so min/max are reduced by 1 for the leading char
                if max == 1:
                    re_body_fragment = ""
                    repeat = ""
                else:
                    re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]"
                    if max == 0 and self.minLen == 1:
                        repeat = "*"
                    elif max == 2:
                        repeat = "?" if min <= 1 else ""
                    else:
                        if min != max:
                            repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}"
                        else:
                            repeat = f"{{{min - 1 if min > 0 else ''}}}"

                self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}"

            if self.asKeyword:
                self.reString = rf"\b{self.reString}\b"

            try:
                self.re = re.compile(self.reString)
            except re.error:
                # fall back to the character-by-character parseImpl below
                self.re = None  # type: ignore[assignment]
            else:
                # regex compiled - swap in the faster regex-based parseImpl
                self.re_match = self.re.match
                self.parseImpl = self.parseImpl_regex  # type: ignore[method-assign]

    def _generateDefaultName(self) -> str:
        def charsAsStr(s):
            # compact display of a character set, truncated with "..."
            max_repr_len = 16
            s = _collapse_string_to_ranges(s, re_escape=False)

            if len(s) > max_repr_len:
                return s[: max_repr_len - 3] + "..."

            return s

        if self.initChars != self.bodyChars:
            base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})"
        else:
            base = f"W:({charsAsStr(self.initChars)})"

        # add length specification
        if self.minLen > 1 or self.maxLen != _MAX_INT:
            if self.minLen == self.maxLen:
                if self.minLen == 1:
                    return base[2:]
                else:
                    return base + f"{{{self.minLen}}}"
            elif self.maxLen == _MAX_INT:
                return base + f"{{{self.minLen},...}}"
            else:
                return base + f"{{{self.minLen},{self.maxLen}}}"
        return base

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # fallback (non-regex) implementation
        if instring[loc] not in self.initChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        body_chars: set[str] = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min(maxloc, instrlen)

        # consume body characters up to the maximum length
        while loc < maxloc and instring[loc] in body_chars:
            loc += 1

        throw_exception = False
        if loc - start < self.minLen:
            # too short
            throw_exception = True
        elif self.maxSpecified and loc < instrlen and instring[loc] in body_chars:
            # word continues beyond the specified maximum
            throw_exception = True
        elif self.asKeyword and (
            (start > 0 and instring[start - 1] in body_chars)
            or (loc < instrlen and instring[loc] in body_chars)
        ):
            # keyword mode: word must not touch adjacent body characters
            throw_exception = True

        if throw_exception:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def parseImpl_regex(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # fast path used when a regex could be built in __init__
        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        return loc, result.group()
class Char(Word):
    """Convenience subclass of :class:`Word`, matching exactly one character
    from a given set - equivalent to ``Word(characters, exact=1)``.
    """

    def __init__(
        self,
        charset: str,
        as_keyword: bool = False,
        exclude_chars: typing.Optional[str] = None,
        *,
        asKeyword: bool = False,
        excludeChars: typing.Optional[str] = None,
    ) -> None:
        # fold legacy camelCase arguments into the snake_case ones and
        # delegate to Word with an exact length of 1
        super().__init__(
            charset,
            exact=1,
            as_keyword=asKeyword or as_keyword,
            exclude_chars=excludeChars or exclude_chars,
        )
class Regex(Token):
    r"""Token for matching strings that match a given regular
    expression. Defined with string specifying the regular expression in
    a form recognized by the stdlib Python ``re`` module.
    If the given regex contains named groups (defined using ``(?P<name>...)``),
    these will be preserved as named :class:`ParseResults`.

    If instead of the Python stdlib ``re`` module you wish to use a different RE module
    (such as the ``regex`` module), you can do so by building your ``Regex`` object with
    a compiled RE that was compiled using ``regex``.

    Example::

        realnum = Regex(r"[+-]?\d+\.\d*")
        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")

        # named fields in a regex will be returned as named results
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')

        # the Regex class will accept re's compiled using the regex module
        import regex
        parser = pp.Regex(regex.compile(r'[0-9]'))
    """

    def __init__(
        self,
        pattern: Any,
        flags: Union[re.RegexFlag, int] = 0,
        as_group_list: bool = False,
        as_match: bool = False,
        *,
        asGroupList: bool = False,
        asMatch: bool = False,
    ) -> None:
        """The parameters ``pattern`` and ``flags`` are passed
        to the ``re.compile()`` function as-is. See the Python
        ``re`` module documentation for an
        explanation of the acceptable patterns and flags.
        """
        super().__init__()
        # fold legacy camelCase arguments into the snake_case ones
        asGroupList = asGroupList or as_group_list
        asMatch = asMatch or as_match

        if isinstance(pattern, str_type):
            if not pattern:
                raise ValueError("null string passed to Regex; use Empty() instead")

            # compilation is deferred to the `re` cached_property below;
            # _may_return_empty stays None until the pattern is compiled
            self._re = None
            self._may_return_empty = None  # type: ignore [assignment]
            self.reString = self.pattern = pattern

        elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
            # a pre-compiled RE object (stdlib re, or a compatible module)
            self._re = pattern
            self._may_return_empty = None  # type: ignore [assignment]
            self.pattern = self.reString = pattern.pattern

        elif callable(pattern):
            # defer creating this pattern until we really need it
            self.pattern = pattern
            self._may_return_empty = None  # type: ignore [assignment]
            self._re = None

        else:
            raise TypeError(
                "Regex may only be constructed with a string or a compiled RE object,"
                " or a callable that takes no arguments and returns a string or a"
                " compiled RE object"
            )

        self.flags = flags
        self.errmsg = f"Expected {self.name}"
        self.mayIndexError = False
        self.asGroupList = asGroupList
        self.asMatch = asMatch
        # swap in a specialized parseImpl for group-list or match-object results
        if self.asGroupList:
            self.parseImpl = self.parseImplAsGroupList  # type: ignore [method-assign]
        if self.asMatch:
            self.parseImpl = self.parseImplAsMatch  # type: ignore [method-assign]

    @cached_property
    def re(self) -> re.Pattern:
        # lazily resolve/compile the pattern the first time it is needed
        if self._re:
            return self._re

        if callable(self.pattern):
            # replace self.pattern with the string returned by calling self.pattern()
            self.pattern = cast(Callable[[], str], self.pattern)()

            # see if we got a compiled RE back instead of a str - if so, we're done
            if hasattr(self.pattern, "pattern") and hasattr(self.pattern, "match"):
                self._re = cast(re.Pattern[str], self.pattern)
                self.pattern = self.reString = self._re.pattern
                return self._re

        try:
            self._re = re.compile(self.pattern, self.flags)
        except re.error:
            raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex")
        else:
            # an empty-string match at pos 0 means the regex can match nothing
            self._may_return_empty = self.re.match("", pos=0) is not None
            return self._re

    @cached_property
    def re_match(self) -> Callable[[str, int], Any]:
        # cached bound method, avoids an attribute lookup per parse attempt
        return self.re.match

    @property
    def mayReturnEmpty(self):
        if self._may_return_empty is None:
            # force compile of regex pattern, to set may_return_empty flag
            self.re  # noqa
        return self._may_return_empty

    @mayReturnEmpty.setter
    def mayReturnEmpty(self, value):
        self._may_return_empty = value

    def _generateDefaultName(self) -> str:
        unescaped = repr(self.pattern).replace("\\\\", "\\")
        return f"Re:({unescaped})"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # explicit check for matching past the length of the string;
        # this is done because the re module will not complain about
        # a match with `pos > len(instring)`, it will just return ""
        if loc > len(instring) and self.mayReturnEmpty:
            raise ParseException(instring, loc, self.errmsg, self)

        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = ParseResults(result.group())
        # expose named groups as named results
        d = result.groupdict()

        for k, v in d.items():
            ret[k] = v

        return loc, ret

    def parseImplAsGroupList(self, instring, loc, do_actions=True):
        # variant used when as_group_list=True: returns result.groups()
        if loc > len(instring) and self.mayReturnEmpty:
            raise ParseException(instring, loc, self.errmsg, self)

        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.groups()
        return loc, ret

    def parseImplAsMatch(self, instring, loc, do_actions=True):
        # variant used when as_match=True: returns the raw match object
        if loc > len(instring) and self.mayReturnEmpty:
            raise ParseException(instring, loc, self.errmsg, self)

        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result
        return loc, ret

    def sub(self, repl: str) -> ParserElement:
        r"""
        Return :class:`Regex` with an attached parse action to transform the parsed
        result as if called using ``re.sub(expr, repl, string)``.

        Example::

            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
            print(make_html.transform_string("h1:main title:"))
            # prints "<h1>main title</h1>"
        """
        if self.asGroupList:
            raise TypeError("cannot use sub() with Regex(as_group_list=True)")

        if self.asMatch and callable(repl):
            raise TypeError(
                "cannot use sub() with a callable with Regex(as_match=True)"
            )

        if self.asMatch:

            def pa(tokens):
                # tokens[0] is a match object; expand backreferences in repl
                return tokens[0].expand(repl)

        else:

            def pa(tokens):
                return self.re.sub(repl, tokens[0])

        return self.add_parse_action(pa)
class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.

    Defined with the following parameters:

    - ``quote_char`` - string of one or more characters defining the
      quote delimiting string
    - ``esc_char`` - character to re_escape quotes, typically backslash
      (default= ``None``)
    - ``esc_quote`` - special quote sequence to re_escape an embedded quote
      string (such as SQL's ``""`` to re_escape an embedded ``"``)
      (default= ``None``)
    - ``multiline`` - boolean indicating whether quotes can span
      multiple lines (default= ``False``)
    - ``unquote_results`` - boolean indicating whether the matched text
      should be unquoted (default= ``True``)
    - ``end_quote_char`` - string of one or more characters defining the
      end of the quote delimited string (default= ``None`` => same as
      quote_char)
    - ``convert_whitespace_escapes`` - convert escaped whitespace
      (``'\t'``, ``'\n'``, etc.) to actual whitespace
      (default= ``True``)

    Example::

        qs = QuotedString('"')
        print(qs.search_string('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', end_quote_char='}}')
        print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', esc_quote='""')
        print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))

    prints::

        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """

    # mapping of backslash-escaped whitespace sequences to the literal
    # whitespace characters they represent
    ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")))

    def __init__(
        self,
        quote_char: str = "",
        esc_char: typing.Optional[str] = None,
        esc_quote: typing.Optional[str] = None,
        multiline: bool = False,
        unquote_results: bool = True,
        end_quote_char: typing.Optional[str] = None,
        convert_whitespace_escapes: bool = True,
        *,
        quoteChar: str = "",
        escChar: typing.Optional[str] = None,
        escQuote: typing.Optional[str] = None,
        unquoteResults: bool = True,
        endQuoteChar: typing.Optional[str] = None,
        convertWhitespaceEscapes: bool = True,
    ) -> None:
        super().__init__()

        # reconcile PEP 8 snake_case args with the legacy camelCase synonyms;
        # an explicitly-passed legacy arg takes precedence
        esc_char = escChar or esc_char
        esc_quote = escQuote or esc_quote
        unquote_results = unquoteResults and unquote_results
        end_quote_char = endQuoteChar or end_quote_char
        convert_whitespace_escapes = (
            convertWhitespaceEscapes and convert_whitespace_escapes
        )
        quote_char = quoteChar or quote_char

        # remove white space from quote chars
        quote_char = quote_char.strip()
        if not quote_char:
            raise ValueError("quote_char cannot be the empty string")

        if end_quote_char is None:
            end_quote_char = quote_char
        else:
            end_quote_char = end_quote_char.strip()
            if not end_quote_char:
                raise ValueError("end_quote_char cannot be the empty string")

        self.quote_char: str = quote_char
        self.quote_char_len: int = len(quote_char)
        self.first_quote_char: str = quote_char[0]
        self.end_quote_char: str = end_quote_char
        self.end_quote_char_len: int = len(end_quote_char)
        self.esc_char: str = esc_char or ""
        self.has_esc_char: bool = esc_char is not None
        self.esc_quote: str = esc_quote or ""
        self.unquote_results: bool = unquote_results
        self.convert_whitespace_escapes: bool = convert_whitespace_escapes
        self.multiline = multiline
        self.re_flags = re.RegexFlag(0)

        # fmt: off
        # build up re pattern for the content between the quote delimiters
        inner_pattern: list[str] = []

        if esc_quote:
            inner_pattern.append(rf"(?:{re.escape(esc_quote)})")

        if esc_char:
            inner_pattern.append(rf"(?:{re.escape(esc_char)}.)")

        if len(self.end_quote_char) > 1:
            # allow partial prefixes of the end-quote string, as long as they
            # are not followed by the remainder of the end-quote string
            inner_pattern.append(
                "(?:"
                + "|".join(
                    f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))"
                    for i in range(len(self.end_quote_char) - 1, 0, -1)
                )
                + ")"
            )

        if self.multiline:
            self.re_flags |= re.MULTILINE | re.DOTALL
            inner_pattern.append(
                rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}"
                rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])"
            )
        else:
            inner_pattern.append(
                rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r"
                rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])"
            )

        self.pattern = "".join(
            [
                re.escape(self.quote_char),
                "(?:",
                '|'.join(inner_pattern),
                ")*",
                re.escape(self.end_quote_char),
            ]
        )

        if self.unquote_results:
            if self.convert_whitespace_escapes:
                # FIX: the regex quantifiers below must be written with
                # doubled braces ({{3}}, {{2}}, {{4}}) so that f-string
                # interpolation emits the quantifiers {3}, {2}, {4};
                # single braces would be interpolated into literal digits
                # and break octal/hex/Unicode escape unquoting
                self.unquote_scan_re = re.compile(
                    rf"({'|'.join(re.escape(k) for k in self.ws_map)})"
                    rf"|(\\[0-7]{{3}}|\\0|\\x[0-9a-fA-F]{{2}}|\\u[0-9a-fA-F]{{4}})"
                    rf"|({re.escape(self.esc_char)}.)"
                    rf"|(\n|.)",
                    flags=self.re_flags,
                )
            else:
                self.unquote_scan_re = re.compile(
                    rf"({re.escape(self.esc_char)}.)"
                    rf"|(\n|.)",
                    flags=self.re_flags
                )
        # fmt: on

        try:
            self.re = re.compile(self.pattern, self.re_flags)
            self.reString = self.pattern
            self.re_match = self.re.match
        except re.error:
            raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex")

        self.errmsg = f"Expected {self.name}"
        self.mayIndexError = False
        self._may_return_empty = True

    def _generateDefaultName(self) -> str:
        if self.quote_char == self.end_quote_char and isinstance(
            self.quote_char, str_type
        ):
            return f"string enclosed in {self.quote_char!r}"

        return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # check first character of opening quote to see if that is a match
        # before doing the more complicated regex match
        result = (
            instring[loc] == self.first_quote_char
            and self.re_match(instring, loc)
            or None
        )
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        # get ending loc and matched string from regex matching result
        loc = result.end()
        ret = result.group()

        def convert_escaped_numerics(s: str) -> str:
            # convert a numeric escape body ("0", octal "123", "x41", "u00e9")
            # to the single character it denotes; anything else passes through
            if s == "0":
                return "\0"
            if s.isdigit() and len(s) == 3:
                return chr(int(s, base=8))
            elif s.startswith(("u", "x")):
                return chr(int(s[1:], base=16))
            else:
                return s

        if self.unquote_results:
            # strip off quotes
            ret = ret[self.quote_char_len : -self.end_quote_char_len]

            if isinstance(ret, str_type):
                # fmt: off
                if self.convert_whitespace_escapes:
                    # as we iterate over matches in the input string,
                    # collect from whichever match group of the unquote_scan_re
                    # regex matches (only 1 group will match at any given time)
                    ret = "".join(
                        # match group 1 matches \t, \n, etc.
                        self.ws_map[match.group(1)] if match.group(1)
                        # match group 2 matches escaped octal, null, hex, and Unicode
                        # sequences
                        else convert_escaped_numerics(match.group(2)[1:]) if match.group(2)
                        # match group 3 matches escaped characters
                        else match.group(3)[-1] if match.group(3)
                        # match group 4 matches any character
                        else match.group(4)
                        for match in self.unquote_scan_re.finditer(ret)
                    )
                else:
                    ret = "".join(
                        # match group 1 matches escaped characters
                        match.group(1)[-1] if match.group(1)
                        # match group 2 matches any character
                        else match.group(2)
                        for match in self.unquote_scan_re.finditer(ret)
                    )
                # fmt: on

                # replace escaped quotes
                if self.esc_quote:
                    ret = ret.replace(self.esc_quote, self.end_quote_char)

        return loc, ret
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given
    set (will include whitespace in matched characters if not listed in
    the provided exclusion set - see example). Defined with string
    containing all disallowed characters, and an optional minimum,
    maximum, and/or exact length. The default value for ``min`` is
    1 (a minimum value < 1 is not valid); the default values for
    ``max`` and ``exact`` are 0, meaning no maximum or exact
    length restriction.

    Example::

        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(DelimitedList(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))

    prints::

        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """

    def __init__(
        self,
        not_chars: str = "",
        min: int = 1,
        max: int = 0,
        exact: int = 0,
        *,
        notChars: str = "",
    ) -> None:
        super().__init__()
        # whitespace is significant here - it may be part of the matched run
        self.skipWhitespace = False
        self.notChars = not_chars or notChars
        self.notCharsSet = set(self.notChars)

        if min < 1:
            raise ValueError(
                "cannot specify a minimum length < 1; use"
                " Opt(CharsNotIn()) if zero-length char group is permitted"
            )

        # resolve min/max/exact into effective length bounds
        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        if exact > 0:
            self.minLen = self.maxLen = exact

        self.errmsg = f"Expected {self.name}"
        self._may_return_empty = self.minLen == 0
        self.mayIndexError = False

    def _generateDefaultName(self) -> str:
        not_chars_str = _collapse_string_to_ranges(self.notChars)
        if len(not_chars_str) > 16:
            return f"!W:({self.notChars[: 16 - 3]}...)"
        return f"!W:({self.notChars})"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        forbidden = self.notCharsSet
        if instring[loc] in forbidden:
            raise ParseException(instring, loc, self.errmsg, self)

        # consume characters until a forbidden one, end of input, or maxLen
        start = loc
        loc += 1
        limit = min(start + self.maxLen, len(instring))
        while loc < limit and instring[loc] not in forbidden:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]
class White(Token):
    """Special matching class for matching whitespace. Normally,
    whitespace is ignored by pyparsing grammars. This class is included
    when some whitespace structures are significant. Define with
    a string containing the whitespace characters to be matched; default
    is ``" \\t\\r\\n"``. Also takes optional ``min``,
    ``max``, and ``exact`` arguments, as defined for the
    :class:`Word` class.
    """

    # FIX: display names for each whitespace character, used to build the
    # expression's default name; these had been reduced to empty strings
    # (which made _generateDefaultName return ""), restore the tag names
    whiteStrs = {
        " ": "<SP>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        "\u00A0": "<NBSP>",
        "\u1680": "<OGHAM_SPACE_MARK>",
        "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
        "\u2000": "<EN_QUAD>",
        "\u2001": "<EM_QUAD>",
        "\u2002": "<EN_SPACE>",
        "\u2003": "<EM_SPACE>",
        "\u2004": "<THREE-PER-EM_SPACE>",
        "\u2005": "<FOUR-PER-EM_SPACE>",
        "\u2006": "<SIX-PER-EM_SPACE>",
        "\u2007": "<FIGURE_SPACE>",
        "\u2008": "<PUNCTUATION_SPACE>",
        "\u2009": "<THIN_SPACE>",
        "\u200A": "<HAIR_SPACE>",
        "\u200B": "<ZERO_WIDTH_SPACE>",
        "\u202F": "<NNBSP>",
        "\u205F": "<MMSP>",
        "\u3000": "<IDEOGRAPHIC_SPACE>",
    }

    def __init__(
        self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0
    ) -> None:
        super().__init__()
        self.matchWhite = ws
        # whatever whitespace we are matching must NOT also be skipped;
        # skip only the whitespace characters we are not matching
        self.set_whitespace_chars(
            "".join(c for c in self.whiteStrs if c not in self.matchWhite),
            copy_defaults=True,
        )
        # self.leave_whitespace()
        self._may_return_empty = True
        self.errmsg = f"Expected {self.name}"

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact

    def _generateDefaultName(self) -> str:
        return "".join(White.whiteStrs[c] for c in self.matchWhite)

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        if instring[loc] not in self.matchWhite:
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min(maxloc, len(instring))
        # consume whitespace up to maxLen characters or end of input
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]
class PositionToken(Token):
    """Abstract base class for tokens that match a *position* in the
    input rather than consuming text; all such matches are zero-width.
    """

    def __init__(self) -> None:
        super().__init__()
        # zero-width matches never index past the end of the input,
        # and may always "return empty"
        self.mayIndexError = False
        self._may_return_empty = True
class GoToColumn(PositionToken):
    """Token to advance to a specific column of input text; useful for
    tabular report scraping.
    """

    def __init__(self, colno: int) -> None:
        super().__init__()
        # 1-based target column to advance to
        self.col = colno

    def preParse(self, instring: str, loc: int) -> int:
        # already at the target column - nothing to skip
        if col(loc, instring) == self.col:
            return loc

        end = len(instring)
        if self.ignoreExprs:
            loc = self._skipIgnorables(instring, loc)
        # advance over whitespace until target column or end of input
        while (
            loc < end
            and instring[loc].isspace()
            and col(loc, instring) != self.col
        ):
            loc += 1
        return loc

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        current_col = col(loc, instring)
        if current_col > self.col:
            raise ParseException(instring, loc, "Text not in expected column", self)

        new_loc = loc + self.col - current_col
        matched = instring[loc:new_loc]
        return new_loc, matched
class LineStart(PositionToken):
    r"""Matches if current position is at the beginning of a line within
    the parse string

    Example::

        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + rest_of_line).search_string(test):
            print(t)

    prints::

        ['AAA', ' this line']
        ['AAA', ' and this line']
    """

    def __init__(self) -> None:
        super().__init__()
        self.leave_whitespace()
        # remember the full whitespace set, then drop "\n" from the active
        # set so that leading-whitespace skipping stops at newlines
        self.orig_whiteChars = set() | self.whiteChars
        self.whiteChars.discard("\n")
        # helper used by preParse to skip non-newline leading whitespace
        self.skipper = Empty().set_whitespace_chars(self.whiteChars)
        self.set_name("start of line")

    def preParse(self, instring: str, loc: int) -> int:
        # start of input is trivially a line start
        if loc == 0:
            return loc

        ret = self.skipper.preParse(instring, loc)

        # if "\n" was originally skippable, hop over newlines (skipping
        # any further whitespace after each) to land on a line start
        if "\n" in self.orig_whiteChars:
            while instring[ret : ret + 1] == "\n":
                ret = self.skipper.preParse(instring, ret + 1)

        return ret

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # zero-width match: succeed only in column 1
        if col(loc, instring) == 1:
            return loc, []
        raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(PositionToken):
    """Matches if current position is at the end of a line within the
    parse string
    """

    def __init__(self) -> None:
        super().__init__()
        # do not skip newlines - they are exactly what we want to match
        self.whiteChars.discard("\n")
        self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
        self.set_name("end of line")

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        if loc == len(instring):
            # end of input counts as an end of line; step past the end
            return loc + 1, []
        if loc < len(instring):
            if instring[loc] == "\n":
                return loc + 1, "\n"
            raise ParseException(instring, loc, self.errmsg, self)
        raise ParseException(instring, loc, self.errmsg, self)
class StringStart(PositionToken):
    """Matches if current position is at the beginning of the parse
    string
    """

    def __init__(self) -> None:
        super().__init__()
        self.set_name("start of text")

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        if loc == 0:
            return loc, []
        # a nonzero loc is acceptable only if everything before it is
        # skippable whitespace/ignorables
        if loc == self.preParse(instring, 0):
            return loc, []
        raise ParseException(instring, loc, self.errmsg, self)
class StringEnd(PositionToken):
    """
    Matches if current position is at the end of the parse string
    """

    def __init__(self) -> None:
        super().__init__()
        self.set_name("end of text")

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        end = len(instring)
        if loc < end:
            raise ParseException(instring, loc, self.errmsg, self)
        if loc == end:
            # exactly at the end - step past it so repeated matches fail
            return loc + 1, []
        # already past the end
        return loc, []
class WordStart(PositionToken):
    r"""Matches if the current position is at the beginning of a
    :class:`Word`, and is not preceded by any character in a given
    set of ``word_chars`` (default= ``printables``). To emulate the
    ``\b`` behavior of regular expressions, use
    ``WordStart(alphanums)``. ``WordStart`` will also match at
    the beginning of the string being parsed, or at the beginning of
    a line.
    """

    def __init__(
        self, word_chars: str = printables, *, wordChars: str = printables
    ) -> None:
        # honor the legacy wordChars argument only when explicitly passed
        wordChars = word_chars if wordChars == printables else wordChars
        super().__init__()
        self.wordChars = set(wordChars)
        self.set_name("start of a word")

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # start of input is always a word start
        if loc == 0:
            return loc, []

        # fail if the previous char is a word char (mid-word) or the
        # current char is not a word char (not at a word at all)
        if (
            instring[loc - 1] in self.wordChars
            or instring[loc] not in self.wordChars
        ):
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, []
class WordEnd(PositionToken):
    r"""Matches if the current position is at the end of a :class:`Word`,
    and is not followed by any character in a given set of ``word_chars``
    (default= ``printables``). To emulate the ``\b`` behavior of
    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
    will also match at the end of the string being parsed, or at the end
    of a line.
    """

    def __init__(
        self, word_chars: str = printables, *, wordChars: str = printables
    ) -> None:
        # honor the legacy wordChars argument only when explicitly passed
        wordChars = word_chars if wordChars == printables else wordChars
        super().__init__()
        self.wordChars = set(wordChars)
        # whitespace may immediately follow a word end - don't skip it
        self.skipWhitespace = False
        self.set_name("end of a word")

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        end = len(instring)
        # past the end of input always counts as a word end
        if end > 0 and loc < end:
            # fail if the current char continues a word, or the previous
            # char was not a word char (so there is no word to end)
            if (
                instring[loc] in self.wordChars
                or instring[loc - 1] not in self.wordChars
            ):
                raise ParseException(instring, loc, self.errmsg, self)

        return loc, []
class Tag(Token):
    """
    A meta-element for inserting a named result into the parsed
    tokens that may be checked later in a parse action or while
    processing the parsed results. Accepts an optional tag value,
    defaulting to `True`.

    Example::

        end_punc = "." | ("!" + Tag("enthusiastic"))
        greeting = "Hello," + Word(alphas) + end_punc

        result = greeting.parse_string("Hello, World.")
        print(result.dump())

        result = greeting.parse_string("Hello, World!")
        print(result.dump())

    prints::

        ['Hello,', 'World', '.']

        ['Hello,', 'World', '!']
        - enthusiastic: True
    """

    def __init__(self, tag_name: str, value: Any = True) -> None:
        super().__init__()
        # matches nothing and consumes nothing
        self.leave_whitespace()
        self._may_return_empty = True
        self.mayIndexError = False
        # name/value pair injected into the results by the parse action
        self.tag_name = tag_name
        self.tag_value = value
        self.add_parse_action(self._add_tag)
        # meta-element: keep it out of railroad diagrams
        self.show_in_diagram = False

    def _add_tag(self, tokens: ParseResults):
        # stamp the configured value into the results under tag_name
        tokens[self.tag_name] = self.tag_value

    def _generateDefaultName(self) -> str:
        return f"{type(self).__name__}:{self.tag_name}={self.tag_value!r}"
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and
    post-processing parsed tokens.
    """

    def __init__(
        self, exprs: typing.Iterable[ParserElement], savelist: bool = False
    ) -> None:
        # Normalize exprs to a list of ParserElements, wrapping any plain
        # strings in the configured literal class.
        super().__init__(savelist)
        self.exprs: list[ParserElement]
        if isinstance(exprs, _generatorType):
            exprs = list(exprs)

        if isinstance(exprs, str_type):
            self.exprs = [self._literalStringClass(exprs)]
        elif isinstance(exprs, ParserElement):
            self.exprs = [exprs]
        elif isinstance(exprs, Iterable):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if any(isinstance(expr, str_type) for expr in exprs):
                exprs = (
                    self._literalStringClass(e) if isinstance(e, str_type) else e
                    for e in exprs
                )
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list(exprs)
            except TypeError:
                self.exprs = [exprs]
        self.callPreparse = False

    def recurse(self) -> list[ParserElement]:
        # return a copy so callers cannot mutate our expression list
        return self.exprs[:]

    def append(self, other) -> ParserElement:
        # add another contained expression; invalidate the cached name
        self.exprs.append(other)
        self._defaultName = None
        return self

    def leave_whitespace(self, recursive: bool = True) -> ParserElement:
        """
        Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
        all contained expressions.
        """
        super().leave_whitespace(recursive)
        if recursive:
            # copy first so shared sub-expressions elsewhere are unaffected
            self.exprs = [e.copy() for e in self.exprs]
            for e in self.exprs:
                e.leave_whitespace(recursive)
        return self

    def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
        """
        Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
        all contained expressions.
        """
        super().ignore_whitespace(recursive)
        if recursive:
            # copy first so shared sub-expressions elsewhere are unaffected
            self.exprs = [e.copy() for e in self.exprs]
            for e in self.exprs:
                e.ignore_whitespace(recursive)
        return self

    def ignore(self, other) -> ParserElement:
        # propagate the ignorable expression to all contained expressions;
        # a Suppress is only added if not already being ignored
        if isinstance(other, Suppress):
            if other not in self.ignoreExprs:
                super().ignore(other)
                for e in self.exprs:
                    e.ignore(self.ignoreExprs[-1])
        else:
            super().ignore(other)
            for e in self.exprs:
                e.ignore(self.ignoreExprs[-1])
        return self

    def _generateDefaultName(self) -> str:
        return f"{type(self).__name__}:({self.exprs})"

    def streamline(self) -> ParserElement:
        if self.streamlined:
            return self
        super().streamline()

        for e in self.exprs:
            e.streamline()

        # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
        if len(self.exprs) == 2:
            other = self.exprs[0]
            if (
                isinstance(other, self.__class__)
                and not other.parseAction
                and other.resultsName is None
                and not other.debug
            ):
                # hoist the first child's sub-expressions into this one
                self.exprs = other.exprs[:] + [self.exprs[1]]
                self._defaultName = None
                self._may_return_empty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError

            other = self.exprs[-1]
            if (
                isinstance(other, self.__class__)
                and not other.parseAction
                and other.resultsName is None
                and not other.debug
            ):
                # hoist the last child's sub-expressions into this one
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self._defaultName = None
                self._may_return_empty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError

        self.errmsg = f"Expected {self}"

        return self

    def validate(self, validateTrace=None) -> None:
        warnings.warn(
            "ParserElement.validate() is deprecated, and should not be used to check for left recursion",
            DeprecationWarning,
            stacklevel=2,
        )
        tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
        for e in self.exprs:
            e.validate(tmp)
        self._checkRecursion([])

    def copy(self) -> ParserElement:
        # deep-ish copy: contained expressions are copied too
        ret = super().copy()
        ret = typing.cast(ParseExpression, ret)
        ret.exprs = [e.copy() for e in self.exprs]
        return ret

    def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
        # fast path: diagnostic disabled or suppressed for this expression
        if not (
            __diag__.warn_ungrouped_named_tokens_in_collection
            and Diagnostics.warn_ungrouped_named_tokens_in_collection
            not in self.suppress_warnings_
        ):
            return super()._setResultsName(name, list_all_matches)

        # warn if a contained expression already has a results name that
        # this new collection-level name would collide with
        for e in self.exprs:
            if (
                isinstance(e, ParserElement)
                and e.resultsName
                and (
                    Diagnostics.warn_ungrouped_named_tokens_in_collection
                    not in e.suppress_warnings_
                )
            ):
                warning = (
                    "warn_ungrouped_named_tokens_in_collection:"
                    f" setting results name {name!r} on {type(self).__name__} expression"
                    f" collides with {e.resultsName!r} on contained expression"
                )
                warnings.warn(warning, stacklevel=3)
                break

        return super()._setResultsName(name, list_all_matches)

    # Compatibility synonyms
    # fmt: off
    leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
    ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
    # fmt: on
class And(ParseExpression):
    """
    Requires all given :class:`ParserElement` s to be found in the given order.
    Expressions may be separated by whitespace.

    May be constructed using the ``'+'`` operator.
    May also be constructed using the ``'-'`` operator, which will
    suppress backtracking.

    Example::

        integer = Word(nums)
        name_expr = Word(alphas)[1, ...]

        expr = And([integer("id"), name_expr("name"), integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """

    class _ErrorStop(Empty):
        # marker inserted by the '-' operator: everything after it must
        # match, or a non-backtrackable ParseSyntaxException is raised
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.leave_whitespace()

        def _generateDefaultName(self) -> str:
            return "-"

    def __init__(
        self,
        exprs_arg: typing.Iterable[Union[ParserElement, str]],
        savelist: bool = True,
    ) -> None:
        # instantiate exprs as a list, converting strs to ParserElements
        exprs: list[ParserElement] = [
            self._literalStringClass(e) if isinstance(e, str) else e for e in exprs_arg
        ]

        # convert any Ellipsis elements to SkipTo
        if Ellipsis in exprs:
            # Ellipsis cannot be the last element
            if exprs[-1] is Ellipsis:
                raise Exception("cannot construct And with sequence ending in ...")

            # each "..." is replaced with a SkipTo of the element after it
            tmp: list[ParserElement] = []
            for cur_expr, next_expr in zip(exprs, exprs[1:]):
                if cur_expr is Ellipsis:
                    tmp.append(SkipTo(next_expr)("_skipped*"))
                else:
                    tmp.append(cur_expr)

            exprs[:-1] = tmp

        super().__init__(exprs, savelist)
        if self.exprs:
            # an And is empty-matching only if every element is
            self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs)
            # inherit leading-whitespace behavior from the first element
            # (unless it is a White, whose whitespace is significant)
            if not isinstance(self.exprs[0], White):
                self.set_whitespace_chars(
                    self.exprs[0].whiteChars,
                    copy_defaults=self.exprs[0].copyDefaultWhiteChars,
                )
                self.skipWhitespace = self.exprs[0].skipWhitespace
            else:
                self.skipWhitespace = False
        else:
            self._may_return_empty = True
        self.callPreparse = True

    def streamline(self) -> ParserElement:
        # collapse any _PendingSkip's
        if self.exprs and any(
            isinstance(e, ParseExpression)
            and e.exprs
            and isinstance(e.exprs[-1], _PendingSkip)
            for e in self.exprs[:-1]
        ):
            # merge each trailing _PendingSkip with the element that
            # follows it, marking the absorbed element for deletion
            deleted_expr_marker = NoMatch()
            for i, e in enumerate(self.exprs[:-1]):
                if e is deleted_expr_marker:
                    continue
                if (
                    isinstance(e, ParseExpression)
                    and e.exprs
                    and isinstance(e.exprs[-1], _PendingSkip)
                ):
                    e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
                    self.exprs[i + 1] = deleted_expr_marker
            self.exprs = [e for e in self.exprs if e is not deleted_expr_marker]

        super().streamline()

        # link any IndentedBlocks to the prior expression
        prev: ParserElement
        cur: ParserElement
        for prev, cur in zip(self.exprs, self.exprs[1:]):
            # traverse cur or any first embedded expr of cur looking for an IndentedBlock
            # (but watch out for recursive grammar)
            seen = set()
            while True:
                if id(cur) in seen:
                    break
                seen.add(id(cur))
                if isinstance(cur, IndentedBlock):
                    # anchor the IndentedBlock's indent to wherever the
                    # prior expression matched
                    prev.add_parse_action(
                        lambda s, l, t, cur_=cur: setattr(
                            cur_, "parent_anchor", col(l, s)
                        )
                    )
                    break
                subs = cur.recurse()
                next_first = next(iter(subs), None)
                if next_first is None:
                    break
                cur = typing.cast(ParserElement, next_first)

        self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs)
        return self

    def parseImpl(self, instring, loc, do_actions=True):
        # pass False as callPreParse arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse(
            instring, loc, do_actions, callPreParse=False
        )
        errorStop = False
        for e in self.exprs[1:]:
            # if isinstance(e, And._ErrorStop):
            if type(e) is And._ErrorStop:
                # past this point, failures become non-backtrackable
                errorStop = True
                continue
            if errorStop:
                try:
                    loc, exprtokens = e._parse(instring, loc, do_actions)
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(
                        instring, len(instring), self.errmsg, self
                    )
            else:
                loc, exprtokens = e._parse(instring, loc, do_actions)
            resultlist += exprtokens
        return loc, resultlist

    def __iadd__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if not isinstance(other, ParserElement):
            return NotImplemented
        return self.append(other)  # And([self, other])

    def _checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e._checkRecursion(subRecCheckList)
            # once an element must consume input, later elements cannot
            # participate in left recursion at this position
            if not e.mayReturnEmpty:
                break

    def _generateDefaultName(self) -> str:
        inner = " ".join(str(e) for e in self.exprs)
        # strip off redundant inner {}'s
        while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
            inner = inner[1:-1]
        return f"{{{inner}}}"
class Or(ParseExpression):
    """Requires that at least one :class:`ParserElement` is found. If
    two expressions match, the expression that matches the longest
    string will be used. May be constructed using the ``'^'``
    operator.

    Example::

        # construct Or using '^' operator

        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.search_string("123 3.1416 789"))

    prints::

        [['123'], ['3.1416'], ['789']]
    """

    def __init__(
        self, exprs: typing.Iterable[ParserElement], savelist: bool = False
    ) -> None:
        super().__init__(exprs, savelist)
        if self.exprs:
            # an Or may match empty if any alternative may
            self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs)
            self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
        else:
            self._may_return_empty = True

    def streamline(self) -> ParserElement:
        super().streamline()
        if self.exprs:
            self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs)
            self.saveAsList = any(e.saveAsList for e in self.exprs)
            # don't skip whitespace if any alternative is a White
            # (its whitespace is significant)
            self.skipWhitespace = all(
                e.skipWhitespace and not isinstance(e, White) for e in self.exprs
            )
        else:
            self.saveAsList = False
        return self

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        maxExcLoc = -1
        maxException = None

        # first pass: try-parse every alternative (without actions) and
        # record each success with its end location
        matches: list[tuple[int, ParserElement]] = []
        fatals: list[ParseFatalException] = []

        if all(e.callPreparse for e in self.exprs):
            loc = self.preParse(instring, loc)

        for e in self.exprs:
            try:
                loc2 = e.try_parse(instring, loc, raise_fatal=True)
            except ParseFatalException as pfe:
                pfe.__traceback__ = None
                pfe.parser_element = e
                fatals.append(pfe)
                # a fatal error overrides any ordinary failures seen so far
                maxException = None
                maxExcLoc = -1
            except ParseException as err:
                if not fatals:
                    err.__traceback__ = None
                    # keep the failure that got furthest into the input
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(
                        instring, len(instring), e.errmsg, self
                    )
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        if matches:
            # re-evaluate all matches in descending order of length of match, in case attached actions
            # might change whether or how much they match of the input.
            matches.sort(key=itemgetter(0), reverse=True)

            if not do_actions:
                # no further conditions or parse actions to change the selection of
                # alternative, so the first match will be the best match
                best_expr = matches[0][1]
                return best_expr._parse(instring, loc, do_actions)

            longest: tuple[int, typing.Optional[ParseResults]] = -1, None
            for loc1, expr1 in matches:
                if loc1 <= longest[0]:
                    # already have a longer match than this one will deliver, we are done
                    return longest

                try:
                    loc2, toks = expr1._parse(instring, loc, do_actions)
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
                else:
                    if loc2 >= loc1:
                        return loc2, toks
                    # didn't match as much as before
                    elif loc2 > longest[0]:
                        longest = loc2, toks

            if longest != (-1, None):
                return longest

        if fatals:
            if len(fatals) > 1:
                # report the fatal error that got furthest; break ties by
                # the longer (more specific) parser element description
                fatals.sort(key=lambda e: -e.loc)
                if fatals[0].loc == fatals[1].loc:
                    fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element))))
            max_fatal = fatals[0]
            raise max_fatal

        if maxException is not None:
            # infer from this check that all alternatives failed at the current position
            # so emit this collective error message instead of any single error message
            parse_start_loc = self.preParse(instring, loc)
            if maxExcLoc == parse_start_loc:
                maxException.msg = self.errmsg or ""
            raise maxException

        raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ixor__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if not isinstance(other, ParserElement):
            return NotImplemented
        return self.append(other)  # Or([self, other])

    def _generateDefaultName(self) -> str:
        return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}"

    def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
        # warn when naming an alternation whose And alternatives would now
        # return all their tokens (behavior change from earlier versions)
        if (
            __diag__.warn_multiple_tokens_in_named_alternation
            and Diagnostics.warn_multiple_tokens_in_named_alternation
            not in self.suppress_warnings_
        ):
            if any(
                isinstance(e, And)
                and Diagnostics.warn_multiple_tokens_in_named_alternation
                not in e.suppress_warnings_
                for e in self.exprs
            ):
                warning = (
                    "warn_multiple_tokens_in_named_alternation:"
                    f" setting results name {name!r} on {type(self).__name__} expression"
                    " will return a list of all parsed tokens in an And alternative,"
                    " in prior versions only the first token was returned; enclose"
                    " contained argument in Group"
                )
                warnings.warn(warning, stacklevel=3)

        return super()._setResultsName(name, list_all_matches)
class MatchFirst(ParseExpression):
    """Requires that at least one :class:`ParserElement` is found. If
    more than one expression matches, the first one listed is the one that will
    match. May be constructed using the ``'|'`` operator.

    Example::

        # construct MatchFirst using '|' operator

        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.search_string("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.search_string("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """

    def __init__(
        self, exprs: typing.Iterable[ParserElement], savelist: bool = False
    ) -> None:
        super().__init__(exprs, savelist)
        if self.exprs:
            # a MatchFirst may match empty if any alternative may
            self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs)
            self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
        else:
            self._may_return_empty = True

    def streamline(self) -> ParserElement:
        if self.streamlined:
            return self

        super().streamline()
        if self.exprs:
            self.saveAsList = any(e.saveAsList for e in self.exprs)
            self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs)
            # don't skip whitespace if any alternative is a White
            # (its whitespace is significant)
            self.skipWhitespace = all(
                e.skipWhitespace and not isinstance(e, White) for e in self.exprs
            )
        else:
            self.saveAsList = False
            self._may_return_empty = True

        return self

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        maxExcLoc = -1
        maxException = None

        for e in self.exprs:
            try:
                # first alternative to succeed wins
                return e._parse(instring, loc, do_actions)
            except ParseFatalException as pfe:
                pfe.__traceback__ = None
                pfe.parser_element = e
                raise
            except ParseException as err:
                # keep the failure that got furthest into the input
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(
                        instring, len(instring), e.errmsg, self
                    )
                    maxExcLoc = len(instring)

        if maxException is not None:
            # infer from this check that all alternatives failed at the current position
            # so emit this collective error message instead of any individual error message
            parse_start_loc = self.preParse(instring, loc)
            if maxExcLoc == parse_start_loc:
                maxException.msg = self.errmsg or ""
            raise maxException

        raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if not isinstance(other, ParserElement):
            return NotImplemented
        return self.append(other)  # MatchFirst([self, other])

    def _generateDefaultName(self) -> str:
        return f"{{{' | '.join(str(e) for e in self.exprs)}}}"

    def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
        # warn when naming an alternation whose And alternatives would now
        # return all their tokens (behavior change from earlier versions)
        if (
            __diag__.warn_multiple_tokens_in_named_alternation
            and Diagnostics.warn_multiple_tokens_in_named_alternation
            not in self.suppress_warnings_
        ):
            if any(
                isinstance(e, And)
                and Diagnostics.warn_multiple_tokens_in_named_alternation
                not in e.suppress_warnings_
                for e in self.exprs
            ):
                warning = (
                    "warn_multiple_tokens_in_named_alternation:"
                    f" setting results name {name!r} on {type(self).__name__} expression"
                    " will return a list of all parsed tokens in an And alternative,"
                    " in prior versions only the first token was returned; enclose"
                    " contained argument in Group"
                )
                warnings.warn(warning, stacklevel=3)

        return super()._setResultsName(name, list_all_matches)
class Each(ParseExpression):
    """Requires all given :class:`ParserElement` s to be found, but in
    any order. Expressions may be separated by whitespace.

    May be constructed using the ``'&'`` operator.

    Example::

        color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)

        shape_spec.run_tests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )

    prints::

        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE

        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50

        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """

    def __init__(
        self, exprs: typing.Iterable[ParserElement], savelist: bool = True
    ) -> None:
        super().__init__(exprs, savelist)
        # Each matches empty only if every contained expression can
        if self.exprs:
            self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs)
        else:
            self._may_return_empty = True
        self.skipWhitespace = True
        # partitioning of exprs into required/optional/repeating groups is
        # deferred to the first parseImpl call
        self.initExprGroups = True
        self.saveAsList = True

    def __iand__(self, other):
        # expr &= other
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if not isinstance(other, ParserElement):
            return NotImplemented
        return self.append(other)  # Each([self, other])

    def streamline(self) -> ParserElement:
        super().streamline()
        # recompute emptiness after contained expressions were streamlined
        if self.exprs:
            self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs)
        else:
            self._may_return_empty = True
        return self

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # Lazily partition the contained expressions on first use
        if self.initExprGroups:
            # map id() of each Opt's inner expression back to the Opt wrapper,
            # so the wrapper (with any default) is used when building results
            self.opt1map = dict(
                (id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
            )
            opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
            # expressions that may match empty are treated as optional too
            opt2 = [
                e
                for e in self.exprs
                if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
            ]
            self.optionals = opt1 + opt2
            self.multioptionals = [
                e.expr.set_results_name(e.resultsName, list_all_matches=True)
                for e in self.exprs
                if isinstance(e, _MultipleMatch)
            ]
            self.multirequired = [
                e.expr.set_results_name(e.resultsName, list_all_matches=True)
                for e in self.exprs
                if isinstance(e, OneOrMore)
            ]
            self.required = [
                e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
            ]
            self.required += self.multirequired
            self.initExprGroups = False

        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        multis = self.multioptionals[:]
        matchOrder: list[ParserElement] = []

        keepMatching = True
        failed: list[ParserElement] = []
        fatals: list[ParseFatalException] = []
        # repeatedly sweep all still-unmatched expressions; stop when a full
        # pass matches none of them
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + multis
            failed.clear()
            fatals.clear()
            for e in tmpExprs:
                try:
                    tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
                except ParseFatalException as pfe:
                    pfe.__traceback__ = None
                    pfe.parser_element = e
                    fatals.append(pfe)
                    failed.append(e)
                except ParseException:
                    failed.append(e)
                else:
                    # record the match (restoring any original Opt wrapper)
                    matchOrder.append(self.opt1map.get(id(e), e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        # look for any ParseFatalExceptions
        if fatals:
            if len(fatals) > 1:
                # report the fatal that occurred furthest into the input;
                # break loc ties by longest parser-element description
                fatals.sort(key=lambda e: -e.loc)
                if fatals[0].loc == fatals[1].loc:
                    fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element))))
            max_fatal = fatals[0]
            raise max_fatal

        if tmpReqd:
            missing = ", ".join([str(e) for e in tmpReqd])
            raise ParseException(
                instring,
                loc,
                f"Missing one or more required elements ({missing})",
            )

        # add any unmatched Opts, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]

        total_results = ParseResults([])
        # re-parse in the discovered order, now collecting results for real
        for e in matchOrder:
            loc, results = e._parse(instring, loc, do_actions)
            total_results += results

        return loc, total_results

    def _generateDefaultName(self) -> str:
        return f"{{{' & '.join(str(e) for e in self.exprs)}}}"
class ParseElementEnhance(ParserElement):
    """Abstract subclass of :class:`ParserElement`, for combining and
    post-processing parsed tokens.
    """

    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False) -> None:
        super().__init__(savelist)
        if isinstance(expr, str_type):
            # promote a plain string to a ParserElement, using the configured
            # literal class (wrapped in Literal where needed to avoid
            # self-referential construction)
            expr_str = typing.cast(str, expr)
            if issubclass(self._literalStringClass, Token):
                expr = self._literalStringClass(expr_str)  # type: ignore[call-arg]
            elif issubclass(type(self), self._literalStringClass):
                expr = Literal(expr_str)
            else:
                expr = self._literalStringClass(Literal(expr_str))  # type: ignore[assignment, call-arg]
        expr = typing.cast(ParserElement, expr)
        self.expr = expr
        if expr is not None:
            # inherit parsing characteristics from the wrapped expression
            self.mayIndexError = expr.mayIndexError
            self._may_return_empty = expr.mayReturnEmpty
            self.set_whitespace_chars(
                expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
            )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def recurse(self) -> list[ParserElement]:
        # the wrapped expression is this element's only child
        return [self.expr] if self.expr is not None else []

    def parseImpl(self, instring, loc, do_actions=True):
        if self.expr is None:
            raise ParseException(instring, loc, "No expression defined", self)
        try:
            return self.expr._parse(instring, loc, do_actions, callPreParse=False)
        except ParseSyntaxException:
            raise
        except ParseBaseException as pbe:
            # fill in any missing context on the exception before re-raising
            pbe.pstr = pbe.pstr or instring
            pbe.loc = pbe.loc or loc
            pbe.parser_element = pbe.parser_element or self
            if not isinstance(self, Forward) and self.customName is not None:
                if self.errmsg:
                    pbe.msg = self.errmsg
            raise

    def leave_whitespace(self, recursive: bool = True) -> ParserElement:
        super().leave_whitespace(recursive)
        if recursive:
            # copy the inner expression so the change does not leak into
            # other references to the same expression
            if self.expr is not None:
                self.expr = self.expr.copy()
                self.expr.leave_whitespace(recursive)
        return self

    def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
        super().ignore_whitespace(recursive)
        if recursive:
            # copy the inner expression (see leave_whitespace)
            if self.expr is not None:
                self.expr = self.expr.copy()
                self.expr.ignore_whitespace(recursive)
        return self

    def ignore(self, other) -> ParserElement:
        # skip re-adding a Suppress that is already being ignored
        if not isinstance(other, Suppress) or other not in self.ignoreExprs:
            super().ignore(other)
            if self.expr is not None:
                self.expr.ignore(self.ignoreExprs[-1])
        return self

    def streamline(self) -> ParserElement:
        super().streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def _checkRecursion(self, parseElementList):
        if self in parseElementList:
            raise RecursiveGrammarException(parseElementList + [self])
        subRecCheckList = parseElementList[:] + [self]
        if self.expr is not None:
            self.expr._checkRecursion(subRecCheckList)

    def validate(self, validateTrace=None) -> None:
        warnings.warn(
            "ParserElement.validate() is deprecated, and should not be used to check for left recursion",
            DeprecationWarning,
            stacklevel=2,
        )
        if validateTrace is None:
            validateTrace = []
        tmp = validateTrace[:] + [self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self._checkRecursion([])

    def _generateDefaultName(self) -> str:
        return f"{type(self).__name__}:({self.expr})"

    # Compatibility synonyms
    # fmt: off
    leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
    ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
    # fmt: on
class IndentedBlock(ParseElementEnhance):
    """
    Expression to match one or more expressions at a given indentation level.
    Useful for parsing text where structure is implied by indentation (like Python source code).
    """

    class _Indent(Empty):
        # matches only at exactly the given column
        def __init__(self, ref_col: int) -> None:
            super().__init__()
            self.errmsg = f"expected indent at column {ref_col}"
            self.add_condition(lambda s, l, t: col(l, s) == ref_col)

    class _IndentGreater(Empty):
        # matches only at a column strictly greater than the given one
        def __init__(self, ref_col: int) -> None:
            super().__init__()
            self.errmsg = f"expected indent at column greater than {ref_col}"
            self.add_condition(lambda s, l, t: col(l, s) > ref_col)

    def __init__(
        self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
    ) -> None:
        super().__init__(expr, savelist=True)
        # if recursive:
        #    raise NotImplementedError("IndentedBlock with recursive is not implemented")
        self._recursive = recursive
        self._grouped = grouped
        # column of the enclosing block; 1 when this is a top-level block
        self.parent_anchor = 1

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # advance parse position to non-whitespace by using an Empty()
        # this should be the column to be used for all subsequent indented lines
        anchor_loc = Empty().preParse(instring, loc)

        # see if self.expr matches at the current location - if not it will raise an exception
        # and no further work is necessary
        self.expr.try_parse(instring, anchor_loc, do_actions=do_actions)

        indent_col = col(anchor_loc, instring)
        peer_detect_expr = self._Indent(indent_col)

        inner_expr = Empty() + peer_detect_expr + self.expr
        if self._recursive:
            # allow a further-indented nested IndentedBlock after each line
            sub_indent = self._IndentGreater(indent_col)
            nested_block = IndentedBlock(
                self.expr, recursive=self._recursive, grouped=self._grouped
            )
            nested_block.set_debug(self.debug)
            nested_block.parent_anchor = indent_col
            inner_expr += Opt(sub_indent + nested_block)

        inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
        block = OneOrMore(inner_expr)

        # the block ends on a dedent back to the parent's column, or at EOF
        trailing_undent = self._Indent(self.parent_anchor) | StringEnd()

        if self._grouped:
            wrapper = Group
        else:
            wrapper = lambda expr: expr  # type: ignore[misc, assignment]
        return (wrapper(block) + Optional(trailing_undent)).parseImpl(
            instring, anchor_loc, do_actions
        )
class AtStringStart(ParseElementEnhance):
    """Matches only if the wrapped expression matches at the very
    beginning of the parse string::

        AtStringStart(Word(nums)).parse_string("123")
        # prints ["123"]

        AtStringStart(Word(nums)).parse_string(" 123")
        # raises ParseException
    """

    def __init__(self, expr: Union[ParserElement, str]) -> None:
        super().__init__(expr)
        # do not skip leading whitespace before the position check
        self.callPreparse = False

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # succeed only when anchored at offset 0 of the input
        if loc == 0:
            return super().parseImpl(instring, loc, do_actions)
        raise ParseException(instring, loc, "not found at string start")
class AtLineStart(ParseElementEnhance):
    r"""Matches only if the wrapped expression matches at the beginning
    of a line within the parse string.

    Example::

        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (AtLineStart('AAA') + rest_of_line).search_string(test):
            print(t)

    prints::

        ['AAA', ' this line']
        ['AAA', ' and this line']
    """

    def __init__(self, expr: Union[ParserElement, str]) -> None:
        super().__init__(expr)
        # do not skip leading whitespace before the column check
        self.callPreparse = False

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # succeed only when positioned at column 1 of a line
        if col(loc, instring) == 1:
            return super().parseImpl(instring, loc, do_actions)
        raise ParseException(instring, loc, "not found at line start")
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression.

    ``FollowedBy`` does *not* advance the parsing position within the
    input string; it only verifies that the given expression matches at
    the current position. It always returns a null token list, but any
    results names defined in the lookahead expression *are* preserved
    and returned for access by name.

    Example::

        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))

        attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()

    prints::

        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """

    def __init__(self, expr: Union[ParserElement, str]) -> None:
        super().__init__(expr)
        # a pure lookahead consumes nothing, so it may "match" empty
        self._may_return_empty = True

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # parse the lookahead expression, then empty out its matched tokens;
        # named results survive the del, and loc is returned unchanged
        _, lookahead = self.expr._parse(instring, loc, do_actions=do_actions)
        del lookahead[:]
        return loc, lookahead
class PrecededBy(ParseElementEnhance):
    """Lookbehind matching of the given parse expression.

    ``PrecededBy`` does not advance the parsing position within the
    input string, it only verifies that the specified parse expression
    matches prior to the current position. ``PrecededBy`` always
    returns a null token list, but if a results name is defined on the
    given expression, it is returned.

    Parameters:

    - ``expr`` - expression that must match prior to the current parse
      location
    - ``retreat`` - (default= ``None``) - (int) maximum number of characters
      to lookbehind prior to the current parse location

    If the lookbehind expression is a string, :class:`Literal`,
    :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
    with a specified exact or maximum length, then the retreat
    parameter is not required. Otherwise, retreat must be specified to
    give a maximum number of characters to look back from
    the current parse position for a lookbehind match.

    Example::

        # VB-style variable names with type prefixes
        int_var = PrecededBy("#") + pyparsing_common.identifier
        str_var = PrecededBy("$") + pyparsing_common.identifier
    """

    def __init__(self, expr: Union[ParserElement, str], retreat: int = 0) -> None:
        super().__init__(expr)
        # lookbehind must not skip whitespace inside the inner expression
        self.expr = self.expr().leave_whitespace()
        self._may_return_empty = True
        self.mayIndexError = False
        self.exact = False
        # for fixed-width expressions, the retreat distance is known exactly,
        # so parseImpl can do a single parse instead of scanning a window
        if isinstance(expr, str_type):
            expr = typing.cast(str, expr)
            retreat = len(expr)
            self.exact = True
        elif isinstance(expr, (Literal, Keyword)):
            retreat = expr.matchLen
            self.exact = True
        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
            retreat = expr.maxLen
            self.exact = True
        elif isinstance(expr, PositionToken):
            retreat = 0
            self.exact = True
        self.retreat = retreat
        self.errmsg = f"not preceded by {expr}"
        self.skipWhitespace = False
        # strip all matched tokens; only named results survive
        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))

    def parseImpl(self, instring, loc=0, do_actions=True) -> ParseImplReturnType:
        if self.exact:
            # fixed-width lookbehind: parse exactly `retreat` chars back
            if loc < self.retreat:
                raise ParseException(instring, loc, self.errmsg, self)
            start = loc - self.retreat
            _, ret = self.expr._parse(instring, start)
            return loc, ret

        # retreat specified a maximum lookbehind window, iterate
        # (anchor with StringEnd so the match must finish exactly at loc)
        test_expr = self.expr + StringEnd()
        instring_slice = instring[max(0, loc - self.retreat) : loc]
        last_expr: ParseBaseException = ParseException(instring, loc, self.errmsg, self)

        # try progressively longer lookbehind spans, nearest-first
        for offset in range(1, min(loc, self.retreat + 1) + 1):
            try:
                # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
                _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
            except ParseBaseException as pbe:
                last_expr = pbe
            else:
                break
        else:
            raise last_expr
        return loc, ret
class Located(ParseElementEnhance):
    """
    Decorates a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains tab characters; you
    may want to call :class:`ParserElement.parse_with_tabs`

    Example::

        wd = Word(alphas)
        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [0, ['ljsdf'], 5]
        [8, ['lksdjjf'], 15]
        [18, ['lkkjj'], 23]
    """

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        start_loc = loc
        end_loc, tokens = self.expr._parse(
            instring, start_loc, do_actions, callPreParse=False
        )
        located = ParseResults([start_loc, tokens, end_loc])
        located["locn_start"] = start_loc
        located["value"] = tokens
        located["locn_end"] = end_loc
        if self.resultsName:
            # must return as a list, so that the name will be attached to the complete group
            return end_loc, [located]
        return end_loc, located
class NotAny(ParseElementEnhance):
    """
    Negative lookahead: fails when the given parse expression matches.

    ``NotAny`` does *not* advance the parsing position within the
    input string; it only verifies that the given expression does *not*
    match at the current position. It does *not* skip over leading
    whitespace, and always returns a null token list. May be
    constructed using the ``'~'`` operator.

    Example::

        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())

        # take care not to mistake keywords for identifiers
        ident = ~(AND | OR | NOT) + Word(alphas)
        boolean_term = Opt(NOT) + ident

        # very crude boolean expression - to support parenthesis groups and
        # operation hierarchy, use infix_notation
        boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]

        # integers that are followed by "." are actually floats
        integer = Word(nums) + ~Char(".")
    """

    def __init__(self, expr: Union[ParserElement, str]) -> None:
        super().__init__(expr)
        # do NOT use self.leave_whitespace() here - that would propagate
        # into the contained expression
        self.skipWhitespace = False
        self._may_return_empty = True
        self.errmsg = f"Found unwanted token, {self.expr}"

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # fail if the inner expression WOULD match here; never consume input
        would_match = self.expr.can_parse_next(instring, loc, do_actions=do_actions)
        if would_match:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def _generateDefaultName(self) -> str:
        return "~{" + str(self.expr) + "}"
class _MultipleMatch(ParseElementEnhance):
    """Common base for OneOrMore/ZeroOrMore: repeat the wrapped expression,
    optionally stopping when a sentinel expression (`stop_on`) matches.
    """

    def __init__(
        self,
        expr: Union[str, ParserElement],
        stop_on: typing.Optional[Union[ParserElement, str]] = None,
        *,
        stopOn: typing.Optional[Union[ParserElement, str]] = None,
    ) -> None:
        super().__init__(expr)
        # accept both snake_case and legacy camelCase keyword
        stopOn = stopOn or stop_on
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, str_type):
            ender = self._literalStringClass(ender)
        self.stopOn(ender)

    def stopOn(self, ender) -> ParserElement:
        # stash the sentinel as a negative lookahead (None disables it)
        if isinstance(ender, str_type):
            ender = self._literalStringClass(ender)
        self.not_ender = ~ender if ender is not None else None
        return self

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        # hoist attribute lookups out of the repetition loop (hot path)
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = False
        if self.not_ender is not None:
            try_not_ender = self.not_ender.try_parse
            check_ender = True

        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse(instring, loc, do_actions)
        try:
            hasIgnoreExprs = not not self.ignoreExprs
            # keep matching until the sentinel appears or the expression fails
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables(instring, loc)
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse(instring, preloc, do_actions)
                tokens += tmptokens
        except (ParseException, IndexError):
            # a failed repetition simply ends the loop; results so far stand
            pass

        return loc, tokens

    def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
        # Optionally warn when naming a repetition whose contained expression
        # already has its own (ungrouped) results name - the two names collide.
        if (
            __diag__.warn_ungrouped_named_tokens_in_collection
            and Diagnostics.warn_ungrouped_named_tokens_in_collection
            not in self.suppress_warnings_
        ):
            for e in [self.expr] + self.expr.recurse():
                if (
                    isinstance(e, ParserElement)
                    and e.resultsName
                    and (
                        Diagnostics.warn_ungrouped_named_tokens_in_collection
                        not in e.suppress_warnings_
                    )
                ):
                    warning = (
                        "warn_ungrouped_named_tokens_in_collection:"
                        f" setting results name {name!r} on {type(self).__name__} expression"
                        f" collides with {e.resultsName!r} on contained expression"
                    )
                    warnings.warn(warning, stacklevel=3)
                    break

        return super()._setResultsName(name, list_all_matches)
class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.

    Parameters:

    - ``expr`` - expression that must match one or more times
    - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel
      (only required if the sentinel would ordinarily match the repetition
      expression)

    Example::

        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        attr_expr[1, ...].parse_string(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
        OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]

        # could also be written as
        (attr_expr * (1,)).parse_string(text).pprint()
    """

    def _generateDefaultName(self) -> str:
        # default display name: the repeated expression in braces plus '...'
        return "{" + str(self.expr) + "}..."
class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.

    Parameters:

    - ``expr`` - expression that must match zero or more times
    - ``stop_on`` - expression for a terminating sentinel
      (only required if the sentinel would ordinarily match the repetition
      expression) - (default= ``None``)

    Example: similar to :class:`OneOrMore`
    """

    def __init__(
        self,
        expr: Union[str, ParserElement],
        stop_on: typing.Optional[Union[ParserElement, str]] = None,
        *,
        stopOn: typing.Optional[Union[ParserElement, str]] = None,
    ) -> None:
        super().__init__(expr, stopOn=stopOn or stop_on)
        # zero repetitions is a valid (empty) match
        self._may_return_empty = True

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        try:
            return super().parseImpl(instring, loc, do_actions)
        except (ParseException, IndexError):
            # no matches at all: succeed with empty results at the same loc
            return loc, ParseResults([], name=self.resultsName)

    def _generateDefaultName(self) -> str:
        # default display name: the repeated expression in brackets plus '...'
        return "[" + str(self.expr) + "]..."
class DelimitedList(ParseElementEnhance):
    def __init__(
        self,
        expr: Union[str, ParserElement],
        delim: Union[str, ParserElement] = ",",
        combine: bool = False,
        min: typing.Optional[int] = None,
        max: typing.Optional[int] = None,
        *,
        allow_trailing_delim: bool = False,
    ) -> None:
        """Helper to define a delimited list of expressions - the delimiter
        defaults to ','. By default, the list elements and delimiters can
        have intervening whitespace, and comments, but this can be
        overridden by passing ``combine=True`` in the constructor. If
        ``combine`` is set to ``True``, the matching tokens are
        returned as a single token string, with the delimiters included;
        otherwise, the matching tokens are returned as a list of tokens,
        with the delimiters suppressed.

        If ``allow_trailing_delim`` is set to True, then the list may end with
        a delimiter.

        Example::

            DelimitedList(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
            DelimitedList(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
        """
        # promote a plain string to a ParserElement
        if isinstance(expr, str_type):
            expr = ParserElement._literalStringClass(expr)
        expr = typing.cast(ParserElement, expr)

        # validate the repetition bounds
        if min is not None and min < 1:
            raise ValueError("min must be greater than 0")
        if max is not None and min is not None and max < min:
            raise ValueError("max must be greater than, or equal to min")

        self.content = expr
        self.raw_delim = str(delim)
        # when not combining, the delimiters are suppressed from the results
        self.delim = delim if combine else Suppress(delim)
        self.combine = combine
        self.min = min or 1
        self.max = max
        self.allow_trailing_delim = allow_trailing_delim

        # one element, then (delim + element) between min-1 and max-1 times
        repeat_bounds = (
            self.min - 1,
            None if self.max is None else self.max - 1,
        )
        list_expr = self.content + (self.delim + self.content) * repeat_bounds
        if self.allow_trailing_delim:
            list_expr += Opt(self.delim)
        if self.combine:
            list_expr = Combine(list_expr)

        super().__init__(list_expr, savelist=True)

    def _generateDefaultName(self) -> str:
        streamlined = self.content.streamline()
        return f"{streamlined} [{self.raw_delim} {streamlined}]..."
class _NullToken:
    """Falsy sentinel used to mean "no default value was given"."""

    def __bool__(self) -> bool:
        # always falsy, so `if default:` treats it like "absent"
        return False

    def __str__(self) -> str:
        # renders as the empty string
        return ""
class Opt(ParseElementEnhance):
    """
    Optional matching of the given expression.

    Parameters:

    - ``expr`` - expression that must match zero or more times
    - ``default`` (optional) - value to be returned if the optional expression is not found.

    Example::

        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
        zip.run_tests('''
            # traditional ZIP code
            12345

            # ZIP+4 form
            12101-0001

            # invalid ZIP
            98765-
            ''')

    prints::

        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """

    # sentinel distinguishing "no default given" from any real default value
    __optionalNotMatched = _NullToken()

    def __init__(
        self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
    ) -> None:
        super().__init__(expr, savelist=False)
        self.saveAsList = self.expr.saveAsList
        self.defaultValue = default
        self._may_return_empty = True

    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
        self_expr = self.expr
        try:
            loc, tokens = self_expr._parse(
                instring, loc, do_actions, callPreParse=False
            )
        except (ParseException, IndexError):
            # inner expression did not match: substitute the default (if any)
            default_value = self.defaultValue
            if default_value is not self.__optionalNotMatched:
                if self_expr.resultsName:
                    # attach the inner expression's results name to the default
                    tokens = ParseResults([default_value])
                    tokens[self_expr.resultsName] = default_value
                else:
                    tokens = [default_value]  # type: ignore[assignment]
            else:
                tokens = []  # type: ignore[assignment]
        return loc, tokens

    def _generateDefaultName(self) -> str:
        inner = str(self.expr)
        # strip off redundant inner {}'s
        while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
            inner = inner[1:-1]
        return f"[{inner}]"


# compatibility synonym for pre-3.0 code
Optional = Opt
class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched
    expression is found.

    Parameters:

    - ``expr`` - target expression marking the end of the data to be skipped
    - ``include`` - if ``True``, the target expression is also parsed
      (the skipped text and target expression are returned as a 2-element
      list) (default= ``False``).
    - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
      comments) that might contain false matches to the target expression
    - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
      included in the skipped test; if found before the target expression is found,
      the :class:`SkipTo` is not a match

    Example::

        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quoted_string)
        string_data.set_parse_action(token_map(str.strip))
        ticket_expr = (integer("issue_num") + SEP
                      + string_data("sev") + SEP
                      + string_data("desc") + SEP
                      + integer("days_open"))

        for tkt in ticket_expr.search_string(report):
            print(tkt.dump())

    prints::

        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: '6'
        - desc: 'Intermittent system crash'
        - issue_num: '101'
        - sev: 'Critical'
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: '14'
        - desc: "Spelling error on Login ('log|n')"
        - issue_num: '94'
        - sev: 'Cosmetic'
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: '47'
        - desc: 'System slow when running too many reports'
        - issue_num: '79'
        - sev: 'Minor'
    """

    def __init__(
        self,
        other: Union[ParserElement, str],
        include: bool = False,
        ignore: typing.Optional[Union[ParserElement, str]] = None,
        fail_on: typing.Optional[Union[ParserElement, str]] = None,
        *,
        failOn: typing.Optional[Union[ParserElement, str]] = None,
    ) -> None:
        super().__init__(other)
        # accept both snake_case and legacy camelCase keyword
        failOn = failOn or fail_on
        self.ignoreExpr = ignore
        self._may_return_empty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.saveAsList = False
        if isinstance(failOn, str_type):
            self.failOn = self._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = f"No match found for {self.expr}"
        # internal expression used to advance past ignorable content
        self.ignorer = Empty().leave_whitespace()
        self._update_ignorer()

    def _update_ignorer(self):
        # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr
        self.ignorer.ignoreExprs.clear()
        for e in self.expr.ignoreExprs:
            self.ignorer.ignore(e)
        if self.ignoreExpr:
            self.ignorer.ignore(self.ignoreExpr)

    def ignore(self, expr):
        super().ignore(expr)
        # keep the internal ignorer in sync with newly added ignore exprs
        self._update_ignorer()

    def parseImpl(self, instring, loc, do_actions=True):
        startloc = loc
        instrlen = len(instring)
        # hoist attribute lookups out of the character-scanning loop
        self_expr_parse = self.expr._parse
        self_failOn_canParseNext = (
            self.failOn.canParseNext if self.failOn is not None else None
        )
        ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None

        # scan forward one position at a time until the target expression
        # matches, a failOn expression matches, or the input is exhausted
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break

            if ignorer_try_parse is not None:
                # advance past ignore expressions
                prev_tmploc = tmploc
                while 1:
                    try:
                        tmploc = ignorer_try_parse(instring, tmploc)
                    except ParseBaseException:
                        break
                    # see if all ignorers matched, but didn't actually ignore anything
                    if tmploc == prev_tmploc:
                        break
                    prev_tmploc = tmploc

            try:
                self_expr_parse(instring, tmploc, do_actions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)

        if self.includeMatch:
            # also consume the target expression, this time running actions
            loc, mat = self_expr_parse(instring, loc, do_actions, callPreParse=False)
            skipresult += mat

        return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the ``'<<'`` operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
fwd_expr << a | b | c
will actually be evaluated as::
(fwd_expr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwd_expr << (a | b | c)
Converting to use the ``'<<='`` operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__(
    self, other: typing.Optional[Union[ParserElement, str]] = None
) -> None:
    # remember where this Forward was created, for the "never assigned an
    # expression" diagnostic emitted from __del__
    self.caller_frame = traceback.extract_stack(limit=2)[0]
    super().__init__(other, savelist=False)  # type: ignore[arg-type]
    # set by '<<'; compared against in __or__ to detect the common
    # '<<' vs '<<=' operator-precedence mistake
    self.lshift_line = None
def __lshift__(self, other) -> Forward:
    """Attach the actual expression to this Forward (``fwd << expr``)."""
    # an expression has now been assigned, so the __del__ diagnostic
    # no longer needs the creation frame
    if hasattr(self, "caller_frame"):
        del self.caller_frame
    if isinstance(other, str_type):
        other = self._literalStringClass(other)

    if not isinstance(other, ParserElement):
        return NotImplemented

    self.expr = other
    self.streamlined = other.streamlined
    self.mayIndexError = self.expr.mayIndexError
    self._may_return_empty = self.expr.mayReturnEmpty
    # adopt the attached expression's whitespace/result handling
    self.set_whitespace_chars(
        self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
    )
    self.skipWhitespace = self.expr.skipWhitespace
    self.saveAsList = self.expr.saveAsList
    self.ignoreExprs.extend(self.expr.ignoreExprs)
    # record where '<<' was used, for __or__'s precedence-mistake warning
    self.lshift_line = traceback.extract_stack(limit=2)[-2]  # type: ignore[assignment]
    return self
def __ilshift__(self, other) -> Forward:
    """Implement ``fwd <<= expr`` by delegating to the ``<<`` operator."""
    if isinstance(other, ParserElement):
        return self << other
    return NotImplemented
def __or__(self, other) -> ParserElement:
caller_line = traceback.extract_stack(limit=2)[-2]
if (
__diag__.warn_on_match_first_with_lshift_operator
and caller_line == self.lshift_line
and Diagnostics.warn_on_match_first_with_lshift_operator
not in self.suppress_warnings_
):
warnings.warn(
"warn_on_match_first_with_lshift_operator:"
" using '<<' operator with '|' is probably an error, use '<<='",
stacklevel=2,
)
ret = super().__or__(other)
return ret
def __del__(self):
# see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
if (
self.expr is None
and __diag__.warn_on_assignment_to_Forward
and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
):
warnings.warn_explicit(
"warn_on_assignment_to_Forward:"
" Forward defined here but no expression attached later using '<<=' or '<<'",
UserWarning,
filename=self.caller_frame.filename,
lineno=self.caller_frame.lineno,
)
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if (
self.expr is None
and __diag__.warn_on_parse_using_empty_Forward
and Diagnostics.warn_on_parse_using_empty_Forward
not in self.suppress_warnings_
):
# walk stack until parse_string, scan_string, search_string, or transform_string is found
parse_fns = (
"parse_string",
"scan_string",
"search_string",
"transform_string",
)
tb = traceback.extract_stack(limit=200)
for i, frm in enumerate(reversed(tb), start=1):
if frm.name in parse_fns:
stacklevel = i + 1
break
else:
stacklevel = 2
warnings.warn(
"warn_on_parse_using_empty_Forward:"
" Forward expression was never assigned a value, will not parse any input",
stacklevel=stacklevel,
)
if not ParserElement._left_recursion_enabled:
return super().parseImpl(instring, loc, do_actions)
# ## Bounded Recursion algorithm ##
# Recursion only needs to be processed at ``Forward`` elements, since they are
# the only ones that can actually refer to themselves. The general idea is
# to handle recursion stepwise: We start at no recursion, then recurse once,
# recurse twice, ..., until more recursion offers no benefit (we hit the bound).
#
# The "trick" here is that each ``Forward`` gets evaluated in two contexts
# - to *match* a specific recursion level, and
# - to *search* the bounded recursion level
# and the two run concurrently. The *search* must *match* each recursion level
# to find the best possible match. This is handled by a memo table, which
# provides the previous match to the next level match attempt.
#
# See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
#
# There is a complication since we not only *parse* but also *transform* via
# actions: We do not want to run the actions too often while expanding. Thus,
# we expand using `do_actions=False` and only run `do_actions=True` if the next
# recursion level is acceptable.
with ParserElement.recursion_lock:
memo = ParserElement.recursion_memos
try:
# we are parsing at a specific recursion expansion - use it as-is
prev_loc, prev_result = memo[loc, self, do_actions]
if isinstance(prev_result, Exception):
raise prev_result
return prev_loc, prev_result.copy()
except KeyError:
act_key = (loc, self, True)
peek_key = (loc, self, False)
# we are searching for the best recursion expansion - keep on improving
# both `do_actions` cases must be tracked separately here!
prev_loc, prev_peek = memo[peek_key] = (
loc - 1,
ParseException(
instring, loc, "Forward recursion without base case", self
),
)
if do_actions:
memo[act_key] = memo[peek_key]
while True:
try:
new_loc, new_peek = super().parseImpl(instring, loc, False)
except ParseException:
# we failed before getting any match - do not hide the error
if isinstance(prev_peek, Exception):
raise
new_loc, new_peek = prev_loc, prev_peek
# the match did not get better: we are done
if new_loc <= prev_loc:
if do_actions:
# replace the match for do_actions=False as well,
# in case the action did backtrack
prev_loc, prev_result = memo[peek_key] = memo[act_key]
del memo[peek_key], memo[act_key]
return prev_loc, copy.copy(prev_result)
del memo[peek_key]
return prev_loc, copy.copy(prev_peek)
# the match did get better: see if we can improve further
if do_actions:
try:
memo[act_key] = super().parseImpl(instring, loc, True)
except ParseException as e:
memo[peek_key] = memo[act_key] = (new_loc, e)
raise
prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = False
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
self.skipWhitespace = True
return self
def streamline(self) -> ParserElement:
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None) -> None:
warnings.warn(
"ParserElement.validate() is deprecated, and should not be used to check for left recursion",
DeprecationWarning,
stacklevel=2,
)
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self) -> str:
# Avoid infinite recursion by setting a temporary _defaultName
save_default_name = self._defaultName
self._defaultName = ": ..."
# Use the string representation of main expression.
try:
if self.expr is not None:
ret_string = str(self.expr)[:1000]
else:
ret_string = "None"
except Exception:
ret_string = "..."
self._defaultName = save_default_name
return f"{type(self).__name__}: {ret_string}"
def copy(self) -> ParserElement:
if self.expr is not None:
return super().copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
# fmt: off
if (
__diag__.warn_name_set_on_empty_Forward
and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_
and self.expr is None
):
warning = (
"warn_name_set_on_empty_Forward:"
f" setting results name {name!r} on {type(self).__name__} expression"
" that has no contained expression"
)
warnings.warn(warning, stacklevel=3)
# fmt: on
return super()._setResultsName(name, list_all_matches)
# Compatibility synonyms
# fmt: off
leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
# fmt: on
class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of :class:`ParseElementEnhance` for expressions that
    convert their matched tokens; subclasses override ``postParse``.
    """

    def __init__(self, expr: Union[ParserElement, str], savelist=False) -> None:
        # ``savelist`` is accepted for signature compatibility but ignored
        super().__init__(expr)
        self.saveAsList = False
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the
    input string; this can be disabled by specifying
    ``'adjacent=False'`` in the constructor.

    Example::

        real = Word(nums) + '.' + Word(nums)
        print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parse_string('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
    """

    def __init__(
        self,
        expr: ParserElement,
        join_string: str = "",
        adjacent: bool = True,
        *,
        joinString: typing.Optional[str] = None,
    ) -> None:
        super().__init__(expr)
        # pre-PEP8 ``joinString`` argument takes precedence when supplied
        joinString = joinString if joinString is not None else join_string
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leave_whitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore(self, other) -> ParserElement:
        # in adjacent mode, register the ignore expression only at this level
        # so the contained expressions remain contiguous
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super().ignore(other)
        return self

    def postParse(self, instring, loc, tokenlist):
        # empty a copy of the results (keeping named results), then append the
        # single joined string in place of the individual tokens
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults(
            ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
        )

        if self.resultsName and retToks.haskeys():
            return [retToks]
        else:
            return retToks
class Group(TokenConverter):
    """Converter that nests the matched tokens in their own sublist.

    Useful for preserving the grouping of :class:`ZeroOrMore` and
    :class:`OneOrMore` repetitions in the overall results.  With
    ``aslist=True`` the group is returned as a plain Python list instead
    of a pyparsing ParseResults.

    Example::

        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Opt(DelimitedList(term))
        print(func.parse_string("fn a, b, 100"))
        # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Opt(DelimitedList(term)))
        print(func.parse_string("fn a, b, 100"))
        # -> ['fn', ['a', 'b', '100']]
    """

    def __init__(self, expr: ParserElement, aslist: bool = False) -> None:
        super().__init__(expr)
        self._asPythonList = aslist
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # default: wrap the results in a one-element list to create nesting
        if not self._asPythonList:
            return [tokenlist]

        # aslist=True: emit a true Python list rather than ParseResults
        if isinstance(tokenlist, ParseResults):
            contents = tokenlist.asList()
        else:
            contents = list(tokenlist)
        return ParseResults.List(contents)
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also
    as a dictionary. Each element can also be referenced using the first
    token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as a item key.

    The optional ``asdict`` argument when set to True will return the
    parsed tokens as a Python dict instead of a pyparsing ParseResults.

    Example::

        data_word = Word(alphas)
        label = data_word + FollowedBy(':')

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))

        # print attributes as plain groups
        print(attr_expr[1, ...].parse_string(text).dump())

        # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
        result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
        print(result.dump())

        # access named fields as dict entries, or output as dict
        print(result['shape'])
        print(result.as_dict())

    prints::

        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: 'light blue'
        - posn: 'upper left'
        - shape: 'SQUARE'
        - texture: 'burlap'
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}

    See more examples at :class:`ParseResults` of accessing fields by results name.
    """

    def __init__(self, expr: ParserElement, asdict: bool = False) -> None:
        super().__init__(expr)
        self.saveAsList = True
        # when True, postParse returns a plain Python dict
        self._asPythonDict = asdict

    def postParse(self, instring, loc, tokenlist):
        # use each group's first token as the results name for that group
        for i, tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue

            ikey = tok[0]
            # integer keys are converted to their string form
            if isinstance(ikey, int):
                ikey = str(ikey).strip()

            if len(tok) == 1:
                # key with no value - store an empty string
                tokenlist[ikey] = _ParseResultsWithOffset("", i)
            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
                # simple key-value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
            else:
                # key with a structured value - store everything after the key
                try:
                    dictvalue = tok.copy()  # ParseResults(i)
                except Exception:
                    exc = TypeError(
                        "could not extract dict values from parsed results"
                        " - Dict expression must contain Grouped expressions"
                    )
                    raise exc from None

                del dictvalue[0]

                if len(dictvalue) != 1 or (
                    isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
                ):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)

        if self._asPythonDict:
            return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()

        return [tokenlist] if self.resultsName else tokenlist
class Suppress(TokenConverter):
    """Converter that matches an expression but omits its tokens from
    the parsed output.

    Example::

        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + (',' + wd)[...]
        print(wd_list1.parse_string(source))
        # -> ['a', ',', 'b', ',', 'c', ',', 'd']

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + (Suppress(',') + wd)[...]
        print(wd_list2.parse_string(source))
        # -> ['a', 'b', 'c', 'd']

        # Skipped text (using '...') can be suppressed as well
        source = "lead in START relevant text END trailing text"
        start_marker = Keyword("START")
        end_marker = Keyword("END")
        find_body = Suppress(...) + start_marker + ... + end_marker
        print(find_body.parse_string(source))
        # -> ['START', 'relevant text ', 'END']

    (See also :class:`DelimitedList`.)
    """

    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False) -> None:
        if expr is ...:
            # Suppress(...) means "suppress skipped text"; the actual skip
            # target is resolved when this is combined with another expression
            expr = _PendingSkip(NoMatch())
        super().__init__(expr)

    def __add__(self, other) -> ParserElement:
        if not isinstance(self.expr, _PendingSkip):
            return super().__add__(other)
        # resolve a pending Suppress(...) into a suppressed SkipTo
        return Suppress(SkipTo(other)) + other

    def __sub__(self, other) -> ParserElement:
        if not isinstance(self.expr, _PendingSkip):
            return super().__sub__(other)
        return Suppress(SkipTo(other)) - other

    def postParse(self, instring, loc, tokenlist):
        # discard all tokens matched by the contained expression
        return []

    def suppress(self) -> ParserElement:
        # already suppressed - nothing more to do
        return self
def trace_parse_action(f: ParseAction) -> ParseAction:
    """Decorator for debugging parse actions.

    When the parse action is called, this decorator will print
    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
    When the parse action completes, the decorator will print
    ``"<<"`` followed by the returned value, or any exception that the parse action raised.

    Example::

        wd = Word(alphas)

        @trace_parse_action
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))

    prints::

        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    # NOTE(review): this function and the following ``def srange`` line were
    # corrupted in this dump (HTML tag-stripping ate text between '<' and '>');
    # reconstructed here to match upstream pyparsing behavior.
    f = _trim_arity(f)

    def z(*paArgs):
        # the parse action is called with trailing (s, l, t) args; anything
        # before them is a bound "self" whose type is included in the name
        thisFunc = f.__name__
        s, l, t = paArgs[-3:]
        if len(paArgs) > 3:
            thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}"
        sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n")
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write(f"<<leaving {thisFunc} (exception: {exc})\n")
            raise
        sys.stderr.write(f"<<leaving {thisFunc} (ret: {ret!r})\n")
        return ret

    z.__name__ = f.__name__
    return z


def srange(s: str) -> str:
    r"""Helper to easily define string ranges for use in :class:`Word`
    construction. Borrows syntax from regexp ``'[]'`` string range
    definitions::

        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"

    The input string must be enclosed in []'s, and the returned string
    is the expanded character set joined into a single string. The
    values enclosed in the []'s may be:

    - a single character
    - an escaped character with a leading backslash (such as ``\-``
      or ``\]``)
    - an escaped hex character with a leading ``'\x'``
      (``\x21``, which is a ``'!'`` character) (``\0x##``
      is also supported for backwards compatibility)
    - an escaped octal character with a leading ``'\0'``
      (``\041``, which is a ``'!'`` character)
    - a range of any of the above, separated by a dash (``'a-z'``,
      etc.)
    - any combination of the above (``'aeiouy'``,
      ``'a-zA-Z0-9_$'``, etc.)
    """

    def _expanded(p):
        # a ParseResults here represents a two-endpoint character range
        if isinstance(p, ParseResults):
            yield from (chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
        else:
            yield p

    try:
        return "".join(
            [c for part in _reBracketExpr.parse_string(s).body for c in _expanded(part)]
        )
    except Exception as e:
        # malformed range spec - return empty string rather than raising
        return ""
def token_map(func, *args) -> ParseAction:
    """Build a parse action that applies ``func`` to every matched token.

    Any additional positional ``args`` are forwarded to ``func`` after the
    token, as in ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
    which converts each matched token to an integer using base 16.

    Example (compare the last to example in :class:`ParserElement.transform_string`::

        hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
        hex_ints.run_tests('''
            00 11 22 aa FF 0a 0d 1a
            ''')

        upperword = Word(alphas).set_parse_action(token_map(str.upper))
        upperword[1, ...].run_tests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).set_parse_action(token_map(str.title))
        wd[1, ...].set_parse_action(' '.join).run_tests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')

    prints::

        00 11 22 aa FF 0a 0d 1a
        [0, 17, 34, 170, 255, 10, 13, 26]

        my kingdom for a horse
        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

        now is the winter of our discontent made glorious summer by this sun of york
        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    """

    def pa(s, l, t):
        mapped = []
        for tok in t:
            mapped.append(func(tok, *args))
        return mapped

    # name the parse action after the mapped callable, for debug output
    if hasattr(func, "__name__"):
        pa.__name__ = func.__name__
    else:
        pa.__name__ = func.__class__.__name__
    return pa
def autoname_elements() -> None:
    """
    Utility to simplify mass-naming of parser elements, for
    generating railroad diagram with named subdiagrams.
    """
    # sys._getframe is CPython-specific; bail out quietly where unavailable
    frame_getter = getattr(sys, "_getframe", None)
    if frame_getter is None:
        return
    caller = frame_getter(1)
    if caller is None:
        return

    # scan the caller's locals for ParserElements without an explicit name
    caller = typing.cast(types.FrameType, caller)
    for var_name, value in caller.f_locals.items():
        if isinstance(value, ParserElement) and not value.customName:
            value.set_name(var_name)
# predefined expressions for commonly-used quoted-string forms
dbl_quoted_string = Combine(
    Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).set_name("string enclosed in double quotes")

sgl_quoted_string = Combine(
    Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("string enclosed in single quotes")

quoted_string = Combine(
    (Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name(
        "double quoted string"
    )
    | (Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name(
        "single quoted string"
    )
).set_name("quoted string using single or double quotes")

# Python-style strings, including triple-quoted multiline forms; the multiline
# alternatives are listed first so they are preferred over single-line matches
python_quoted_string = Combine(
    (Regex(r'"""(?:[^"\\]|""(?!")|"(?!"")|\\.)*', flags=re.MULTILINE) + '"""').set_name(
        "multiline double quoted string"
    )
    ^ (
        Regex(r"'''(?:[^'\\]|''(?!')|'(?!'')|\\.)*", flags=re.MULTILINE) + "'''"
    ).set_name("multiline single quoted string")
    ^ (Regex(r'"(?:[^"\n\r\\]|(?:\\")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name(
        "double quoted string"
    )
    ^ (Regex(r"'(?:[^'\n\r\\]|(?:\\')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name(
        "single quoted string"
    )
).set_name("Python quoted string")

unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")

# 8-bit (Latin-1) letter and punctuation ranges
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: list[ParserElement] = [
    v for v in vars().values() if isinstance(v, ParserElement)
]

# Compatibility synonyms
# fmt: off
sglQuotedString = sgl_quoted_string
dblQuotedString = dbl_quoted_string
quotedString = quoted_string
unicodeString = unicode_string
lineStart = line_start
lineEnd = line_end
stringStart = string_start
stringEnd = string_end
nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action)
traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action)
conditionAsParseAction = replaced_by_pep8("conditionAsParseAction", condition_as_parse_action)
tokenMap = replaced_by_pep8("tokenMap", token_map)
# fmt: on
venv\Lib\site-packages\pyparsing\exceptions.py
# exceptions.py
from __future__ import annotations
import copy
import re
import sys
import typing
from functools import cached_property
from .unicode import pyparsing_unicode as ppu
from .util import (
_collapse_string_to_ranges,
col,
line,
lineno,
replaced_by_pep8,
)
class _ExceptionWordUnicodeSet(
    ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic
):
    # union of unicode ranges used to extract the "found" word reported
    # in exception messages (see ParseBaseException.found)
    pass


# compressed character-class string of all alphanumerics in the above set
_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums)
# matches the next word (up to 16 chars) at the error location, or any single char
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""

    loc: int
    msg: str
    pstr: str
    parser_element: typing.Any  # "ParserElement"
    args: tuple[str, int, typing.Optional[str]]

    __slots__ = (
        "loc",
        "msg",
        "pstr",
        "parser_element",
        "args",
    )

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(
        self,
        pstr: str,
        loc: int = 0,
        msg: typing.Optional[str] = None,
        elem=None,
    ) -> None:
        # single-argument form ParseBaseException(msg): shift args over
        if msg is None:
            msg, pstr = pstr, ""

        self.loc = loc
        self.msg = msg
        self.pstr = pstr
        self.parser_element = elem
        self.args = (pstr, loc, msg)

    @staticmethod
    def explain_exception(exc: Exception, depth: int = 16) -> str:
        """
        Method to take an exception and translate the Python internal traceback into a list
        of the pyparsing expressions that caused the exception to be raised.

        Parameters:

        - exc - exception raised during parsing (need not be a ParseException, in support
          of Python exceptions that might be raised in a parse action)
        - depth (default=16) - number of levels back in the stack trace to list expression
          and function names; if None, the full stack trace names will be listed; if 0, only
          the failing input line, marker, and exception string will be shown

        Returns a multi-line string listing the ParserElements and/or function names in the
        exception's stack trace.
        """
        import inspect
        from .core import ParserElement

        if depth is None:
            depth = sys.getrecursionlimit()
        ret: list[str] = []
        if isinstance(exc, ParseBaseException):
            ret.append(exc.line)
            ret.append(f"{'^':>{exc.column}}")
        ret.append(f"{type(exc).__name__}: {exc}")

        if depth <= 0 or exc.__traceback__ is None:
            return "\n".join(ret)

        callers = inspect.getinnerframes(exc.__traceback__, context=depth)
        seen: set[int] = set()
        for ff in callers[-depth:]:
            frm = ff[0]

            f_self = frm.f_locals.get("self", None)
            if isinstance(f_self, ParserElement):
                # report only the actual parse methods, each expression once
                if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
                    continue
                if id(f_self) in seen:
                    continue
                seen.add(id(f_self))

                self_type = type(f_self)
                ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")
            elif f_self is not None:
                self_type = type(f_self)
                ret.append(f"{self_type.__module__}.{self_type.__name__}")
            else:
                code = frm.f_code
                # fix: restore the "<module>" literal that was lost in a bad
                # extraction of this file (it had been mangled to "")
                if code.co_name in ("wrapper", "<module>"):
                    continue
                ret.append(code.co_name)

            depth -= 1
            if not depth:
                break

        return "\n".join(ret)

    @classmethod
    def _from_exception(cls, pe) -> ParseBaseException:
        """
        internal factory method to simplify creating one type of ParseException
        from another - avoids having __init__ signature conflicts among subclasses
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)

    @cached_property
    def line(self) -> str:
        """
        Return the line of text where the exception occurred.
        """
        return line(self.loc, self.pstr)

    @cached_property
    def lineno(self) -> int:
        """
        Return the 1-based line number of text where the exception occurred.
        """
        return lineno(self.loc, self.pstr)

    @cached_property
    def col(self) -> int:
        """
        Return the 1-based column on the line of text where the exception occurred.
        """
        return col(self.loc, self.pstr)

    @cached_property
    def column(self) -> int:
        """
        Return the 1-based column on the line of text where the exception occurred.
        """
        return col(self.loc, self.pstr)

    @cached_property
    def found(self) -> str:
        """Return a repr of the word (or character) at the error location,
        or "end of text" if the error is past the end of the input."""
        if not self.pstr:
            return ""

        if self.loc >= len(self.pstr):
            return "end of text"

        # pull out next word at error location
        found_match = _exception_word_extractor.match(self.pstr, self.loc)
        if found_match is not None:
            found_text = found_match.group(0)
        else:
            found_text = self.pstr[self.loc : self.loc + 1]

        return repr(found_text).replace(r"\\", "\\")

    # pre-PEP8 compatibility
    @property
    def parserElement(self):
        return self.parser_element

    @parserElement.setter
    def parserElement(self, elem):
        self.parser_element = elem

    def copy(self):
        return copy.copy(self)

    def formatted_message(self) -> str:
        found_phrase = f", found {self.found}" if self.found else ""
        return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})"

    def __str__(self) -> str:
        return self.formatted_message()

    def __repr__(self):
        return str(self)

    def mark_input_line(
        self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<"
    ) -> str:
        """
        Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        markerString = marker_string if marker_string is not None else markerString
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}"
        return line_str.strip()

    def explain(self, depth: int = 16) -> str:
        """
        Method to translate the Python internal traceback into a list
        of the pyparsing expressions that caused the exception to be raised.

        Parameters:

        - depth (default=16) - number of levels back in the stack trace to list expression
          and function names; if None, the full stack trace names will be listed; if 0, only
          the failing input line, marker, and exception string will be shown

        Returns a multi-line string listing the ParserElements and/or function names in the
        exception's stack trace.

        Example::

            # an expression to parse 3 integers
            expr = pp.Word(pp.nums) * 3
            try:
                # a failing parse - the third integer is prefixed with "A"
                expr.parse_string("123 456 A789")
            except pp.ParseException as pe:
                print(pe.explain(depth=0))

        prints::

            123 456 A789
                    ^
            ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)

        Note: the diagnostic output will include string representations of the expressions
        that failed to parse. These representations will be more helpful if you use `set_name` to
        give identifiable names to your expressions. Otherwise they will use the default string
        forms, which may be cryptic to read.

        Note: pyparsing's default truncation of exception tracebacks may also truncate the
        stack of expressions that are displayed in the ``explain`` output. To get the full listing
        of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
        """
        return self.explain_exception(self, depth)

    # Compatibility synonyms
    # fmt: off
    markInputline = replaced_by_pep8("markInputline", mark_input_line)
    # fmt: on
class ParseException(ParseBaseException):
    """
    Exception thrown when a parse expression doesn't match the input string

    Example::

        integer = Word(nums).set_name("integer")
        try:
            integer.parse_string("ABC")
        except ParseException as pe:
            print(pe, f"column: {pe.column}")

    prints::

        Expected integer, found 'ABC' (at char 0), (line:1, col:1) column: 1
    """
class ParseFatalException(ParseBaseException):
    """
    User-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately
    """
class ParseSyntaxException(ParseFatalException):
    """
    Just like :class:`ParseFatalException`, but thrown internally
    when an :class:`ErrorStop` ('-' operator) indicates
    that parsing is to stop immediately because an unbacktrackable
    syntax error has been found.
    """
class RecursiveGrammarException(Exception):
    """
    Exception thrown by :class:`ParserElement.validate` if the
    grammar could be left-recursive; parser may need to enable
    left recursion using :class:`ParserElement.enable_left_recursion`

    Deprecated: only used by deprecated method ParserElement.validate.
    """

    def __init__(self, parseElementList) -> None:
        # chain of parser elements demonstrating the recursive cycle
        self.parseElementTrace = parseElementList

    def __str__(self) -> str:
        return "RecursiveGrammarException: {}".format(self.parseElementTrace)
venv\Lib\site-packages\pyparsing\helpers.py
# helpers.py
import html.entities
import operator
import re
import sys
import typing
from . import __diag__
from .core import *
from .util import (
_bslash,
_flatten,
_escape_regex_range_chars,
make_compressed_re,
replaced_by_pep8,
)
#
# global helpers
#
def counted_array(
    expr: ParserElement,
    int_expr: typing.Optional[ParserElement] = None,
    *,
    intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
    """Helper to define a counted list of expressions.

    This helper defines a pattern of the form::

        integer expr expr expr...

    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the
    leading count token is suppressed.

    If ``int_expr`` is specified, it should be a pyparsing expression
    that produces an integer value (for example, a binary or hex count
    field).  Results names attached to ``int_expr`` are preserved in the
    returned ParseResults.

    Example::

        counted_array(Word(alphas)).parse_string('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef')  # -> ['ab', 'cd']
    """
    # honor the pre-PEP8 keyword if supplied
    intExpr = intExpr or int_expr
    array_expr = Forward()

    def set_array_contents(s, l, t):
        # the count was just parsed - rewrite the Forward to match exactly
        # that many copies of expr (or nothing, for a zero count)
        nonlocal array_expr
        count = t[0]
        array_expr <<= (expr * count) if count else Empty()
        # clear list contents, but keep any named results
        del t[:]

    intExpr = (
        Word(nums).set_parse_action(lambda t: int(t[0]))
        if intExpr is None
        else intExpr.copy()
    )
    intExpr.set_name("arrayLen")
    intExpr.add_parse_action(set_array_contents, call_during_try=True)
    return (intExpr + array_expr).set_name(f"(len) {expr}...")
def match_previous_literal(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_literal(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches a previous literal, will also match the leading
    ``"1:1"`` in ``"1:10"``. If this is not desired, use
    :class:`match_previous_expr`. Do *not* use with packrat parsing
    enabled.
    """
    rep = Forward()

    def copy_token_to_repeater(s, l, t):
        # rebuild the repeater to literally match whatever expr just matched
        if not t:
            rep << Empty()
        elif len(t) == 1:
            rep << t[0]
        else:
            # multiple/nested tokens - match their flattened literal sequence
            tokens = _flatten(t.as_list())
            rep << And(Literal(tok) for tok in tokens)

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
def match_previous_expr(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_expr(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches by expressions, will *not* match the leading ``"1:1"``
    in ``"1:10"``; the expressions are evaluated first, and then
    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
    with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep <<= e2

    def copy_token_to_repeater(s, l, t):
        # capture the tokens matched by expr; the repeater must then match
        # exactly the same token sequence
        matchTokens = _flatten(t.as_list())

        def must_match_these_tokens(s, l, t):
            theseTokens = _flatten(t.as_list())
            if theseTokens != matchTokens:
                # fix: add the missing space between "found" and the tokens
                raise ParseException(
                    s, l, f"Expected {matchTokens}, found {theseTokens}"
                )

        rep.set_parse_action(must_match_these_tokens, callDuringTry=True)

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
def one_of(
    strs: Union[typing.Iterable[str], str],
    caseless: bool = False,
    use_regex: bool = True,
    as_keyword: bool = False,
    *,
    useRegex: bool = True,
    asKeyword: bool = False,
) -> ParserElement:
    """Helper to quickly define a set of alternative :class:`Literal` s,
    and makes sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.

    Parameters:

    - ``strs`` - a string of space-delimited literals, or a collection of
      string literals
    - ``caseless`` - treat all literals as caseless - (default= ``False``)
    - ``use_regex`` - as an optimization, will
      generate a :class:`Regex` object; otherwise, will generate
      a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if
      creating a :class:`Regex` raises an exception) - (default= ``True``)
    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
      generated expressions - (default= ``False``)
    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
      but will be removed in a future release

    Example::

        comp_oper = one_of("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.search_string("B = 12  AA=23 B<=AA AA>12"))

    prints::

        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # merge pre-PEP8 compatibility kwargs with their snake_case equivalents:
    # keyword matching is on if either spelling asks for it; the regex
    # optimization is used only if neither spelling disables it
    asKeyword = asKeyword or as_keyword
    useRegex = useRegex and use_regex

    # a string passed as the second positional arg almost certainly means the
    # caller passed multiple string args instead of one space-delimited string
    if (
        isinstance(caseless, str_type)
        and __diag__.warn_on_multiple_string_args_to_oneof
    ):
        warnings.warn(
            "warn_on_multiple_string_args_to_oneof:"
            " More than one string argument passed to one_of, pass"
            " choices as a list or space-delimited string",
            stacklevel=2,
        )

    # comparison helpers: is_equal detects duplicates, masks(a, b) is True when
    # symbol a is a prefix of b (so a would "mask" b in a MatchFirst)
    if caseless:
        is_equal = lambda a, b: a.upper() == b.upper()
        masks = lambda a, b: b.upper().startswith(a.upper())
    else:
        is_equal = operator.eq
        masks = lambda a, b: b.startswith(a)

    symbols: list[str]
    if isinstance(strs, str_type):
        strs = typing.cast(str, strs)
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        raise TypeError("Invalid argument to one_of, expected string or iterable")
    if not symbols:
        return NoMatch()

    # reorder given symbols to take care to avoid masking longer choices with shorter ones
    # (but only if the given symbols are not just single characters)
    i = 0
    while i < len(symbols) - 1:
        cur = symbols[i]
        for j, other in enumerate(symbols[i + 1 :]):
            if is_equal(other, cur):
                # drop exact duplicate, then re-examine position i
                del symbols[i + j + 1]
                break
            if len(other) > len(cur) and masks(cur, other):
                # move the longer symbol ahead of the shorter prefix that would mask it
                del symbols[i + j + 1]
                symbols.insert(i, other)
                break
        else:
            # no change made at position i - advance
            i += 1

    if useRegex:
        re_flags: int = re.IGNORECASE if caseless else 0

        try:
            if all(len(sym) == 1 for sym in symbols):
                # symbols are just single characters, create range regex pattern
                patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]"
            else:
                patt = "|".join(re.escape(sym) for sym in symbols)

            # wrap with \b word break markers if defining as keywords
            if asKeyword:
                patt = rf"\b(?:{patt})\b"

            ret = Regex(patt, flags=re_flags)
            ret.set_name(" | ".join(re.escape(s) for s in symbols))

            if caseless:
                # add parse action to return symbols as specified, not in random
                # casing as found in input string
                symbol_map = {sym.lower(): sym for sym in symbols}
                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])

            return ret

        except re.error:
            warnings.warn(
                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
            )

    # last resort, just use MatchFirst of Token class corresponding to caseless
    # and asKeyword settings
    CASELESS = KEYWORD = True
    parse_element_class = {
        (CASELESS, KEYWORD): CaselessKeyword,
        (CASELESS, not KEYWORD): CaselessLiteral,
        (not CASELESS, KEYWORD): Keyword,
        (not CASELESS, not KEYWORD): Literal,
    }[(caseless, asKeyword)]
    return MatchFirst(parse_element_class(sym) for sym in symbols).set_name(
        " | ".join(symbols)
    )
def dict_of(key: ParserElement, value: ParserElement) -> Dict:
    """Build a :class:`Dict` expression from separate key and value patterns.

    Each key/value pair is wrapped in a :class:`Group` and repeated with
    :class:`OneOrMore`, so the parsed results support dict-style named access.
    The key pattern may include delimiting punctuation as long as it is
    suppressed, leaving only the significant key text; the value pattern may
    define its own results names.

    Example::

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)

        result = dict_of(attr_label, attr_value).parse_string(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.as_dict())

    prints::

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: 'light blue'
        - posn: 'upper left'
        - shape: 'SQUARE'
        - texture: 'burlap'
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    keyed_entry = Group(key + value)
    return Dict(OneOrMore(keyed_entry))
def original_text_for(
    expr: ParserElement, as_string: bool = True, *, asString: bool = True
) -> ParserElement:
    """Helper to return the original, untokenized text for a given
    expression. Useful to restore the parsed fields of an HTML start
    tag into the raw tag text itself, or to revert separate tokens with
    intervening whitespace back to the original matching input text.

    By default the result is a string containing the original parsed text.
    Pass ``as_string=False`` to instead get a :class:`ParseResults` holding
    any results names that were originally matched, plus a single token with
    the original matched text — required if you want to preserve results
    names defined inside ``expr``.

    The ``asString`` pre-PEP8 argument is retained for compatibility,
    but will be removed in a future release.

    Example::

        src = "this is test <b>bold <i>text</i></b> normal text "
        for tag in ("b", "i"):
            opener, closer = make_html_tags(tag)
            patt = original_text_for(opener + ... + closer)
            print(patt.search_string(src)[0])

    prints::

        ['<b>bold <i>text</i></b>']
        ['<i>text</i>']
    """
    return_as_string = asString and as_string

    # markers that record the current parse location without consuming input
    start_marker = Empty().set_parse_action(lambda s, loc, t: loc)
    end_marker = start_marker.copy()
    end_marker.callPreparse = False
    wrapped = start_marker("_original_start") + expr + end_marker("_original_end")

    if return_as_string:
        def extract_text(s, l, t):
            # replace tokens with the raw slice of the input string
            return s[t._original_start : t._original_end]
    else:
        def extract_text(s, l, t):
            # keep named results, but collapse the token list to the raw slice;
            # pop() removes the internal marker names from the results
            t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]

    wrapped.set_parse_action(extract_text)
    wrapped.ignoreExprs = expr.ignoreExprs
    wrapped.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
    return wrapped
def ungroup(expr: ParserElement) -> ParserElement:
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty, by returning only the first token
    of the wrapped result.
    """
    unwrapped = TokenConverter(expr)
    unwrapped.add_parse_action(lambda toks: toks[0])
    return unwrapped
def locatedExpr(expr: ParserElement) -> ParserElement:
    """
    (DEPRECATED - future code should use the :class:`Located` class)

    Decorate each match of ``expr`` with its starting and ending locations
    in the input string, returned as a :class:`Group` with these results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains tab characters; you
    may want to call :class:`ParserElement.parse_with_tabs`.

    Example::

        wd = Word(alphas)
        for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width expression whose parse action reports the current location
    position = Empty().set_parse_action(lambda ss, ll, tt: ll)
    start_loc = position("locn_start")
    # leaveWhitespace so the end location is taken right after the match,
    # before any trailing whitespace is skipped
    end_loc = position.copy().leaveWhitespace()("locn_end")
    return Group(start_loc + expr("value") + end_loc)
# Sentinel default for nested_expr's ignore_expr/ignoreExpr arguments, so an
# explicit None (meaning "ignore nothing") can be distinguished from
# "argument not given" (which defaults to quoted_string inside nested_expr).
_NO_IGNORE_EXPR_GIVEN = NoMatch()
def nested_expr(
    opener: Union[str, ParserElement] = "(",
    closer: Union[str, ParserElement] = ")",
    content: typing.Optional[ParserElement] = None,
    ignore_expr: ParserElement = _NO_IGNORE_EXPR_GIVEN,
    *,
    ignoreExpr: ParserElement = _NO_IGNORE_EXPR_GIVEN,
) -> ParserElement:
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters (``"("`` and ``")"`` are the default).

    Parameters:

    - ``opener`` - opening character for a nested list
      (default= ``"("``); can also be a pyparsing expression
    - ``closer`` - closing character for a nested list
      (default= ``")"``); can also be a pyparsing expression
    - ``content`` - expression for items within the nested lists
      (default= ``None``)
    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
      (default= :class:`quoted_string`)
    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
      but will be removed in a future release

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignore_expr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quoted_string or
    a comment expression. Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quoted_string`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = one_of("void int short long char float double")
        decl_data_type = Combine(data_type + Opt(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(c_style_comment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.search_string(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    # reconcile pre-PEP8 ignoreExpr with ignore_expr: if only one of the two
    # was explicitly given (is not the sentinel), use that one
    if ignoreExpr != ignore_expr:
        ignoreExpr = ignore_expr if ignoreExpr is _NO_IGNORE_EXPR_GIVEN else ignoreExpr
    # neither spelling was given - default to ignoring quoted strings
    if ignoreExpr is _NO_IGNORE_EXPR_GIVEN:
        ignoreExpr = quoted_string()
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression: runs of characters that are not
        # delimiters, not whitespace, and not part of an ignored expression
        if isinstance(opener, str_type) and isinstance(closer, str_type):
            opener = typing.cast(str, opener)
            closer = typing.cast(str, closer)
            if len(opener) == 1 and len(closer) == 1:
                # single-char delimiters can simply be excluded from the char set
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + CharsNotIn(
                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
                                exact=1,
                            )
                        )
                    )
                else:
                    content = Combine(
                        Empty()
                        + CharsNotIn(
                            opener + closer + ParserElement.DEFAULT_WHITE_CHARS
                        )
                    )
            else:
                # multi-char delimiters require negative lookahead per character
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    )
                else:
                    content = Combine(
                        OneOrMore(
                            ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    )
        else:
            raise ValueError(
                "opening and closing arguments must be strings if no content expression is given"
            )
        # for these internally-created context expressions, simulate whitespace-skipping
        if ParserElement.DEFAULT_WHITE_CHARS:
            content.set_parse_action(
                lambda t: t[0].strip(ParserElement.DEFAULT_WHITE_CHARS)
            )
    # recursive grammar: a nested expression is a group of ignored expressions,
    # nested expressions, and content between suppressed delimiters
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(
            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.set_name(f"nested {opener}{closer} expression")
    # don't override error message from content expressions
    ret.errmsg = None
    return ret
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    Parameters:
    - ``tagStr`` - tag name as a string, or a ParserElement (whose ``name``
      attribute supplies the results-name base)
    - ``xml`` - if True, build strict XML-style tags (case-sensitive tag name,
      double-quoted attribute values only); if False, build HTML-style tags
      (caseless tag name, attribute names lowercased, values optional and
      possibly unquoted)

    Returns an ``(openTag, closeTag)`` pair. Both carry a ``tag`` attribute
    with the tag name; ``openTag`` also gets a ``tag_body`` SkipTo expression
    and a ``start<Tag>`` results name, ``closeTag`` an ``end<Tag>`` name.
    """
    if isinstance(tagStr, str_type):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        # XML: attribute values must be double-quoted
        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    else:
        # HTML: values may be quoted or bare; attribute names are lowercased
        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(
                ZeroOrMore(
                    Group(
                        tagAttrName.set_parse_action(lambda t: t[0].lower())
                        + Opt(Suppress("=") + tagAttrValue)
                    )
                )
            )
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    # FIX: the closing tag must match "</tagname>"; the "</" literal had been
    # lost (Literal("")), which made closeTag match a bare tagname + ">"
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    openTag.set_name(f"<{resname}>")
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
        )
    )
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
    ).set_name(f"</{resname}>")
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
def make_html_tags(
tag_str: Union[str, ParserElement],
) -> tuple[ParserElement, ParserElement]:
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '
# results.py
from __future__ import annotations
import collections
from collections.abc import (
MutableMapping,
Mapping,
MutableSequence,
Iterator,
Iterable,
)
import pprint
from typing import Any
from .util import replaced_by_pep8
# types accepted wherever a "string" argument is expected
str_type: tuple[type, ...] = (str, bytes)
# the concrete generator type, for cheap isinstance checks (e.g. in __new__)
_generator_type = type((_ for _ in ()))
class _ParseResultsWithOffset:
    # Internal pairing of a named ParseResults value with the integer offset
    # at which it occurs in its parent's token list; indexable like a tuple.
    tup: tuple[ParseResults, int]
    __slots__ = ["tup"]

    def __init__(self, p1: ParseResults, p2: int) -> None:
        self.tup: tuple[ParseResults, int] = (p1, p2)

    def __getitem__(self, i):
        # tuple-style access: [0] -> results value, [1] -> offset
        return self.tup[i]

    def __getstate__(self):
        # pickle support - state is just the (results, offset) tuple
        return self.tup

    def __setstate__(self, *args):
        self.tup = args[0]
class ParseResults:
    """Structured parse results, to provide multiple means of access to
    the parsed data:

    - as a list (``len(results)``)
    - by list index (``results[0], results[1]``, etc.)
    - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)

    Example::

        integer = Word(nums)
        date_str = (integer.set_results_name("year") + '/'
                    + integer.set_results_name("month") + '/'
                    + integer.set_results_name("day"))
        # equivalent form:
        # date_str = (integer("year") + '/'
        #             + integer("month") + '/'
        #             + integer("day"))

        # parse_string returns a ParseResults object
        result = date_str.parse_string("1999/12/31")

        def test(s, fn=repr):
            print(f"{s} -> {fn(eval(s))}")
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)

    prints::

        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: '31'
        - month: '12'
        - year: '1999'
    """

    # values considered "empty" when deciding whether to record a named result
    _null_values: tuple[Any, ...] = (None, [], ())

    _name: str                 # results name of this ParseResults, if any
    _parent: ParseResults      # enclosing ParseResults (set when nested)
    _all_names: set[str]       # names defined with list_all_matches semantics
    _modal: bool               # modal (last-match-wins) naming behavior
    _toklist: list[Any]        # the matched tokens, in order
    _tokdict: dict[str, Any]   # name -> list of _ParseResultsWithOffset

    __slots__ = (
        "_name",
        "_parent",
        "_all_names",
        "_modal",
        "_toklist",
        "_tokdict",
    )

    class List(list):
        """
        Simple wrapper class to distinguish parsed list results that should be preserved
        as actual Python lists, instead of being converted to :class:`ParseResults`::

            LBRACK, RBRACK = map(pp.Suppress, "[]")
            element = pp.Forward()
            item = ppc.integer
            element_list = LBRACK + pp.DelimitedList(element) + RBRACK

            # add parse actions to convert from ParseResults to actual Python collection types
            def as_python_list(t):
                return pp.ParseResults.List(t.as_list())
            element_list.add_parse_action(as_python_list)

            element <<= item | element_list

            element.run_tests('''
                100
                [2,3,4]
                [[2, 1],3,4]
                [(2, 1),3,4]
                (2,3,4)
                ''', post_parse=lambda s, r: (r[0], type(r[0])))

        prints::

            100
            (100, <class 'int'>)

            [2,3,4]
            ([2, 3, 4], <class 'pyparsing.results.ParseResults.List'>)

            [[2, 1],3,4]
            ([[2, 1], 3, 4], <class 'pyparsing.results.ParseResults.List'>)

        (Used internally by :class:`Group` when `aslist=True`.)
        """

        def __new__(cls, contained=None):
            if contained is None:
                contained = []

            if not isinstance(contained, list):
                raise TypeError(
                    f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}"
                )

            return list.__new__(cls)

    def __new__(cls, toklist=None, name=None, **kwargs):
        # passing an existing ParseResults returns it unchanged
        if isinstance(toklist, ParseResults):
            return toklist
        self = object.__new__(cls)
        self._name = None
        self._parent = None
        self._all_names = set()

        if toklist is None:
            self._toklist = []
        elif isinstance(toklist, (list, _generator_type)):
            # a ParseResults.List is kept as a single (list) token;
            # any other list or generator is expanded into the token list
            self._toklist = (
                [toklist[:]]
                if isinstance(toklist, ParseResults.List)
                else list(toklist)
            )
        else:
            self._toklist = [toklist]
        self._tokdict = dict()
        return self

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(
        self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
    ) -> None:
        self._tokdict: dict[str, _ParseResultsWithOffset]
        self._modal = modal

        if name is None or name == "":
            return

        if isinstance(name, int):
            name = str(name)

        if not modal:
            self._all_names = {name}

        self._name = name

        if toklist in self._null_values:
            return

        if isinstance(toklist, (str_type, type)):
            toklist = [toklist]

        if asList:
            # record the named value as a nested ParseResults at offset 0
            if isinstance(toklist, ParseResults):
                self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0)
            else:
                self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
            self[name]._name = name
            return

        try:
            self[name] = toklist[0]
        except (KeyError, TypeError, IndexError):
            if toklist is not self:
                self[name] = toklist
            else:
                self._name = name

    def __getitem__(self, i):
        if isinstance(i, (int, slice)):
            return self._toklist[i]

        # named access: modal names return the last match; list_all_matches
        # names return all matches as a ParseResults
        if i not in self._all_names:
            return self._tokdict[i][-1][0]

        return ParseResults([v[0] for v in self._tokdict[i]])

    def __setitem__(self, k, v, isinstance=isinstance):
        if isinstance(v, _ParseResultsWithOffset):
            # append a named value that already carries its offset
            self._tokdict[k] = self._tokdict.get(k, list()) + [v]
            sub = v[0]
        elif isinstance(k, (int, slice)):
            self._toklist[k] = v
            sub = v
        else:
            # plain named assignment - record at offset 0
            self._tokdict[k] = self._tokdict.get(k, []) + [
                _ParseResultsWithOffset(v, 0)
            ]
            sub = v
        if isinstance(sub, ParseResults):
            sub._parent = self

    def __delitem__(self, i):
        if not isinstance(i, (int, slice)):
            del self._tokdict[i]
            return

        mylen = len(self._toklist)
        del self._toklist[i]

        # convert int to slice
        if isinstance(i, int):
            if i < 0:
                i += mylen
            i = slice(i, i + 1)
        # get removed indices
        removed = list(range(*i.indices(mylen)))
        removed.reverse()
        # fixup indices in token dictionary
        for occurrences in self._tokdict.values():
            for j in removed:
                for k, (value, position) in enumerate(occurrences):
                    # shift stored offsets left past each removed index
                    occurrences[k] = _ParseResultsWithOffset(
                        value, position - (position > j)
                    )

    def __contains__(self, k) -> bool:
        return k in self._tokdict

    def __len__(self) -> int:
        return len(self._toklist)

    def __bool__(self) -> bool:
        # truthy if there are any tokens OR any named results
        return not not (self._toklist or self._tokdict)

    def __iter__(self) -> Iterator:
        return iter(self._toklist)

    def __reversed__(self) -> Iterator:
        return iter(self._toklist[::-1])

    def keys(self):
        return iter(self._tokdict)

    def values(self):
        return (self[k] for k in self.keys())

    def items(self):
        return ((k, self[k]) for k in self.keys())

    def haskeys(self) -> bool:
        """
        Since ``keys()`` returns an iterator, this method is helpful in bypassing
        code that looks for the existence of any defined results names."""
        return not not self._tokdict

    def pop(self, *args, **kwargs):
        """
        Removes and returns item at specified index (default= ``last``).
        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
        passed no argument or an integer argument, it will use ``list``
        semantics and pop tokens from the list of parsed tokens. If passed
        a non-integer argument (most likely a string), it will use ``dict``
        semantics and pop the corresponding value from any defined results
        names. A second default return value argument is supported, just as in
        ``dict.pop()``.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            def remove_first(tokens):
                tokens.pop(0)
            numlist.add_parse_action(remove_first)
            print(numlist.parse_string("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + Word(nums)[1, ...]
            print(patt.parse_string("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.add_parse_action(remove_LABEL)
            print(patt.parse_string("AAB 123 321").dump())

        prints::

            ['AAB', '123', '321']
            - LABEL: 'AAB'

            ['AAB', '123', '321']
        """
        if not args:
            args = [-1]
        for k, v in kwargs.items():
            if k == "default":
                args = (args[0], v)
            else:
                raise TypeError(f"pop() got an unexpected keyword argument {k!r}")
        if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            # key not present - return the supplied default
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, default_value=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given ``default_value`` or ``None`` if no
        ``default_value`` is specified.

        Similar to ``dict.get()``.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return default_value

    def insert(self, index, ins_string):
        """
        Inserts new element at location index in the list of parsed tokens.

        Similar to ``list.insert()``.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            numlist.add_parse_action(insert_locn)
            print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
        """
        self._toklist.insert(index, ins_string)
        # fixup indices in token dictionary
        for occurrences in self._tokdict.values():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(
                    value, position + (position > index)
                )

    def append(self, item):
        """
        Add single element to end of ``ParseResults`` list of elements.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            numlist.add_parse_action(append_sum)
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self._toklist.append(item)

    def extend(self, itemseq):
        """
        Add sequence of elements to end of ``ParseResults`` list of elements.

        Example::

            patt = Word(alphas)[1, ...]

            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            patt.add_parse_action(make_palindrome)
            print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        if isinstance(itemseq, ParseResults):
            # merge named results too, not just the token list
            self.__iadd__(itemseq)
        else:
            self._toklist.extend(itemseq)

    def clear(self):
        """
        Clear all elements and results names.
        """
        del self._toklist[:]
        self._tokdict.clear()

    def __getattr__(self, name):
        # attribute access falls back to named-result lookup; unknown names
        # return "" rather than raising (except dunder names, for protocols)
        try:
            return self[name]
        except KeyError:
            if name.startswith("__"):
                raise AttributeError(name)
            return ""

    def __add__(self, other: ParseResults) -> ParseResults:
        ret = self.copy()
        ret += other
        return ret

    def __iadd__(self, other: ParseResults) -> ParseResults:
        if not other:
            return self

        if other._tokdict:
            offset = len(self._toklist)
            # re-base the other results' offsets onto the end of our token list
            addoffset = lambda a: offset if a < 0 else a + offset
            otheritems = other._tokdict.items()
            otherdictitems = [
                (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
                for k, vlist in otheritems
                for v in vlist
            ]
            for k, v in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0]._parent = self

        self._toklist += other._toklist
        self._all_names |= other._all_names
        return self

    def __radd__(self, other) -> ParseResults:
        if isinstance(other, int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})"

    def __str__(self) -> str:
        return (
            "["
            + ", ".join(
                [
                    str(i) if isinstance(i, ParseResults) else repr(i)
                    for i in self._toklist
                ]
            )
            + "]"
        )

    def _asStringList(self, sep=""):
        # flatten all tokens (recursing into nested ParseResults) to strings
        out = []
        for item in self._toklist:
            if out and sep:
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(str(item))
        return out

    def as_list(self, *, flatten: bool = False) -> list:
        """
        Returns the parse results as a nested list of matching tokens, all converted to strings.
        If flatten is True, all the nesting levels in the returned list are collapsed.

        Example::

            patt = Word(alphas)[1, ...]
            result = patt.parse_string("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

            # Use as_list() to create an actual list
            result_list = result.as_list()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        """

        def flattened(pr):
            # iterative breadth-less walk over self's tokens, expanding nested
            # ParseResults in place (note: walks self, not the pr argument)
            to_visit = collections.deque([*self])
            while to_visit:
                to_do = to_visit.popleft()
                if isinstance(to_do, ParseResults):
                    to_visit.extendleft(to_do[::-1])
                else:
                    yield to_do

        if flatten:
            return [*flattened(self)]
        else:
            return [
                res.as_list() if isinstance(res, ParseResults) else res
                for res in self._toklist
            ]

    def as_dict(self) -> dict:
        """
        Returns the named parse results as a nested dictionary.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

            result_dict = result.as_dict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """

        def to_item(obj):
            if isinstance(obj, ParseResults):
                return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
            else:
                return obj

        return dict((k, to_item(v)) for k, v in self.items())

    def copy(self) -> ParseResults:
        """
        Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults`
        items contained within the source are shared with the copy. Use
        :class:`ParseResults.deepcopy()` to create a copy with its own separate
        content values.
        """
        ret = ParseResults(self._toklist)
        ret._tokdict = self._tokdict.copy()
        ret._parent = self._parent
        ret._all_names |= self._all_names
        ret._name = self._name
        return ret

    def deepcopy(self) -> ParseResults:
        """
        Returns a new deep copy of a :class:`ParseResults` object.
        """
        ret = self.copy()
        # replace values with copies if they are of known mutable types
        for i, obj in enumerate(self._toklist):
            if isinstance(obj, ParseResults):
                ret._toklist[i] = obj.deepcopy()
            elif isinstance(obj, (str, bytes)):
                # immutable - safe to share
                pass
            elif isinstance(obj, MutableMapping):
                ret._toklist[i] = dest = type(obj)()
                for k, v in obj.items():
                    dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v
            elif isinstance(obj, Iterable):
                ret._toklist[i] = type(obj)(
                    v.deepcopy() if isinstance(v, ParseResults) else v for v in obj  # type: ignore[call-arg]
                )
        return ret

    def get_name(self) -> str | None:
        r"""
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.

        Example::

            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = user_data[1, ...]

            result = user_info.parse_string("22 111-22-3333 #221B")
            for item in result:
                print(item.get_name(), ':', item[0])

        prints::

            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self._name:
            return self._name
        elif self._parent:
            # search the parent's named results for an entry that is this object
            par: ParseResults = self._parent
            parent_tokdict_items = par._tokdict.items()
            return next(
                (
                    k
                    for k, vlist in parent_tokdict_items
                    for v, loc in vlist
                    if v is self
                ),
                None,
            )
        elif (
            len(self) == 1
            and len(self._tokdict) == 1
            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
        ):
            # single token with a single name covering it - report that name
            return next(iter(self._tokdict.keys()))
        else:
            return None

    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
        """
        Diagnostic method for listing out the contents of
        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
        that this string can be embedded in a nested display of other data.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string('1999/12/31')
            print(result.dump())

        prints::

            ['1999', '/', '12', '/', '31']
            - day: '31'
            - month: '12'
            - year: '1999'
        """
        out = []
        NL = "\n"
        out.append(indent + str(self.as_list()) if include_list else "")

        if not full:
            return "".join(out)

        if self.haskeys():
            # list named results, sorted by name, one per line
            items = sorted((str(k), v) for k, v in self.items())
            for k, v in items:
                if out:
                    out.append(NL)
                out.append(f"{indent}{(' ' * _depth)}- {k}: ")
                if not isinstance(v, ParseResults):
                    out.append(repr(v))
                    continue

                if not v:
                    out.append(str(v))
                    continue

                out.append(
                    v.dump(
                        indent=indent,
                        full=full,
                        include_list=include_list,
                        _depth=_depth + 1,
                    )
                )

        if not any(isinstance(vv, ParseResults) for vv in self):
            return "".join(out)

        # list any nested ParseResults tokens by index
        v = self
        incr = " "
        nl = "\n"
        for i, vv in enumerate(v):
            if isinstance(vv, ParseResults):
                vv_dump = vv.dump(
                    indent=indent,
                    full=full,
                    include_list=include_list,
                    _depth=_depth + 1,
                )
                out.append(
                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}"
                )
            else:
                out.append(
                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}"
                )

        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the
        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
        Accepts additional positional or keyword args as defined for
        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .

        Example::

            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(DelimitedList(term)))
            result = func.parse_string("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)

        prints::

            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.as_list(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        return (
            self._toklist,
            (
                self._tokdict.copy(),
                None,  # placeholder; unpacked and discarded in __setstate__
                self._all_names,
                self._name,
            ),
        )

    def __setstate__(self, state):
        # `par` is the discarded placeholder from __getstate__
        self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
        self._all_names = set(inAccumNames)
        self._parent = None

    def __getnewargs__(self):
        return self._toklist, self._name

    def __dir__(self):
        # include results names in dir() for interactive exploration
        return dir(type(self)) + list(self.keys())

    @classmethod
    def from_dict(cls, other, name=None) -> ParseResults:
        """
        Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
        name-value relations as results names. If an optional ``name`` argument is
        given, a nested ``ParseResults`` will be returned.
        """

        def is_iterable(obj):
            try:
                iter(obj)
            except Exception:
                return False
            # str's are iterable, but in pyparsing, we don't want to iterate over them
            else:
                return not isinstance(obj, str_type)

        ret = cls([])
        for k, v in other.items():
            if isinstance(v, Mapping):
                ret += cls.from_dict(v, name=k)
            else:
                ret += cls([v], name=k, asList=is_iterable(v))
        if name is not None:
            ret = cls([ret], name=name)
        return ret

    asList = as_list
    """Deprecated - use :class:`as_list`"""
    asDict = as_dict
    """Deprecated - use :class:`as_dict`"""
    getName = get_name
    """Deprecated - use :class:`get_name`"""
# Register ParseResults as a virtual subclass of both MutableMapping and
# MutableSequence, since it supports both dict-style and list-style access
# (isinstance checks succeed without actual inheritance).
MutableMapping.register(ParseResults)
MutableSequence.register(ParseResults)
# venv\Lib\site-packages\pyparsing\testing.py
# testing.py
from contextlib import contextmanager
import re
import typing
from .core import (
ParserElement,
ParseException,
Keyword,
__diag__,
__compat__,
)
class pyparsing_test:
"""
namespace class for classes useful in writing unit tests
"""
class reset_pyparsing_context:
"""
Context manager to be used when writing unit tests that modify pyparsing config values:
- packrat parsing
- bounded recursion parsing
- default whitespace characters.
- default keyword characters
- literal string auto-conversion class
- __diag__ settings
Example::
with reset_pyparsing_context():
# test that literals used to construct a grammar are automatically suppressed
ParserElement.inlineLiteralsUsing(Suppress)
term = Word(alphas) | Word(nums)
group = Group('(' + term[...] + ')')
# assert that the '()' characters are not included in the parsed tokens
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
# after exiting context manager, literals are converted to Literal expressions again
"""
def __init__(self):
self._save_context = {}
def save(self):
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
self._save_context["literal_string_class"] = (
ParserElement._literalStringClass
)
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
if ParserElement._packratEnabled:
self._save_context["packrat_cache_size"] = (
ParserElement.packrat_cache.size
)
else:
self._save_context["packrat_cache_size"] = None
self._save_context["packrat_parse"] = ParserElement._parse
self._save_context["recursion_enabled"] = (
ParserElement._left_recursion_enabled
)
self._save_context["__diag__"] = {
name: getattr(__diag__, name) for name in __diag__._all_names
}
self._save_context["__compat__"] = {
"collect_all_And_tokens": __compat__.collect_all_And_tokens
}
return self
def restore(self):
# reset pyparsing global state
if (
ParserElement.DEFAULT_WHITE_CHARS
!= self._save_context["default_whitespace"]
):
ParserElement.set_default_whitespace_chars(
self._save_context["default_whitespace"]
)
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
ParserElement.inlineLiteralsUsing(
self._save_context["literal_string_class"]
)
for name, value in self._save_context["__diag__"].items():
(__diag__.enable if value else __diag__.disable)(name)
ParserElement._packratEnabled = False
if self._save_context["packrat_enabled"]:
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
else:
ParserElement._parse = self._save_context["packrat_parse"]
ParserElement._left_recursion_enabled = self._save_context[
"recursion_enabled"
]
__compat__.collect_all_And_tokens = self._save_context["__compat__"]
return self
def copy(self):
ret = type(self)()
ret._save_context.update(self._save_context)
return ret
def __enter__(self):
return self.save()
def __exit__(self, *args):
self.restore()
class TestParseResultsAsserts:
"""
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
"""
def assertParseResultsEquals(
self, result, expected_list=None, expected_dict=None, msg=None
):
"""
Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
and compare any defined results names with an optional ``expected_dict``.
"""
if expected_list is not None:
self.assertEqual(expected_list, result.as_list(), msg=msg)
if expected_dict is not None:
self.assertEqual(expected_dict, result.as_dict(), msg=msg)
def assertParseAndCheckList(
self, expr, test_string, expected_list, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
"""
result = expr.parse_string(test_string, parse_all=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
def assertParseAndCheckDict(
self, expr, test_string, expected_dict, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
"""
result = expr.parse_string(test_string, parseAll=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
def assertRunTestResults(
self, run_tests_report, expected_parse_results=None, msg=None
):
"""
Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
Finally, asserts that the overall ``runTests()`` success value is ``True``.
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
"""
run_test_success, run_test_results = run_tests_report
if expected_parse_results is None:
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
return
merged = [
(*rpt, expected)
for rpt, expected in zip(run_test_results, expected_parse_results)
]
for test_string, result, expected in merged:
# expected should be a tuple containing a list and/or a dict or an exception,
# and optional failure message string
# an empty tuple will skip any result validation
fail_msg = next((exp for exp in expected if isinstance(exp, str)), None)
expected_exception = next(
(
exp
for exp in expected
if isinstance(exp, type) and issubclass(exp, Exception)
),
None,
)
if expected_exception is not None:
with self.assertRaises(
expected_exception=expected_exception, msg=fail_msg or msg
):
if isinstance(result, Exception):
raise result
else:
expected_list = next(
(exp for exp in expected if isinstance(exp, list)), None
)
expected_dict = next(
(exp for exp in expected if isinstance(exp, dict)), None
)
if (expected_list, expected_dict) != (None, None):
self.assertParseResultsEquals(
result,
expected_list=expected_list,
expected_dict=expected_dict,
msg=fail_msg or msg,
)
else:
# warning here maybe?
print(f"no validation for {test_string!r}")
# do this last, in case some specific test results can be reported instead
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
@contextmanager
def assertRaisesParseException(
self, exc_type=ParseException, expected_msg=None, msg=None
):
if expected_msg is not None:
if isinstance(expected_msg, str):
expected_msg = re.escape(expected_msg)
with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx:
yield ctx
else:
with self.assertRaises(exc_type, msg=msg) as ctx:
yield ctx
@staticmethod
def with_line_numbers(
s: str,
start_line: typing.Optional[int] = None,
end_line: typing.Optional[int] = None,
expand_tabs: bool = True,
eol_mark: str = "|",
mark_spaces: typing.Optional[str] = None,
mark_control: typing.Optional[str] = None,
*,
indent: typing.Union[str, int] = "",
base_1: bool = True,
) -> str:
"""
Helpful method for debugging a parser - prints a string with line and column numbers.
(Line and column numbers are 1-based by default - if debugging a parse action,
pass base_1=False, to correspond to the loc value passed to the parse action.)
:param s: tuple(bool, str - string to be printed with line and column numbers
:param start_line: int - (optional) starting line number in s to print (default=1)
:param end_line: int - (optional) ending line number in s to print (default=len(s))
:param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
:param mark_spaces: str - (optional) special character to display in place of spaces
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
character; valid values:
- "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
- any single character string - replace control characters with given string
- None (default) - string is displayed as-is
:param indent: str | int - (optional) string to indent with line and column numbers; if an int
is passed, converted to " " * indent
:param base_1: bool - (optional) whether to label string using base 1; if False, string will be
labeled based at 0 (default=True)
:return: str - input string with leading line numbers and column number headers
"""
if expand_tabs:
s = s.expandtabs()
if isinstance(indent, int):
indent = " " * indent
indent = indent.expandtabs()
if mark_control is not None:
mark_control = typing.cast(str, mark_control)
if mark_control == "unicode":
transtable_map = {
c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))
}
transtable_map[127] = 0x2421
tbl = str.maketrans(transtable_map)
eol_mark = ""
else:
ord_mark_control = ord(mark_control)
tbl = str.maketrans(
{c: ord_mark_control for c in list(range(0, 32)) + [127]}
)
s = s.translate(tbl)
if mark_spaces is not None and mark_spaces != " ":
if mark_spaces == "unicode":
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
s = s.translate(tbl)
else:
s = s.replace(" ", mark_spaces)
if start_line is None:
start_line = 0
if end_line is None:
end_line = len(s)
end_line = min(end_line, len(s))
start_line = min(max(0, start_line), end_line)
if mark_control != "unicode":
s_lines = s.splitlines()[start_line - base_1 : end_line]
else:
s_lines = [
line + "␊" for line in s.split("␊")[start_line - base_1 : end_line]
]
if not s_lines:
return ""
lineno_width = len(str(end_line))
max_line_len = max(len(line) for line in s_lines)
lead = indent + " " * (lineno_width + 1)
if max_line_len >= 99:
header0 = (
lead
+ ("" if base_1 else " ")
+ "".join(
f"{' ' * 99}{(i + 1) % 100}"
for i in range(1 if base_1 else 0, max(max_line_len // 100, 1))
)
+ "\n"
)
else:
header0 = ""
header1 = (
("" if base_1 else " ")
+ lead
+ "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10)))
+ "\n"
)
digits = "1234567890"
header2 = (
lead + ("" if base_1 else "0") + digits * (-(-max_line_len // 10)) + "\n"
)
return (
header1
+ header2
+ "\n".join(
f"{indent}{i:{lineno_width}d}:{line}{eol_mark}"
for i, line in enumerate(s_lines, start=start_line + base_1)
)
+ "\n"
)
venv\Lib\site-packages\pyparsing\unicode.py
# unicode.py
import sys
from itertools import filterfalse
from typing import Union
class _lazyclassproperty:
    """Descriptor that computes a class-level value on first access and caches it per class."""

    def __init__(self, fn):
        self.fn = fn
        self.__doc__ = fn.__doc__
        self.__name__ = fn.__name__

    def __get__(self, obj, cls):
        if cls is None:
            cls = type(obj)
        # give each class its own _intern cache - if the attribute found is
        # actually a superclass's dict, shadow it with a fresh one so cached
        # values are not shared across the class hierarchy
        if not hasattr(cls, "_intern") or any(
            cls._intern is getattr(superclass, "_intern", [])
            for superclass in cls.__mro__[1:]
        ):
            cls._intern = {}
        attrname = self.fn.__name__
        # compute once per class, then serve from the cache
        if attrname not in cls._intern:
            cls._intern[attrname] = self.fn(cls)
        return cls._intern[attrname]
# type alias for class-level `_ranges` lists: 2-tuples are inclusive
# (first, last) codepoint ranges; 1-tuples denote a single codepoint
UnicodeRangeList = list[Union[tuple[int, int], tuple[int]]]
class unicode_set:
    """
    A set of Unicode characters, for language-specific strings for
    ``alphas``, ``nums``, ``alphanums``, and ``printables``.

    A unicode_set is defined by a list of ranges in the Unicode character
    set, in a class attribute ``_ranges``. Ranges can be specified using
    2-tuples or a 1-tuple, such as::

        _ranges = [
            (0x0020, 0x007e),
            (0x00a0, 0x00ff),
            (0x0100,),
        ]

    Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).

    A unicode set can also be defined using multiple inheritance of other unicode sets::

        class CJK(Chinese, Japanese, Korean):
            pass
    """

    # inclusive codepoint ranges defining this set; subclasses override
    _ranges: UnicodeRangeList = []

    @_lazyclassproperty
    def _chars_for_ranges(cls) -> list[str]:
        # collect codepoints from this class and all unicode_set bases,
        # stopping at unicode_set itself, then dedupe and sort as characters
        ret: list[int] = []
        for cc in cls.__mro__:  # type: ignore[attr-defined]
            if cc is unicode_set:
                break
            for rr in getattr(cc, "_ranges", ()):
                # rr[-1] handles both (first, last) 2-tuples and (x,) 1-tuples
                ret.extend(range(rr[0], rr[-1] + 1))
        return sorted(chr(c) for c in set(ret))

    @_lazyclassproperty
    def printables(cls) -> str:
        """all non-whitespace characters in this range"""
        return "".join(filterfalse(str.isspace, cls._chars_for_ranges))

    @_lazyclassproperty
    def alphas(cls) -> str:
        """all alphabetic characters in this range"""
        return "".join(filter(str.isalpha, cls._chars_for_ranges))

    @_lazyclassproperty
    def nums(cls) -> str:
        """all numeric digit characters in this range"""
        return "".join(filter(str.isdigit, cls._chars_for_ranges))

    @_lazyclassproperty
    def alphanums(cls) -> str:
        """all alphanumeric characters in this range"""
        return cls.alphas + cls.nums

    @_lazyclassproperty
    def identchars(cls) -> str:
        """all characters in this range that are valid identifier characters, plus underscore '_'"""
        return "".join(
            sorted(
                set(filter(str.isidentifier, cls._chars_for_ranges))
                | set(
                    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
                    "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
                    "_"
                )
            )
        )

    @_lazyclassproperty
    def identbodychars(cls) -> str:
        """
        all characters in this range that are valid identifier body characters,
        plus the digits 0-9, and · (Unicode MIDDLE DOT)
        """
        # a char is a valid identifier body char if "_" + char is an identifier
        identifier_chars = set(
            c for c in cls._chars_for_ranges if ("_" + c).isidentifier()
        )
        return "".join(
            sorted(identifier_chars | set(cls.identchars) | set("0123456789·"))
        )

    @_lazyclassproperty
    def identifier(cls):
        """
        a pyparsing Word expression for an identifier using this range's definitions for
        identchars and identbodychars
        """
        # imported here to avoid a circular import at module load time
        from pyparsing import Word

        return Word(cls.identchars, cls.identbodychars)
class pyparsing_unicode(unicode_set):
    """
    A namespace class for defining common language unicode_sets.
    """

    # fmt: off

    # define ranges in language character sets
    _ranges: UnicodeRangeList = [
        (0x0020, sys.maxunicode),
    ]

    class BasicMultilingualPlane(unicode_set):
        """Unicode set for the Basic Multilingual Plane"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0xFFFF),
        ]

    class Latin1(unicode_set):
        """Unicode set for Latin-1 Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0020, 0x007E),
            (0x00A0, 0x00FF),
        ]

    class LatinA(unicode_set):
        """Unicode set for Latin-A Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0100, 0x017F),
        ]

    class LatinB(unicode_set):
        """Unicode set for Latin-B Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0180, 0x024F),
        ]

    class Greek(unicode_set):
        """Unicode set for Greek Unicode Character Ranges"""
        _ranges: UnicodeRangeList = [
            (0x0342, 0x0345),
            (0x0370, 0x0377),
            (0x037A, 0x037F),
            (0x0384, 0x038A),
            (0x038C,),
            (0x038E, 0x03A1),
            (0x03A3, 0x03E1),
            (0x03F0, 0x03FF),
            (0x1D26, 0x1D2A),
            (0x1D5E,),
            (0x1D60,),
            (0x1D66, 0x1D6A),
            (0x1F00, 0x1F15),
            (0x1F18, 0x1F1D),
            (0x1F20, 0x1F45),
            (0x1F48, 0x1F4D),
            (0x1F50, 0x1F57),
            (0x1F59,),
            (0x1F5B,),
            (0x1F5D,),
            (0x1F5F, 0x1F7D),
            (0x1F80, 0x1FB4),
            (0x1FB6, 0x1FC4),
            (0x1FC6, 0x1FD3),
            (0x1FD6, 0x1FDB),
            (0x1FDD, 0x1FEF),
            (0x1FF2, 0x1FF4),
            (0x1FF6, 0x1FFE),
            (0x2129,),
            (0x2719, 0x271A),
            (0xAB65,),
            (0x10140, 0x1018D),
            (0x101A0,),
            (0x1D200, 0x1D245),
            (0x1F7A1, 0x1F7A7),
        ]

    class Cyrillic(unicode_set):
        """Unicode set for Cyrillic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0400, 0x052F),
            (0x1C80, 0x1C88),
            (0x1D2B,),
            (0x1D78,),
            (0x2DE0, 0x2DFF),
            (0xA640, 0xA672),
            (0xA674, 0xA69F),
            (0xFE2E, 0xFE2F),
        ]

    class Chinese(unicode_set):
        """Unicode set for Chinese Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x2E80, 0x2E99),
            (0x2E9B, 0x2EF3),
            (0x31C0, 0x31E3),
            (0x3400, 0x4DB5),
            (0x4E00, 0x9FEF),
            (0xA700, 0xA707),
            (0xF900, 0xFA6D),
            (0xFA70, 0xFAD9),
            (0x16FE2, 0x16FE3),
            (0x1F210, 0x1F212),
            (0x1F214, 0x1F23B),
            (0x1F240, 0x1F248),
            (0x20000, 0x2A6D6),
            (0x2A700, 0x2B734),
            (0x2B740, 0x2B81D),
            (0x2B820, 0x2CEA1),
            (0x2CEB0, 0x2EBE0),
            (0x2F800, 0x2FA1D),
        ]

    class Japanese(unicode_set):
        """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"""

        class Kanji(unicode_set):
            "Unicode set for Kanji Unicode Character Range"
            _ranges: UnicodeRangeList = [
                (0x4E00, 0x9FBF),
                (0x3000, 0x303F),
            ]

        class Hiragana(unicode_set):
            """Unicode set for Hiragana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3041, 0x3096),
                (0x3099, 0x30A0),
                (0x30FC,),
                (0xFF70,),
                (0x1B001,),
                (0x1B150, 0x1B152),
                (0x1F200,),
            ]

        class Katakana(unicode_set):
            """Unicode set for Katakana Unicode Character Range"""
            _ranges: UnicodeRangeList = [
                (0x3099, 0x309C),
                (0x30A0, 0x30FF),
                (0x31F0, 0x31FF),
                (0x32D0, 0x32FE),
                (0xFF65, 0xFF9F),
                (0x1B000,),
                (0x1B164, 0x1B167),
                (0x1F201, 0x1F202),
                (0x1F213,),
            ]

        # native-script aliases for the sub-ranges
        漢字 = Kanji
        カタカナ = Katakana
        ひらがな = Hiragana

        # Japanese is the union of its three component scripts
        _ranges = (
            Kanji._ranges
            + Hiragana._ranges
            + Katakana._ranges
        )

    class Hangul(unicode_set):
        """Unicode set for Hangul (Korean) Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x1100, 0x11FF),
            (0x302E, 0x302F),
            (0x3131, 0x318E),
            (0x3200, 0x321C),
            (0x3260, 0x327B),
            (0x327E,),
            (0xA960, 0xA97C),
            (0xAC00, 0xD7A3),
            (0xD7B0, 0xD7C6),
            (0xD7CB, 0xD7FB),
            (0xFFA0, 0xFFBE),
            (0xFFC2, 0xFFC7),
            (0xFFCA, 0xFFCF),
            (0xFFD2, 0xFFD7),
            (0xFFDA, 0xFFDC),
        ]

    Korean = Hangul

    class CJK(Chinese, Japanese, Hangul):
        """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""

    class Thai(unicode_set):
        """Unicode set for Thai Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0E01, 0x0E3A),
            (0x0E3F, 0x0E5B)
        ]

    class Arabic(unicode_set):
        """Unicode set for Arabic Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0600, 0x061B),
            (0x061E, 0x06FF),
            (0x0700, 0x077F),
        ]

    class Hebrew(unicode_set):
        """Unicode set for Hebrew Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0591, 0x05C7),
            (0x05D0, 0x05EA),
            (0x05EF, 0x05F4),
            (0xFB1D, 0xFB36),
            (0xFB38, 0xFB3C),
            (0xFB3E,),
            (0xFB40, 0xFB41),
            (0xFB43, 0xFB44),
            (0xFB46, 0xFB4F),
        ]

    class Devanagari(unicode_set):
        """Unicode set for Devanagari Unicode Character Range"""
        _ranges: UnicodeRangeList = [
            (0x0900, 0x097F),
            (0xA8E0, 0xA8FF)
        ]

    BMP = BasicMultilingualPlane

    # add language identifiers using language Unicode
    العربية = Arabic
    中文 = Chinese
    кириллица = Cyrillic
    Ελληνικά = Greek
    עִברִית = Hebrew
    日本語 = Japanese
    한국어 = Korean
    ไทย = Thai
    देवनागरी = Devanagari

    # fmt: on
venv\Lib\site-packages\pyparsing\util.py
# util.py
import contextlib
import re
from functools import lru_cache, wraps
import inspect
import itertools
import types
from typing import Callable, Union, Iterable, TypeVar, cast
import warnings
# a single backslash character (chr(92) avoids escaping confusion in source)
_bslash = chr(92)

# type variable used by decorator helpers to preserve the wrapped callable's type
C = TypeVar("C", bound=Callable)
class __config_flags:
    """Internal class for defining compatibility and debugging flags"""

    # names of all flags defined by a subclass
    _all_names: list[str] = []
    # names of flags that may not be changed at runtime
    _fixed_names: list[str] = []
    _type_desc = "configuration"

    @classmethod
    def _set(cls, dname, value):
        # warn (and ignore) attempts to change a fixed flag
        if dname in cls._fixed_names:
            warnings.warn(
                f"{cls.__name__}.{dname} {cls._type_desc} is {str(getattr(cls, dname)).upper()}"
                f" and cannot be overridden",
                stacklevel=3,
            )
            return
        if dname in cls._all_names:
            setattr(cls, dname, value)
        else:
            raise ValueError(f"no such {cls._type_desc} {dname!r}")

    # convenience entry points: enable/disable a named flag
    enable = classmethod(lambda cls, name: cls._set(name, True))
    disable = classmethod(lambda cls, name: cls._set(name, False))
@lru_cache(maxsize=128)
def col(loc: int, strg: str) -> int:
    """
    Return the column number (1-based) of the character at index ``loc``
    within ``strg``, counting newlines as line separators.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See :class:`ParserElement.parse_string`
    for more information on parsing strings with embedded tabs, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.
    """
    # a location sitting just past a newline is column 1 of the next line
    if 0 < loc < len(strg) and strg[loc - 1] == "\n":
        return 1
    return loc - strg.rfind("\n", 0, loc)
@lru_cache(maxsize=128)
def lineno(loc: int, strg: str) -> int:
    """
    Return the line number (1-based) of the character at index ``loc``
    within ``strg``, counting newlines as line separators.

    Note - the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See :class:`ParserElement.parse_string`
    for more information on parsing strings with embedded tabs, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.
    """
    newlines_before = strg.count("\n", 0, loc)
    return newlines_before + 1
@lru_cache(maxsize=128)
def line(loc: int, strg: str) -> str:
    """
    Return the complete line of text that contains index ``loc`` within
    ``strg``, counting newlines as line separators.
    """
    begin = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    # no trailing newline found: the line runs to the end of the string
    if end < 0:
        return strg[begin:]
    return strg[begin:end]
class _UnboundedCache:
    """A simple dict-backed cache with no size limit (size is None)."""

    def __init__(self):
        store = {}
        # unique sentinel distinguishes "missing" from a cached None
        sentinel = self.not_in_cache = object()

        def get(_, key):
            return store.get(key, sentinel)

        def set_(_, key, value):
            store[key] = value

        def clear(_):
            store.clear()

        self.size = None
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)
class _FifoCache:
    """A dict-backed cache that evicts its oldest entries once `size` is exceeded."""

    def __init__(self, size):
        store = {}
        self.size = size
        # unique sentinel distinguishes "missing" from a cached None
        sentinel = self.not_in_cache = object()

        def get(_, key):
            return store.get(key, sentinel)

        def set_(_, key, value):
            store[key] = value
            # evict in insertion (FIFO) order until back within bounds;
            # next(iter(...)) yields the oldest key in a dict
            while len(store) > size:
                store.pop(next(iter(store)))

        def clear(_):
            store.clear()

        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)
class LRUMemo:
    """
    A memoizing mapping that retains `capacity` deleted items

    The memo tracks retained items by their access order; once `capacity` items
    are retained, the least recently used item is discarded.
    """

    def __init__(self, capacity):
        # _active: live entries; _memory: entries retained after deletion
        self._capacity = capacity
        self._active = {}
        self._memory = {}

    def __getitem__(self, key):
        try:
            return self._active[key]
        except KeyError:
            # pop-and-reinsert moves the key to the end of _memory, marking it
            # most recently used; raises KeyError if not retained either
            self._memory[key] = self._memory.pop(key)
            return self._memory[key]

    def __setitem__(self, key, value):
        # a fresh assignment supersedes any retained copy
        self._memory.pop(key, None)
        self._active[key] = value

    def __delitem__(self, key):
        try:
            value = self._active.pop(key)
        except KeyError:
            # deleting a key that isn't active is a no-op
            pass
        else:
            # trim the oldest retained entries before remembering this one
            # NOTE(review): the slice [: -(capacity + 1)] leaves capacity + 1
            # entries (plus the one added below), one more than the docstring's
            # stated `capacity` - confirm whether this off-by-one is intended
            oldest_keys = list(self._memory)[: -(self._capacity + 1)]
            for key_to_delete in oldest_keys:
                self._memory.pop(key_to_delete)
            self._memory[key] = value

    def clear(self):
        self._active.clear()
        self._memory.clear()
class UnboundedMemo(dict):
    """
    A memoizing mapping that retains all deleted items
    (deletion is deliberately a no-op, so every entry is kept forever).
    """

    def __delitem__(self, key):
        # intentionally do nothing: entries are never discarded
        return
def _escape_regex_range_chars(s: str) -> str:
    """Escape characters that are significant inside a regex character class."""
    # escape these chars: ^-[] (and backslash itself); newline and tab are
    # rewritten as their two-character escape sequences
    out = []
    for ch in s:
        if ch in r"\^-[]":
            out.append(_bslash + ch)
        elif ch == "\n":
            out.append(r"\n")
        elif ch == "\t":
            out.append(r"\t")
        else:
            out.append(ch)
    return "".join(out)
class _GroupConsecutive:
    """
    Used as a callable `key` for itertools.groupby to group
    characters that are consecutive:
        itertools.groupby("abcdejkmpqrs", key=IsConsecutive())
    yields:
        (0, iter(['a', 'b', 'c', 'd', 'e']))
        (1, iter(['j', 'k']))
        (2, iter(['m']))
        (3, iter(['p', 'q', 'r', 's']))
    """

    def __init__(self) -> None:
        # prev: ordinal of the previously seen character
        self.prev = 0
        # counter: source of fresh group ids
        self.counter = itertools.count()
        # value: id of the current group
        self.value = -1

    def __call__(self, char: str) -> int:
        c_int = ord(char)
        self.prev, prev = c_int, self.prev
        # a gap of more than one codepoint starts a new group
        if c_int - prev > 1:
            self.value = next(self.counter)
        return self.value
def _collapse_string_to_ranges(
    s: Union[str, Iterable[str]], re_escape: bool = True
) -> str:
    r"""
    Take a string or list of single-character strings, and return
    a string of the consecutive characters in that string collapsed
    into groups, as might be used in a regular expression '[a-z]'
    character set:
        'a' -> 'a' -> '[a]'
        'bc' -> 'bc' -> '[bc]'
        'defgh' -> 'd-h' -> '[d-h]'
        'fdgeh' -> 'd-h' -> '[d-h]'
        'jklnpqrtu' -> 'j-lnp-rtu' -> '[j-lnp-rtu]'
    Duplicates get collapsed out:
        'aaa' -> 'a' -> '[a]'
        'bcbccb' -> 'bc' -> '[bc]'
        'defghhgf' -> 'd-h' -> '[d-h]'
        'jklnpqrjjjtu' -> 'j-lnp-rtu' -> '[j-lnp-rtu]'
    Spaces are preserved:
        'ab c' -> ' a-c' -> '[ a-c]'
    Characters that are significant when defining regex ranges
    get escaped:
        'acde[]-' -> r'\-\[\]ac-e' -> r'[\-\[\]ac-e]'
    """

    # Developer notes:
    # - Do not optimize this code assuming that the given input string
    #   or internal lists will be short (such as in loading generators into
    #   lists to make it easier to find the last element); this method is also
    #   used to generate regex ranges for character sets in the pyparsing.unicode
    #   classes, and these can be _very_ long lists of strings

    def escape_re_range_char(c: str) -> str:
        # escape chars significant inside a regex character class
        return "\\" + c if c in r"\^-][" else c

    def no_escape_re_range_char(c: str) -> str:
        return c

    if not re_escape:
        escape_re_range_char = no_escape_re_range_char

    ret = []

    # reduce input string to remove duplicates, and put in sorted order
    s_chars: list[str] = sorted(set(s))

    if len(s_chars) > 2:
        # find groups of characters that are consecutive (can be collapsed
        # down to "-")
        for _, chars in itertools.groupby(s_chars, key=_GroupConsecutive()):
            # _ is unimportant, is just used to identify groups
            # chars is an iterator of one or more consecutive characters
            # that comprise the current group
            first = last = next(chars)
            # advance to the last element of the group, if any remain
            # (an empty iterator raises ValueError on starred unpacking)
            with contextlib.suppress(ValueError):
                *_, last = chars

            if first == last:
                # there was only a single char in this group
                ret.append(escape_re_range_char(first))

            elif last == chr(ord(first) + 1):
                # there were only 2 characters in this group
                #   'a','b' -> 'ab'
                ret.append(f"{escape_re_range_char(first)}{escape_re_range_char(last)}")

            else:
                # there were > 2 characters in this group, make into a range
                #   'c','d','e' -> 'c-e'
                ret.append(
                    f"{escape_re_range_char(first)}-{escape_re_range_char(last)}"
                )
    else:
        # only 1 or 2 chars were given to form into groups
        #   'a' -> ['a']
        #   'bc' -> ['b', 'c']
        #   'dg' -> ['d', 'g']
        # no need to list them with "-", just return as a list
        # (after escaping)
        ret = [escape_re_range_char(c) for c in s_chars]

    return "".join(ret)
def _flatten(ll: Iterable) -> list:
    """
    Flatten an arbitrarily nested iterable into a flat list, preserving
    element order. Strings are treated as atoms (not split into characters).
    """
    from collections import deque

    ret = []
    # deque gives O(1) pops/extends at the left end; the previous list-based
    # version paid O(n) per pop(0) / front slice-assignment, making the loop
    # accidentally quadratic for long inputs
    to_visit = deque(ll)
    while to_visit:
        item = to_visit.popleft()
        if isinstance(item, Iterable) and not isinstance(item, str):
            # re-queue nested items at the front; extendleft inserts in
            # reverse, so reverse first to preserve the original order
            to_visit.extendleft(reversed(list(item)))
        else:
            ret.append(item)
    return ret
def make_compressed_re(
    word_list: Iterable[str],
    max_level: int = 2,
    *,
    non_capturing_groups: bool = True,
    _level: int = 1,
) -> str:
    """
    Create a regular expression string from a list of words, collapsing by common
    prefixes and optional suffixes.

    Calls itself recursively to build nested sublists for each group of suffixes
    that have a shared prefix.
    """

    def get_suffixes_from_common_prefixes(namelist: list[str]):
        # group (already-sorted) names by their first character, yielding each
        # one-character prefix together with the remaining suffixes
        # (longest suffix first, so alternations try longer matches first)
        if len(namelist) > 1:
            for prefix, suffixes in itertools.groupby(namelist, key=lambda s: s[:1]):
                yield prefix, sorted([s[1:] for s in suffixes], key=len, reverse=True)
        else:
            yield namelist[0][0], [namelist[0][1:]]

    if _level == 1:
        if not word_list:
            raise ValueError("no words given to make_compressed_re()")

        if "" in word_list:
            raise ValueError("word list cannot contain empty string")
    else:
        # internal recursive call, just return empty string if no words
        if not word_list:
            return ""

    # dedupe the word list (dict.fromkeys preserves insertion order)
    word_list = list({}.fromkeys(word_list))

    if max_level == 0:
        # no more collapsing allowed: emit a flat alternation (longest first)
        # or, if all words are single chars, a character class
        if any(len(wd) > 1 for wd in word_list):
            return "|".join(
                sorted([re.escape(wd) for wd in word_list], key=len, reverse=True)
            )
        else:
            return f"[{''.join(_escape_regex_range_chars(wd) for wd in word_list)}]"

    ret = []
    sep = ""
    # "?:" makes generated groups non-capturing when requested
    ncgroup = "?:" if non_capturing_groups else ""

    for initial, suffixes in get_suffixes_from_common_prefixes(sorted(word_list)):
        ret.append(sep)
        sep = "|"

        initial = re.escape(initial)

        trailing = ""
        if "" in suffixes:
            # an empty suffix means the prefix alone is also a valid word,
            # so the suffix group becomes optional
            trailing = "?"
            suffixes.remove("")

        if len(suffixes) > 1:
            if all(len(s) == 1 for s in suffixes):
                # all suffixes are single chars: use a character class
                ret.append(
                    f"{initial}[{''.join(_escape_regex_range_chars(s) for s in suffixes)}]{trailing}"
                )
            else:
                if _level < max_level:
                    # recurse to compress the suffixes by their own prefixes
                    suffix_re = make_compressed_re(
                        sorted(suffixes),
                        max_level,
                        non_capturing_groups=non_capturing_groups,
                        _level=_level + 1,
                    )
                    ret.append(f"{initial}({ncgroup}{suffix_re}){trailing}")
                else:
                    if all(len(s) == 1 for s in suffixes):
                        ret.append(
                            f"{initial}[{''.join(_escape_regex_range_chars(s) for s in suffixes)}]{trailing}"
                        )
                    else:
                        # recursion budget exhausted: flat alternation,
                        # longest suffix first
                        suffixes.sort(key=len, reverse=True)
                        ret.append(
                            f"{initial}({ncgroup}{'|'.join(re.escape(s) for s in suffixes)}){trailing}"
                        )
        else:
            if suffixes:
                suffix = re.escape(suffixes[0])
                if len(suffix) > 1 and trailing:
                    # group a multi-char suffix so the "?" applies to all of it
                    ret.append(f"{initial}({ncgroup}{suffix}){trailing}")
                else:
                    ret.append(f"{initial}{suffix}{trailing}")
            else:
                ret.append(initial)

    return "".join(ret)
def replaced_by_pep8(compat_name: str, fn: C) -> C:
    """
    Return a wrapper for ``fn`` published under the legacy (pre-PEP8) name
    ``compat_name``, forwarding all calls to ``fn`` while copying over its
    metadata (annotations, kw-defaults, qualname).
    """
    # In a future version, uncomment the code in the internal _inner() functions
    # to begin emitting DeprecationWarnings.

    # Unwrap staticmethod/classmethod
    fn = getattr(fn, "__func__", fn)

    # (Presence of 'self' arg in signature is used by explain_exception() methods, so we take
    # some extra steps to add it if present in decorated function.)
    if ["self"] == list(inspect.signature(fn).parameters)[:1]:

        @wraps(fn)
        def _inner(self, *args, **kwargs):
            # warnings.warn(
            #     f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2
            # )
            return fn(self, *args, **kwargs)

    else:

        @wraps(fn)
        def _inner(*args, **kwargs):
            # warnings.warn(
            #     f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2
            # )
            return fn(*args, **kwargs)

    # override the metadata that @wraps copied, so the wrapper presents
    # itself under the legacy compatibility name
    _inner.__doc__ = f"""Deprecated - use :class:`{fn.__name__}`"""
    _inner.__name__ = compat_name
    _inner.__annotations__ = fn.__annotations__
    if isinstance(fn, types.FunctionType):
        _inner.__kwdefaults__ = fn.__kwdefaults__  # type: ignore [attr-defined]
    elif isinstance(fn, type) and hasattr(fn, "__init__"):
        # for classes, take keyword defaults from the constructor
        _inner.__kwdefaults__ = fn.__init__.__kwdefaults__  # type: ignore [misc,attr-defined]
    else:
        _inner.__kwdefaults__ = None  # type: ignore [attr-defined]
    _inner.__qualname__ = fn.__qualname__
    return cast(C, _inner)
venv\Lib\site-packages\pyparsing\__init__.py
# module pyparsing.py
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# NOTE: the text-extraction that produced this copy stripped angle-bracketed
# text from the docstring; the "<salutation>, <addressee>!" placeholders are
# restored below to match the upstream pyparsing module docstring.
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
    from pyparsing import Word, alphas
    # define grammar of a greeting
    greet = Word(alphas) + "," + Word(alphas) + "!"
    hello = "Hello, World!"
    print(hello, "->", greet.parse_string(hello))
The program outputs the following::
    Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'`,
:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parse_string` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
  "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
  :class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
  class
- see how to create repetitive expressions using :class:`ZeroOrMore`
  and :class:`OneOrMore` classes
- use :class:`'+'`, :class:`'|'`, :class:`'^'`,
  and :class:`'&'` operators to combine simple expressions into
  more complex ones
- associate names with your parsed results using
  :class:`ParserElement.set_results_name`
- access the parsed data, which is returned as a :class:`ParseResults`
  object
- find some helpful expression short-cuts like :class:`DelimitedList`
  and :class:`one_of`
- find more useful common expressions in the :class:`pyparsing_common`
  namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
    """Structured package version, mirroring sys.version_info.

    The formatted version string is exposed as the ``__version__``
    property; ``__str__`` / ``__repr__`` use the module name for display.
    """

    major: int
    minor: int
    micro: int
    releaselevel: str
    serial: int

    @property
    def __version__(self):
        """Return e.g. '3.2.3', '3.0.0a1', or '3.0.0rc2' (PEP 440 style)."""
        base = f"{self.major}.{self.minor}.{self.micro}"
        if self.releaselevel == "final":
            return base
        level_char = self.releaselevel[0]
        # "candidate" is abbreviated "rc"; other levels use their initial.
        marker = ("r" if level_char == "c" else "") + level_char
        return f"{base}{marker}{self.serial}"

    def __str__(self):
        return f"{__name__} {self.__version__} / {__version_time__}"

    def __repr__(self):
        field_text = ", ".join(
            f"{field}={value!r}" for field, value in zip(self._fields, self)
        )
        return f"{__name__}.{type(self).__name__}({field_text})"
# Package version metadata.  __version__ is derived from __version_info__ so
# the two can never disagree; the camelCase names are pre-PEP8 synonyms.
# The author e-mail below was stripped by the text extraction that produced
# this copy; restored to match upstream pyparsing.
__version_info__ = version_info(3, 2, 3, "final", 1)
__version_time__ = "25 Mar 2025 01:38 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# Compatibility synonyms
# Re-export the renamed submodule objects under their historical public
# names, unless something earlier in the import chain already bound them.
if "pyparsing_unicode" not in globals():
    pyparsing_unicode = unicode  # type: ignore[misc]
if "pyparsing_common" not in globals():
    pyparsing_common = common
if "pyparsing_test" not in globals():
    pyparsing_test = testing
# Merge the helper/common builtin expression lists into the core list so
# utilities that iterate the builtin expressions see all of them.
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"CloseMatch",
"Combine",
"DelimitedList",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Tag",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"autoname_elements",
"c_style_comment",
"col",
"common_html_entity",
"condition_as_parse_action",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"infix_notation",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"original_text_for",
"printables",
"punc8bit",
"pyparsing_common",
"pyparsing_test",
"pyparsing_unicode",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"token_map",
"trace_parse_action",
"ungroup",
"unicode_set",
"unicode_string",
"with_attribute",
"with_class",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"conditionAsParseAction",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"indentedBlock",
"infixNotation",
"javaStyleComment",
"lineEnd",
"lineStart",
"locatedExpr",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"originalTextFor",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"tokenMap",
"traceParseAction",
"unicodeString",
"withAttribute",
"withClass",
"common",
"unicode",
"testing",
]
venv\Lib\site-packages\yaml\composer.py
__all__ = ['Composer', 'ComposerError']
from .error import MarkedYAMLError
from .events import *
from .nodes import *
class ComposerError(MarkedYAMLError):
    """Raised when the event stream cannot be composed into a node graph:
    undefined alias, duplicate anchor, or multiple documents where a single
    one was expected.  Mark formatting comes from MarkedYAMLError."""
    pass
class Composer:
    """Builds the representation node graph from parser events.

    Mixed into Loader classes alongside a parser (which supplies
    check_event/get_event/peek_event) and a resolver (which supplies
    resolve/descend_resolver/ascend_resolver) — none of those methods are
    defined here.
    """

    def __init__(self):
        # anchor name -> already-composed node, so aliases can share nodes.
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        """Compose the whole stream as exactly one document (or None if the
        stream is empty); raise ComposerError on a second document."""
        # Drop the STREAM-START event.
        self.get_event()
        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)
        # Drop the STREAM-END event.
        self.get_event()
        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        # Anchors are document-scoped: reset for the next document.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        """Compose one node; *parent*/*index* feed the tag resolver."""
        if self.check_event(AliasEvent):
            # An alias must refer back to an anchor seen in this document.
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor, event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurrence"
                        % anchor, self.anchors[anchor].start_mark,
                        "second occurrence", event.start_mark)
        self.descend_resolver(parent, index)
        # Dispatch on the next event kind; a well-formed stream always
        # matches one of the three branches.
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        # '!' or no tag means: let the resolver pick the implicit tag.
        if tag is None or tag == '!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        # end_mark is filled in once SEQUENCE-END arrives.
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Register the anchor BEFORE composing children, so recursive
        # aliases inside the sequence can resolve to this node.
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Registered before children for the same recursive-alias reason
        # as in compose_sequence_node.
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
venv\Lib\site-packages\yaml\constructor.py
__all__ = [
'BaseConstructor',
'SafeConstructor',
'FullConstructor',
'UnsafeConstructor',
'Constructor',
'ConstructorError'
]
from .error import *
from .nodes import *
import collections.abc, datetime, base64, binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
    """Raised when a node cannot be turned into a Python object: unknown
    tag, wrong node kind, unhashable/blacklisted key, or malformed data."""
    pass
class BaseConstructor:
    """Base machinery for turning composed nodes into Python objects.

    Mixed into Loader classes together with a Composer (supplies
    check_node/get_node/get_single_node).  Dispatch is tag-driven through
    the yaml_constructors / yaml_multi_constructors registries that
    subclasses populate via add_constructor / add_multi_constructor.
    """

    # Tag -> constructor registries.  Shared at class level; add_constructor
    # copies them into the subclass __dict__ before mutating, so a
    # registration on a subclass never leaks into its parents.
    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}  # node -> finished object (aliases share)
        self.recursive_objects = {}    # nodes currently under construction (cycle guard)
        self.state_generators = []     # suspended two-phase (generator) constructors
        self.deep_construct = False    # True while an eager, fully-deep construct runs

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def check_state_key(self, key):
        """Block special attributes/methods from being set in a newly created
        object, to prevent user-controlled methods from being called during
        deserialization"""
        if self.get_state_keys_blacklist_regexp().match(key):
            raise ConstructorError(None, None,
                "blacklisted key '%s' in instance state found" % (key,), None)

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        data = self.construct_object(node)
        # Drain generator-based constructors: each already yielded its
        # placeholder object; finishing the generator populates it.  New
        # generators may be appended while draining, hence the loop.
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        # Per-document caches are reset between documents.
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct the object for *node*; with deep=True, finish all
        nested generator constructors eagerly before returning."""
        # Memoized so aliased nodes construct to the very same object.
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Constructor lookup order: exact tag, then first matching tag
        # prefix, then the None (catch-all) multi/plain constructors, then
        # a kind-based default.
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if tag_prefix is not None and node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        # A generator constructor yields the bare object first (so aliases
        # can reference it) and fills it in when resumed.
        if isinstance(data, types.GeneratorType):
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Default scalar constructor: the raw string value."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Default sequence constructor: a list of constructed children."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Default mapping constructor: a dict; keys must be hashable."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Like construct_mapping but keeps (key, value) tuples in order,
        without requiring hashable keys."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for exact-tag matches on this class."""
        # Copy-on-write so the registration stays local to this class.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for tags starting with *tag_prefix*;
        it receives the remaining tag suffix as an extra argument."""
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
    """Constructor limited to the standard YAML 1.1 tags — no arbitrary
    Python objects, so it is suitable for untrusted input.

    Fix: the named groups of ``timestamp_regexp`` (``(?P<year>…)`` etc.)
    had been stripped by the text extraction that produced this copy,
    leaving an invalid pattern; they are restored below to match the names
    that construct_yaml_timestamp reads from ``match.groupdict()``.
    """

    def construct_scalar(self, node):
        # A mapping may use the '=' (value) key; that entry's value stands
        # in as the scalar value of the whole mapping.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return super().construct_scalar(node)

    def flatten_mapping(self, node):
        """Expand '<<' merge keys in place, prepending merged entries so the
        mapping's own (later) entries win when construct_mapping builds the
        dict."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    # Merging a list of mappings: each element must itself
                    # be a mapping.
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                # '=' used as a key is just the string "=".
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return super().construct_mapping(node, deep=deep)

    def construct_yaml_null(self, node):
        # construct_scalar is called for its node-type check only.
        self.construct_scalar(node)
        return None

    # Recognized YAML 1.1 boolean spellings; looked up lowercased.
    bool_values = {
        'yes':      True,
        'no':       False,
        'true':     True,
        'false':    False,
        'on':       True,
        'off':      False,
    }

    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        """Parse YAML 1.1 integers: decimal, 0b/0x/leading-zero octal, and
        base-60 sexagesimal ('1:30'), with optional sign and '_' separators."""
        value = self.construct_scalar(node)
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal: 1:30 == 90, 1:00:00 == 3600, ...
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)

    # Build +inf by repeated squaring (portable historic idiom), then a NaN.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        """Parse YAML 1.1 floats, including .inf/.nan and sexagesimal."""
        value = self.construct_scalar(node)
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)

    def construct_yaml_binary(self, node):
        """Decode a !!binary scalar from base64 to bytes."""
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodestring is the pre-3.1 name; kept for old interpreters.
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    # ISO 8601-style timestamp; group names restored (see class docstring).
    timestamp_regexp = re.compile(
            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        """Build a datetime.date (date only) or datetime.datetime (with an
        optional fixed-offset/UTC tzinfo) from a !!timestamp scalar."""
        # construct_scalar validates the node kind; the regexp then runs on
        # the raw node value.
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        tzinfo = None
        if values['fraction']:
            # Truncate/pad to microseconds (6 digits).
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
            tzinfo = datetime.timezone(delta)
        elif values['tz']:
            # Bare 'Z' suffix.
            tzinfo = datetime.timezone.utc
        return datetime.datetime(year, month, day, hour, minute, second, fraction,
                                 tzinfo=tzinfo)

    def construct_yaml_omap(self, node):
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        # Two-phase constructor: yield the empty list first so aliases can
        # reference it, then fill it in when resumed.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))

    def construct_yaml_pairs(self, node):
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        # Two-phase: a !!set is represented as a mapping with null values.
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        return self.construct_scalar(node)

    def construct_yaml_seq(self, node):
        # Two-phase, for recursive sequences.
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        # Two-phase, for recursive mappings.
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        # Two-phase: allocate without __init__, then restore state via
        # __setstate__ or __dict__ update (pickle-style).
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        # Fallback for unrecognized tags: refuse rather than guess.
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag,
                node.start_mark)
# Register the standard YAML 1.1 tag constructors on SafeConstructor.  The
# final None entry is the catch-all for unrecognized tags and raises
# ConstructorError instead of silently producing data.
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:null',
    SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:bool',
    SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:int',
    SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:float',
    SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:binary',
    SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:timestamp',
    SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:omap',
    SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:pairs',
    SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:set',
    SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:str',
    SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:seq',
    SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
    'tag:yaml.org,2002:map',
    SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
    SafeConstructor.construct_undefined)
class FullConstructor(SafeConstructor):
    """Adds constructors for the !!python/... tags.

    Unless the ``unsafe`` flags are passed, it will not import modules on
    its own and it filters instance-state keys through a blacklist before
    setting them.
    """

    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generate
    # python instance
    def get_state_keys_blacklist(self):
        # Regex fragments for state keys that must never be set from YAML.
        return ['^extend$', '^__.*__$']

    def get_state_keys_blacklist_regexp(self):
        # Compiled lazily and cached on the instance.
        if not hasattr(self, 'state_keys_blacklist_regexp'):
            self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
        return self.state_keys_blacklist_regexp

    def construct_python_str(self, node):
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        # Same base64 decoding as construct_yaml_binary.
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        # Python 3 has no separate long type.
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark, unsafe=False):
        """Return the named module; only import it when *unsafe* is true,
        otherwise require it to be imported already."""
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        if unsafe:
            try:
                __import__(name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python module", mark,
                        "cannot find module %r (%s)" % (name, exc), mark)
        if name not in sys.modules:
            raise ConstructorError("while constructing a Python module", mark,
                    "module %r is not imported" % name, mark)
        return sys.modules[name]

    def find_python_name(self, name, mark, unsafe=False):
        """Resolve 'module.attr' (or a bare builtins name) to the object."""
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        if unsafe:
            try:
                __import__(module_name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python object", mark,
                        "cannot find module %r (%s)" % (module_name, exc), mark)
        if module_name not in sys.modules:
            raise ConstructorError("while constructing a Python object", mark,
                    "module %r is not imported" % module_name, mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        # !!python/name:module.attr — the node value itself must be empty.
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        # !!python/module:name — the node value itself must be empty.
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False, unsafe=False):
        """Create an instance of the class named by *suffix*.

        newobj=True uses cls.__new__ (skipping __init__), mirroring the
        pickle protocol; otherwise the class is called normally.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if not (unsafe or isinstance(cls, type)):
            raise ConstructorError("while constructing a Python instance", node.start_mark,
                    "expected a class, but found %r" % type(cls),
                    node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state, unsafe=False):
        """Apply pickle-style *state*; unless *unsafe*, every key is checked
        against the blacklist via check_state_key."""
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            # A 2-tuple state is (dict-state, slot-state), per pickle.
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                if not unsafe and state:
                    for key in state.keys():
                        self.check_state_key(key)
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                if not unsafe:
                    self.check_state_key(key)
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        # Format:
        #   !!python/object:module.name { ... state ... }
        # Two-phase: yield the bare instance first so aliases can refer to it.
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the !!python/... tag constructors on FullConstructor.  Scalar-ish
# types reuse the Safe constructors where the semantics are identical.
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/none',
    FullConstructor.construct_yaml_null)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/bool',
    FullConstructor.construct_yaml_bool)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/str',
    FullConstructor.construct_python_str)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/unicode',
    FullConstructor.construct_python_unicode)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/bytes',
    FullConstructor.construct_python_bytes)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/int',
    FullConstructor.construct_yaml_int)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/long',
    FullConstructor.construct_python_long)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/float',
    FullConstructor.construct_yaml_float)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/complex',
    FullConstructor.construct_python_complex)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/list',
    FullConstructor.construct_yaml_seq)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/tuple',
    FullConstructor.construct_python_tuple)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/dict',
    FullConstructor.construct_yaml_map)
# Prefix constructor: the tag suffix after 'name:' selects the object.
FullConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/name:',
    FullConstructor.construct_python_name)
class UnsafeConstructor(FullConstructor):
    """FullConstructor with every safety check switched off.

    Each override simply forwards to the FullConstructor implementation
    with ``unsafe=True``, so modules get imported on demand, non-class
    callables are accepted, and state keys bypass the blacklist.  Use only
    on fully trusted input.
    """

    def find_python_module(self, name, mark):
        return super().find_python_module(name, mark, unsafe=True)

    def find_python_name(self, name, mark):
        return super().find_python_name(name, mark, unsafe=True)

    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
        return super().make_python_instance(
            suffix, node, args, kwds, newobj, unsafe=True)

    def set_python_instance_state(self, instance, state):
        return super().set_python_instance_state(
            instance, state, unsafe=True)
# Register the dangerous prefix constructors only on UnsafeConstructor, so
# FullConstructor never builds arbitrary objects or imports modules.
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/module:',
    UnsafeConstructor.construct_python_module)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object:',
    UnsafeConstructor.construct_python_object)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/new:',
    UnsafeConstructor.construct_python_object_new)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/apply:',
    UnsafeConstructor.construct_python_object_apply)
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
    """Historical alias of UnsafeConstructor, kept for subclass compatibility."""
    pass
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from .error import YAMLError
from .events import *
class EmitterError(YAMLError):
    """Raised when the emitter receives an event it cannot handle."""
    pass
class ScalarAnalysis:
    """Result of Emitter.analyze_scalar: which output styles a scalar allows.

    Plain data holder; the allow_* flags record whether the scalar can be
    written as flow-plain, block-plain, single-quoted, double-quoted or as
    a block (literal/folded) scalar.
    """
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Emitter:
DEFAULT_TAG_PREFIXES = {
'!' : '!',
'tag:yaml.org,2002:' : '!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """Create an emitter writing YAML to *stream*.

    canonical: force canonical output; indent: preferred indent (only
    honoured if 1 < indent < 10); width: preferred line width (only if
    greater than twice the indent); allow_unicode: pass non-ASCII through
    unescaped; line_break: one of '\\r', '\\n', '\\r\\n'.
    """
    # The stream should have the methods `write` and possibly `flush`.
    self.stream = stream
    # Encoding can be overridden by STREAM-START.
    self.encoding = None
    # Emitter is a state machine with a stack of states to handle nested
    # structures.
    self.states = []
    self.state = self.expect_stream_start
    # Current event and the event queue.
    self.events = []
    self.event = None
    # The current indentation level and the stack of previous indents.
    self.indents = []
    self.indent = None
    # Flow level.
    self.flow_level = 0
    # Contexts.
    self.root_context = False
    self.sequence_context = False
    self.mapping_context = False
    self.simple_key_context = False
    # Characteristics of the last emitted character:
    #  - current position.
    #  - is it a whitespace?
    #  - is it an indention character
    #    (indentation space, '-', '?', or ':')?
    self.line = 0
    self.column = 0
    self.whitespace = True
    self.indention = True
    # Whether the document requires an explicit document indicator
    self.open_ended = False
    # Formatting details.
    self.canonical = canonical
    self.allow_unicode = allow_unicode
    self.best_indent = 2
    if indent and 1 < indent < 10:
        self.best_indent = indent
    self.best_width = 80
    if width and width > self.best_indent*2:
        self.best_width = width
    self.best_line_break = '\n'
    if line_break in ['\r', '\n', '\r\n']:
        self.best_line_break = line_break
    # Tag prefixes.
    self.tag_prefixes = None
    # Prepared anchor and tag.
    self.prepared_anchor = None
    self.prepared_tag = None
    # Scalar analysis and style.
    self.analysis = None
    self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
    """Queue *event* and run the state machine while enough events are buffered."""
    self.events.append(event)
    while not self.need_more_events():
        self.event = self.events.pop(0)
        self.state()
        self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
    """Return True if fewer than *count* lookahead events are buffered.

    Walks the queued events past the head, tracking nesting depth; once
    the enclosing document/collection closes (level < 0) no further
    lookahead is required.
    """
    level = 0
    for event in self.events[1:]:
        if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
            level += 1
        elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
            level -= 1
        elif isinstance(event, StreamEndEvent):
            # Stream end terminates everything.
            level = -1
        if level < 0:
            return False
    return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
    """Initial state: consume StreamStartEvent and record its encoding."""
    if isinstance(self.event, StreamStartEvent):
        # Only honour the event's encoding for byte streams; text streams
        # carry an 'encoding' attribute of their own.
        if self.event.encoding and not hasattr(self.stream, 'encoding'):
            self.encoding = self.event.encoding
        self.write_stream_start()
        self.state = self.expect_first_document_start
    else:
        raise EmitterError("expected StreamStartEvent, but got %s"
                % self.event)
def expect_nothing(self):
    """Terminal state after stream end: any further event is an error."""
    raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
    """The first document may omit the '---' marker; delegate with first=True."""
    return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
    """Emit directives and the '---' marker, or finish on StreamEndEvent."""
    if isinstance(self.event, DocumentStartEvent):
        # Close a previous open-ended document before writing directives.
        if (self.event.version or self.event.tags) and self.open_ended:
            self.write_indicator('...', True)
            self.write_indent()
        if self.event.version:
            version_text = self.prepare_version(self.event.version)
            self.write_version_directive(version_text)
        self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
        if self.event.tags:
            handles = sorted(self.event.tags.keys())
            for handle in handles:
                prefix = self.event.tags[handle]
                # Remember the handle so prepare_tag can shorten tags.
                self.tag_prefixes[prefix] = handle
                handle_text = self.prepare_tag_handle(handle)
                prefix_text = self.prepare_tag_prefix(prefix)
                self.write_tag_directive(handle_text, prefix_text)
        # '---' may be omitted only for the first, implicit, non-canonical
        # document with no directives and a non-empty root.
        implicit = (first and not self.event.explicit and not self.canonical
                and not self.event.version and not self.event.tags
                and not self.check_empty_document())
        if not implicit:
            self.write_indent()
            self.write_indicator('---', True)
            if self.canonical:
                self.write_indent()
        self.state = self.expect_document_root
    elif isinstance(self.event, StreamEndEvent):
        if self.open_ended:
            self.write_indicator('...', True)
            self.write_indent()
        self.write_stream_end()
        self.state = self.expect_nothing
    else:
        raise EmitterError("expected DocumentStartEvent, but got %s"
                % self.event)
def expect_document_end(self):
    """Finish the current document, writing '...' when it was explicit."""
    if isinstance(self.event, DocumentEndEvent):
        self.write_indent()
        if self.event.explicit:
            self.write_indicator('...', True)
            self.write_indent()
        self.flush_stream()
        self.state = self.expect_document_start
    else:
        raise EmitterError("expected DocumentEndEvent, but got %s"
                % self.event)
def expect_document_root(self):
    """Emit the document's root node, then continue with document end."""
    self.states.append(self.expect_document_end)
    self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
        simple_key=False):
    """Dispatch on the current event: alias, scalar, sequence or mapping.

    The context flags record where this node appears; they influence
    style decisions made further down (e.g. plain-scalar rules).
    """
    self.root_context = root
    self.sequence_context = sequence
    self.mapping_context = mapping
    self.simple_key_context = simple_key
    if isinstance(self.event, AliasEvent):
        self.expect_alias()
    elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
        self.process_anchor('&')
        self.process_tag()
        if isinstance(self.event, ScalarEvent):
            self.expect_scalar()
        elif isinstance(self.event, SequenceStartEvent):
            # Flow style is forced inside flow context, in canonical mode,
            # when requested by the event, or for empty collections.
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_sequence():
                self.expect_flow_sequence()
            else:
                self.expect_block_sequence()
        elif isinstance(self.event, MappingStartEvent):
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_mapping():
                self.expect_flow_mapping()
            else:
                self.expect_block_mapping()
    else:
        raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
    """Write an alias reference ('*anchor') and pop the next state."""
    if self.event.anchor is None:
        raise EmitterError("anchor is not specified for alias")
    self.process_anchor('*')
    self.state = self.states.pop()
def expect_scalar(self):
    """Write a scalar value at a temporary flow indent level."""
    self.increase_indent(flow=True)
    self.process_scalar()
    self.indent = self.indents.pop()
    self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
    """Open a flow sequence ('[') and expect its first item."""
    self.write_indicator('[', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
    """First flow-sequence item: no leading comma; ']' closes immediately."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator(']', False)
        self.state = self.states.pop()
    else:
        # Wrap the line in canonical mode or when past the best width.
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
    """Subsequent flow-sequence items: comma-separated; ']' closes."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        if self.canonical:
            # Canonical output keeps a trailing comma and a line break.
            self.write_indicator(',', False)
            self.write_indent()
        self.write_indicator(']', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
    """Open a flow mapping ('{') and expect its first key."""
    self.write_indicator('{', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
    """First flow-mapping key: no leading comma; '}' closes immediately."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator('}', False)
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        # Short keys use the compact 'key: value' form; long/complex keys
        # need the explicit '?' indicator.
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
    """Subsequent flow-mapping keys: comma-separated; '}' closes."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        if self.canonical:
            # Canonical output keeps a trailing comma and a line break.
            self.write_indicator(',', False)
            self.write_indent()
        self.write_indicator('}', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
    """Value after a simple key: ':' directly follows the key."""
    self.write_indicator(':', False)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
    """Value after an explicit '?' key: ':' on its own position."""
    if self.canonical or self.column > self.best_width:
        self.write_indent()
    self.write_indicator(':', True)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
    """Open a block sequence; nested directly in a mapping it stays unindented."""
    indentless = (self.mapping_context and not self.indention)
    self.increase_indent(flow=False, indentless=indentless)
    self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
    """First block-sequence item; first=True blocks an immediate SequenceEnd."""
    return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
    """Write '- item' entries until SequenceEndEvent closes the sequence."""
    if not first and isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        self.write_indent()
        self.write_indicator('-', True, indention=True)
        self.states.append(self.expect_block_sequence_item)
        self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
    """Open a block mapping at the next indentation level."""
    self.increase_indent(flow=False)
    self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
    """First block-mapping key; first=True blocks an immediate MappingEnd."""
    return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
    """Write mapping keys until MappingEndEvent closes the mapping."""
    if not first and isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        self.write_indent()
        # Short keys use 'key: value'; otherwise the explicit '?' form.
        if self.check_simple_key():
            self.states.append(self.expect_block_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator('?', True, indention=True)
            self.states.append(self.expect_block_mapping_value)
            self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
    """Value after a simple key: ':' directly follows the key."""
    self.write_indicator(':', False)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
def expect_block_mapping_value(self):
    """Value after an explicit '?' key: ':' on its own line."""
    self.write_indent()
    self.write_indicator(':', True, indention=True)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
    """True if the current event starts a sequence that ends immediately."""
    return (isinstance(self.event, SequenceStartEvent) and self.events
            and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
    """True if the current event starts a mapping that ends immediately."""
    return (isinstance(self.event, MappingStartEvent) and self.events
            and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
    """True if the next document holds only an empty, untagged plain scalar."""
    if not isinstance(self.event, DocumentStartEvent) or not self.events:
        return False
    event = self.events[0]
    return (isinstance(event, ScalarEvent) and event.anchor is None
            and event.tag is None and event.implicit and event.value == '')
def check_simple_key(self):
    """Decide whether the current node can be written as a simple key.

    A simple key must render in under 128 characters (anchor + tag +
    scalar) and be an alias, a one-line non-empty-analysis scalar, or an
    empty collection.  Prepared anchor/tag/analysis are cached on self
    for reuse by the processors.
    """
    length = 0
    if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        length += len(self.prepared_anchor)
    if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
            and self.event.tag is not None:
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(self.event.tag)
        length += len(self.prepared_tag)
    if isinstance(self.event, ScalarEvent):
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        length += len(self.analysis.scalar)
    return (length < 128 and (isinstance(self.event, AliasEvent)
        or (isinstance(self.event, ScalarEvent)
            and not self.analysis.empty and not self.analysis.multiline)
        or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
    """Write the anchor ('&name') or alias ('*name') for the current event."""
    if self.event.anchor is None:
        self.prepared_anchor = None
        return
    # check_simple_key may already have prepared the anchor; reuse it.
    if self.prepared_anchor is None:
        self.prepared_anchor = self.prepare_anchor(self.event.anchor)
    if self.prepared_anchor:
        self.write_indicator(indicator+self.prepared_anchor, True)
    self.prepared_anchor = None
def process_tag(self):
    """Write the node tag when it cannot be left implicit.

    For scalars the decision depends on the chosen style: a plain scalar
    may omit the tag when implicit[0] holds, a quoted one when
    implicit[1] holds.  For collections a truthy implicit flag suffices.
    """
    tag = self.event.tag
    if isinstance(self.event, ScalarEvent):
        if self.style is None:
            self.style = self.choose_scalar_style()
        if ((not self.canonical or tag is None) and
            ((self.style == '' and self.event.implicit[0])
                or (self.style != '' and self.event.implicit[1]))):
            self.prepared_tag = None
            return
        if self.event.implicit[0] and tag is None:
            # Force the non-specific '!' tag to block implicit resolution.
            tag = '!'
            self.prepared_tag = None
    else:
        if (not self.canonical or tag is None) and self.event.implicit:
            self.prepared_tag = None
            return
    if tag is None:
        raise EmitterError("tag is not specified")
    # check_simple_key may already have prepared the tag; reuse it.
    if self.prepared_tag is None:
        self.prepared_tag = self.prepare_tag(tag)
    if self.prepared_tag:
        self.write_indicator(self.prepared_tag, True)
    self.prepared_tag = None
def choose_scalar_style(self):
    """Pick the output style for the current scalar.

    Preference order: requested/canonical double-quoted, plain (''),
    requested block style ('|' or '>'), single-quoted, and finally
    double-quoted as the universal fallback.
    """
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.event.style == '"' or self.canonical:
        return '"'
    if not self.event.style and self.event.implicit[0]:
        # Plain style requires a context that allows it and no empty or
        # multiline scalar in a simple-key position.
        if (not (self.simple_key_context and
                (self.analysis.empty or self.analysis.multiline))
            and (self.flow_level and self.analysis.allow_flow_plain
                or (not self.flow_level and self.analysis.allow_block_plain))):
            return ''
    if self.event.style and self.event.style in '|>':
        # Block styles only make sense outside flow context and keys.
        if (not self.flow_level and not self.simple_key_context
                and self.analysis.allow_block):
            return self.event.style
    if not self.event.style or self.event.style == '\'':
        if (self.analysis.allow_single_quoted and
                not (self.simple_key_context and self.analysis.multiline)):
            return '\''
    return '"'
def process_scalar(self):
    """Write the current scalar in the chosen style and reset the caches."""
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.style is None:
        self.style = self.choose_scalar_style()
    # Long scalars may be split across lines except in simple-key position.
    split = (not self.simple_key_context)
    #if self.analysis.multiline and split    \
    #        and (not self.style or self.style in '\'\"'):
    #    self.write_indent()
    if self.style == '"':
        self.write_double_quoted(self.analysis.scalar, split)
    elif self.style == '\'':
        self.write_single_quoted(self.analysis.scalar, split)
    elif self.style == '>':
        self.write_folded(self.analysis.scalar)
    elif self.style == '|':
        self.write_literal(self.analysis.scalar)
    else:
        self.write_plain(self.analysis.scalar, split)
    self.analysis = None
    self.style = None
# Analyzers.
def prepare_version(self, version):
    """Render a (major, minor) version tuple as 'M.m'.

    Raises EmitterError for any major version other than 1.
    """
    major, minor = version
    if major != 1:
        raise EmitterError(f"unsupported YAML version: {major:d}.{minor:d}")
    return f"{major:d}.{minor:d}"
def prepare_tag_handle(self, handle):
    """Validate a tag handle ('!', '!!' or '!word!') and return it unchanged."""
    if not handle:
        raise EmitterError("tag handle must not be empty")
    if handle[0] != '!' or handle[-1] != '!':
        raise EmitterError("tag handle must start and end with '!': %r" % handle)
    for ch in handle[1:-1]:
        # Only ASCII alphanumerics, '-' and '_' may appear inside.
        ok = ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'
              or ch in '-_')
        if not ok:
            raise EmitterError("invalid character %r in the tag handle: %r"
                    % (ch, handle))
    return handle
def prepare_tag_prefix(self, prefix):
    """Percent-encode a %TAG directive prefix for output.

    URI-safe characters pass through unchanged; anything else is UTF-8
    encoded and emitted as %XX escapes.  A leading '!' is always kept
    literal.  Raises EmitterError if the prefix is empty.
    """
    if not prefix:
        raise EmitterError("tag prefix must not be empty")
    chunks = []
    start = end = 0
    if prefix[0] == '!':
        end = 1
    while end < len(prefix):
        ch = prefix[end]
        if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                or ch in '-;/?!:@&=+$,_.~*\'()[]':
            end += 1
        else:
            if start < end:
                chunks.append(prefix[start:end])
            start = end = end+1
            data = ch.encode('utf-8')
            # BUG FIX: iterating a bytes object yields ints in Python 3,
            # so the old 'ord(ch)' raised TypeError on any character that
            # needed escaping; use the int directly (as prepare_tag does).
            for byte in data:
                chunks.append('%%%02X' % byte)
    if start < end:
        chunks.append(prefix[start:end])
    return ''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == '!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == '!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.~*\'()[]' \
or (ch == '!' and handle != '!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ch)
if start < end:
chunks.append(suffix[start:end])
suffix_text = ''.join(chunks)
if handle:
return '%s%s' % (handle, suffix_text)
else:
return '!<%s>' % suffix_text
def prepare_anchor(self, anchor):
    """Validate an anchor name (ASCII alphanumerics, '-' and '_' only)."""
    if not anchor:
        raise EmitterError("anchor must not be empty")
    for ch in anchor:
        valid = ('0' <= ch <= '9' or 'A' <= ch <= 'Z'
                 or 'a' <= ch <= 'z' or ch in '-_')
        if not valid:
            raise EmitterError("invalid character %r in the anchor: %r"
                    % (ch, anchor))
    return anchor
def analyze_scalar(self, scalar):
    """Scan *scalar* once and return a ScalarAnalysis of the styles it allows.

    The scan records YAML indicator characters, line breaks, special
    (non-printable or disallowed unicode) characters, and significant
    whitespace placements; the flag combinations at the end translate
    those findings into allowed output styles.
    """
    # Empty scalar is a special case.
    if not scalar:
        return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                allow_flow_plain=False, allow_block_plain=True,
                allow_single_quoted=True, allow_double_quoted=True,
                allow_block=False)
    # Indicators and special characters.
    block_indicators = False
    flow_indicators = False
    line_breaks = False
    special_characters = False
    # Important whitespace combinations.
    leading_space = False
    leading_break = False
    trailing_space = False
    trailing_break = False
    break_space = False
    space_break = False
    # Check document indicators.
    if scalar.startswith('---') or scalar.startswith('...'):
        block_indicators = True
        flow_indicators = True
    # First character or preceded by a whitespace.
    preceded_by_whitespace = True
    # Last character or followed by a whitespace.
    followed_by_whitespace = (len(scalar) == 1 or
            scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
    # The previous character is a space.
    previous_space = False
    # The previous character is a break.
    previous_break = False
    index = 0
    while index < len(scalar):
        ch = scalar[index]
        # Check for indicators.
        if index == 0:
            # Leading indicators are special characters.
            if ch in '#,[]{}&*!|>\'\"%@`':
                flow_indicators = True
                block_indicators = True
            if ch in '?:':
                flow_indicators = True
                if followed_by_whitespace:
                    block_indicators = True
            if ch == '-' and followed_by_whitespace:
                flow_indicators = True
                block_indicators = True
        else:
            # Some indicators cannot appear within a scalar as well.
            if ch in ',?[]{}':
                flow_indicators = True
            if ch == ':':
                flow_indicators = True
                if followed_by_whitespace:
                    block_indicators = True
            if ch == '#' and preceded_by_whitespace:
                flow_indicators = True
                block_indicators = True
        # Check for line breaks, special, and unicode characters.
        if ch in '\n\x85\u2028\u2029':
            line_breaks = True
        if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
            if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
                    or '\uE000' <= ch <= '\uFFFD'
                    or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
                # NOTE(review): unicode_characters is set but never read.
                unicode_characters = True
                if not self.allow_unicode:
                    special_characters = True
            else:
                special_characters = True
        # Detect important whitespace combinations.
        if ch == ' ':
            if index == 0:
                leading_space = True
            if index == len(scalar)-1:
                trailing_space = True
            if previous_break:
                break_space = True
            previous_space = True
            previous_break = False
        elif ch in '\n\x85\u2028\u2029':
            if index == 0:
                leading_break = True
            if index == len(scalar)-1:
                trailing_break = True
            if previous_space:
                space_break = True
            previous_space = False
            previous_break = True
        else:
            previous_space = False
            previous_break = False
        # Prepare for the next character.
        index += 1
        preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
        followed_by_whitespace = (index+1 >= len(scalar) or
                scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
    # Let's decide what styles are allowed.
    allow_flow_plain = True
    allow_block_plain = True
    allow_single_quoted = True
    allow_double_quoted = True
    allow_block = True
    # Leading and trailing whitespaces are bad for plain scalars.
    if (leading_space or leading_break
            or trailing_space or trailing_break):
        allow_flow_plain = allow_block_plain = False
    # We do not permit trailing spaces for block scalars.
    if trailing_space:
        allow_block = False
    # Spaces at the beginning of a new line are only acceptable for block
    # scalars.
    if break_space:
        allow_flow_plain = allow_block_plain = allow_single_quoted = False
    # Spaces followed by breaks, as well as special character are only
    # allowed for double quoted scalars.
    if space_break or special_characters:
        allow_flow_plain = allow_block_plain = \
                allow_single_quoted = allow_block = False
    # Although the plain scalar writer supports breaks, we never emit
    # multiline plain scalars.
    if line_breaks:
        allow_flow_plain = allow_block_plain = False
    # Flow indicators are forbidden for flow plain scalars.
    if flow_indicators:
        allow_flow_plain = False
    # Block indicators are forbidden for block plain scalars.
    if block_indicators:
        allow_block_plain = False
    return ScalarAnalysis(scalar=scalar,
            empty=False, multiline=line_breaks,
            allow_flow_plain=allow_flow_plain,
            allow_block_plain=allow_block_plain,
            allow_single_quoted=allow_single_quoted,
            allow_double_quoted=allow_double_quoted,
            allow_block=allow_block)
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
    """Write a BOM for UTF-16 byte output; nothing otherwise."""
    # Write BOM if needed.
    if self.encoding and self.encoding.startswith('utf-16'):
        self.stream.write('\uFEFF'.encode(self.encoding))
def write_stream_end(self):
    """Finish the stream by flushing buffered output."""
    self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = ' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
    """Break the line if needed and pad with spaces to the current indent."""
    indent = self.indent or 0
    if not self.indention or self.column > indent \
            or (self.column == indent and not self.whitespace):
        self.write_line_break()
    if self.column < indent:
        self.whitespace = True
        data = ' '*(indent-self.column)
        self.column = indent
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = '%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
    """Emit a '%TAG <handle> <prefix>' directive followed by a line break."""
    data = '%%TAG %s %s' % (handle_text, prefix_text)
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
    self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
    """Write *text* as a single-quoted scalar, doubling inner quotes.

    Runs of non-space text, spaces and line breaks are flushed
    separately; long lines may be folded at a single space when *split*
    is true.
    """
    self.write_indicator('\'', True)
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            if ch is None or ch != ' ':
                # A lone space at a fold point becomes a line break.
                if start+1 == end and self.column > self.best_width and split   \
                        and start != 0 and end != len(text):
                    self.write_indent()
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            if ch is None or ch not in '\n\x85\u2028\u2029':
                if text[start] == '\n':
                    self.write_line_break()
                for br in text[start:end]:
                    if br == '\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                self.write_indent()
                start = end
        else:
            if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
        if ch == '\'':
            # Single quotes are escaped by doubling them.
            data = '\'\''
            self.column += 2
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            start = end + 1
        if ch is not None:
            spaces = (ch == ' ')
            breaks = (ch in '\n\x85\u2028\u2029')
        end += 1
    self.write_indicator('\'', False)
# Characters with a short double-quoted escape form (e.g. '\n' -> '\\n'),
# used by write_double_quoted; anything else unprintable falls back to
# \xXX / \uXXXX / \UXXXXXXXX escapes.
ESCAPE_REPLACEMENTS = {
    '\0':       '0',
    '\x07':     'a',
    '\x08':     'b',
    '\x09':     't',
    '\x0A':     'n',
    '\x0B':     'v',
    '\x0C':     'f',
    '\x0D':     'r',
    '\x1B':     'e',
    '\"':       '\"',
    '\\':       '\\',
    '\x85':     'N',
    '\xA0':     '_',
    '\u2028':   'L',
    '\u2029':   'P',
}
def write_double_quoted(self, text, split=True):
    """Write *text* as a double-quoted scalar, escaping as needed.

    Printable runs are written verbatim; other characters use the short
    escapes from ESCAPE_REPLACEMENTS or numeric \\x/\\u/\\U escapes.
    Long lines are folded with a trailing '\\' when *split* is true.
    """
    self.write_indicator('"', True)
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
                or not ('\x20' <= ch <= '\x7E'
                    or (self.allow_unicode
                        and ('\xA0' <= ch <= '\uD7FF'
                            or '\uE000' <= ch <= '\uFFFD'))):
            if start < end:
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end
            if ch is not None:
                if ch in self.ESCAPE_REPLACEMENTS:
                    data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
                elif ch <= '\xFF':
                    data = '\\x%02X' % ord(ch)
                elif ch <= '\uFFFF':
                    data = '\\u%04X' % ord(ch)
                else:
                    data = '\\U%08X' % ord(ch)
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end+1
        # Fold the line with a trailing backslash when past best_width.
        if 0 < end < len(text)-1 and (ch == ' ' or start >= end)   \
                and self.column+(end-start) > self.best_width and split:
            data = text[start:end]+'\\'
            if start < end:
                start = end
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            self.write_indent()
            self.whitespace = False
            self.indention = False
            if text[start] == ' ':
                # Protect a leading space on the continuation line.
                data = '\\'
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
        end += 1
    self.write_indicator('"', False)
def determine_block_hints(self, text):
hints = ''
if text:
if text[0] in ' \n\x85\u2028\u2029':
hints += str(self.best_indent)
if text[-1] not in '\n\x85\u2028\u2029':
hints += '-'
elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
hints += '+'
return hints
def write_folded(self, text):
    """Write *text* as a folded ('>') block scalar."""
    hints = self.determine_block_hints(text)
    self.write_indicator('>'+hints, True)
    if hints[-1:] == '+':
        # Kept trailing breaks leave the document open-ended.
        self.open_ended = True
    self.write_line_break()
    leading_space = True
    spaces = False
    breaks = True
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if breaks:
            if ch is None or ch not in '\n\x85\u2028\u2029':
                # An extra break separates paragraphs that would
                # otherwise be folded together.
                if not leading_space and ch is not None and ch != ' ' \
                        and text[start] == '\n':
                    self.write_line_break()
                leading_space = (ch == ' ')
                for br in text[start:end]:
                    if br == '\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                if ch is not None:
                    self.write_indent()
                start = end
        elif spaces:
            if ch != ' ':
                # A lone space at a fold point becomes a line break.
                if start+1 == end and self.column > self.best_width:
                    self.write_indent()
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        else:
            if ch is None or ch in ' \n\x85\u2028\u2029':
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                if ch is None:
                    self.write_line_break()
                start = end
        if ch is not None:
            breaks = (ch in '\n\x85\u2028\u2029')
            spaces = (ch == ' ')
        end += 1
def write_literal(self, text):
    """Write *text* as a literal ('|') block scalar, preserving breaks."""
    hints = self.determine_block_hints(text)
    self.write_indicator('|'+hints, True)
    if hints[-1:] == '+':
        # Kept trailing breaks leave the document open-ended.
        self.open_ended = True
    self.write_line_break()
    breaks = True
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if breaks:
            if ch is None or ch not in '\n\x85\u2028\u2029':
                for br in text[start:end]:
                    if br == '\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                if ch is not None:
                    self.write_indent()
                start = end
        else:
            if ch is None or ch in '\n\x85\u2028\u2029':
                data = text[start:end]
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                if ch is None:
                    self.write_line_break()
                start = end
        if ch is not None:
            breaks = (ch in '\n\x85\u2028\u2029')
        end += 1
def write_plain(self, text, split=True):
    """Write *text* as a plain (unquoted) scalar.

    A plain scalar at the document root leaves the document open-ended
    (a following document must be introduced explicitly).  Long lines
    may be folded at single spaces when *split* is true.
    """
    if self.root_context:
        self.open_ended = True
    if not text:
        return
    if not self.whitespace:
        # Separate from the previous token with a single space.
        data = ' '
        self.column += len(data)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    self.whitespace = False
    self.indention = False
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            if ch != ' ':
                # A lone space at a fold point becomes a line break.
                if start+1 == end and self.column > self.best_width and split:
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            if ch not in '\n\x85\u2028\u2029':
                if text[start] == '\n':
                    self.write_line_break()
                for br in text[start:end]:
                    if br == '\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                start = end
        else:
            if ch is None or ch in ' \n\x85\u2028\u2029':
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end
        if ch is not None:
            spaces = (ch == ' ')
            breaks = (ch in '\n\x85\u2028\u2029')
        end += 1
venv\Lib\site-packages\yaml\error.py
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark:
    """A position (stream name, index, line, column) in a YAML source buffer."""

    def __init__(self, name, index, line, column, buffer, pointer):
        self.name = name        # stream name, e.g. a file name
        self.index = index      # character offset in the stream
        self.line = line        # zero-based line number
        self.column = column    # zero-based column number
        self.buffer = buffer    # full text, or None when unavailable
        self.pointer = pointer  # offset of the marked character in buffer

    def get_snippet(self, indent=4, max_length=75):
        """Return an indented excerpt around the mark with a '^' caret
        underneath, or None when no buffer is available."""
        if self.buffer is None:
            return None
        breaks = '\0\r\n\x85\u2028\u2029'
        # Scan left to the line start, truncating with ' ... ' if too far.
        head = ''
        start = self.pointer
        while start > 0 and self.buffer[start-1] not in breaks:
            start -= 1
            if self.pointer-start > max_length/2-1:
                head = ' ... '
                start += 5
                break
        # Scan right to the line end, truncating symmetrically.
        tail = ''
        end = self.pointer
        while end < len(self.buffer) and self.buffer[end] not in breaks:
            end += 1
            if end-self.pointer > max_length/2-1:
                tail = ' ... '
                end -= 5
                break
        excerpt = self.buffer[start:end]
        caret_pad = ' '*(indent+self.pointer-start+len(head))
        return ' '*indent + head + excerpt + tail + '\n' + caret_pad + '^'

    def __str__(self):
        where = ' in "%s", line %d, column %d' \
                % (self.name, self.line+1, self.column+1)
        snippet = self.get_snippet()
        if snippet is not None:
            where += ':\n'+snippet
        return where
class YAMLError(Exception):
    """Base class for all errors raised by this YAML library."""
    pass
class MarkedYAMLError(YAMLError):
    """YAML error carrying optional context/problem descriptions and marks."""

    def __init__(self, context=None, context_mark=None,
            problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note

    def __str__(self):
        parts = []
        if self.context is not None:
            parts.append(self.context)
        # Show the context mark only when it adds information beyond the
        # problem mark (different stream/position, or no problem mark).
        show_context_mark = self.context_mark is not None and (
            self.problem is None or self.problem_mark is None
            or self.context_mark.name != self.problem_mark.name
            or self.context_mark.line != self.problem_mark.line
            or self.context_mark.column != self.problem_mark.column)
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note is not None:
            parts.append(self.note)
        return '\n'.join(parts)
venv\Lib\site-packages\yaml\events.py
# Abstract classes.
class Event(object):
    """Base class for all parser/emitter events."""

    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Only show the payload attributes this event type actually carries.
        shown = [name for name in ['anchor', 'tag', 'implicit', 'value']
                 if hasattr(self, name)]
        body = ', '.join('%s=%r' % (name, getattr(self, name))
                         for name in shown)
        return '%s(%s)' % (self.__class__.__name__, body)
class NodeEvent(Event):
    """Base class for events that identify a node via an anchor."""
    def __init__(self, anchor, start_mark=None, end_mark=None):
        super().__init__(start_mark, end_mark)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    """Start of a sequence or mapping; carries tag, implicit-resolution
    flag and the requested flow/block style."""
    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    """End of a sequence or mapping."""
# Implementations.
class StreamStartEvent(Event):
    """Start of the whole stream; may record the detected encoding."""
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        super().__init__(start_mark, end_mark)
        self.encoding = encoding
class StreamEndEvent(Event):
    """End of the whole stream."""
class DocumentStartEvent(Event):
    """Start of a document; records directives seen and whether an
    explicit '---' marker was present."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a document; `explicit` tells whether '...' was present."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    """Reference to a previously anchored node."""
class ScalarEvent(NodeEvent):
    """A scalar value; `implicit` is a pair of resolvability flags and
    `style` the requested quoting style."""
    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence."""
class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence."""
class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping."""
class MappingEndEvent(CollectionEndEvent):
    """End of a mapping."""
venv\Lib\site-packages\yaml\loader.py
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
from .reader import *
from .scanner import *
from .parser import *
from .composer import *
from .constructor import *
from .resolver import *
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """Loader assembled from the Base* pipeline stages
    (reader -> scanner -> parser -> composer -> constructor -> resolver)."""
    def __init__(self, stream):
        # Each mixin's initializer is invoked explicitly (not via super()),
        # in pipeline order; only Reader consumes the input stream.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
    """Loader using FullConstructor/Resolver; per the module comment below,
    intended to load almost all YAML safely."""
    def __init__(self, stream):
        # Explicit mixin initialization, in pipeline order.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        FullConstructor.__init__(self)
        Resolver.__init__(self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
    """Loader using SafeConstructor for the construction stage."""
    def __init__(self, stream):
        # Explicit mixin initialization, in pipeline order.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Loader using the full Constructor; per the module comment below this
    is unsafe on untrusted input and kept for backwards compatibility."""
    def __init__(self, stream):
        # Explicit mixin initialization, in pipeline order.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
# UnsafeLoader is the same as Loader (which is and was always unsafe on
# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
# FullLoad should be able to load almost all YAML safely. Loader is left intact
# to ensure backwards compatibility.
class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Identical pipeline to Loader; the name makes the unsafety explicit
    (see the comment above)."""
    def __init__(self, stream):
        # Explicit mixin initialization, in pipeline order.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
venv\Lib\site-packages\yaml\nodes.py
class Node(object):
    """Base class for representation-graph nodes: a tag plus a value,
    with optional start/end marks from the source stream."""
    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        return '%s(tag=%r, value=%s)' % (
                self.__class__.__name__, self.tag, repr(self.value))
class ScalarNode(Node):
    """A leaf node holding a single scalar value; `style` records the
    requested quoting style."""
    id = 'scalar'
    def __init__(self, tag, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.style = style
class CollectionNode(Node):
    """Base class for sequence and mapping nodes; `flow_style` records
    whether flow (inline) or block layout was requested."""
    def __init__(self, tag, value,
            start_mark=None, end_mark=None, flow_style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.flow_style = flow_style
class SequenceNode(CollectionNode):
    """An ordered collection node; `value` holds a list of child nodes."""
    id = 'sequence'
class MappingNode(CollectionNode):
    """A key/value collection node; `value` holds (key, value) node pairs."""
    id = 'mapping'
venv\Lib\site-packages\yaml\parser.py
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from .error import MarkedYAMLError
from .tokens import *
from .events import *
from .scanner import *
class ParserError(MarkedYAMLError):
    """Raised when the token stream violates the YAML grammar."""
class Parser:
    """Turn the token stream produced by the Scanner into parsing events.

    A recursive-descent parser for the LL(1) grammar in the comment block
    above, with the call stack made explicit: `self.state` is the next
    production to run, `self.states` the stack of pending continuations,
    and `self.marks` the stack of start marks of open collections.
    """
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.
    DEFAULT_TAGS = {
        '!': '!',
        '!!': 'tag:yaml.org,2002:',
    }
    def __init__(self):
        # One-event lookahead buffer shared by check/peek/get_event.
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        self.states = []
        self.marks = []
        self.state = self.parse_stream_start
    def dispose(self):
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
    def check_event(self, *choices):
        # Check the type of the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        if self.current_event is not None:
            # With no `choices` given, any pending event matches.
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False
    def peek_event(self):
        # Get the next event.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        return self.current_event
    def get_event(self):
        # Get the next event and proceed further.
        if self.current_event is None:
            if self.state:
                self.current_event = self.state()
        value = self.current_event
        self.current_event = None
        return value
    # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
    # implicit_document ::= block_node DOCUMENT-END*
    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
    def parse_stream_start(self):
        # Parse the stream start.
        token = self.get_token()
        event = StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)
        # Prepare the next state.
        self.state = self.parse_implicit_document_start
        return event
    def parse_implicit_document_start(self):
        # Parse an implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            # The document "starts" at the first content token; no token is
            # consumed here.
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=False)
            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node
            return event
        else:
            return self.parse_document_start()
    def parse_document_start(self):
        # Parse any extra document end indicators.
        while self.check_token(DocumentEndToken):
            self.get_token()
        # Parse an explicit document.
        if not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                # NOTE(review): the quoted token name in this message renders
                # empty; upstream PyYAML reads "expected '<document start>'".
                # Angle-bracketed text looks stripped — confirm against the
                # original file.
                raise ParserError(None, None,
                        "expected '', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark)
            # Every continuation and open collection must be consumed by now.
            assert not self.states
            assert not self.marks
            self.state = None
        return event
    def parse_document_end(self):
        # Parse the document end.
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.check_token(DocumentEndToken):
            token = self.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark,
                explicit=explicit)
        # Prepare the next state.
        self.state = self.parse_document_start
        return event
    def parse_document_content(self):
        if self.check_token(DirectiveToken,
                DocumentStartToken, DocumentEndToken, StreamEndToken):
            # An empty document body: stand in an empty scalar for it.
            event = self.process_empty_scalar(self.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()
    def process_directives(self):
        """Consume %YAML/%TAG directives; return (version, tag-handle map).

        The map is None when no %TAG directives were seen. The default
        handles are merged into `self.tag_handles` either way.
        """
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == 'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == 'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle,
                            token.start_mark)
                self.tag_handles[handle] = prefix
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
    # block_node_or_indentless_sequence ::= ALIAS
    #               | properties (block_content | indentless_block_sequence)?
    #               | block_content
    #               | indentless_block_sequence
    # block_node ::= ALIAS
    #               | properties block_content?
    #               | block_content
    # flow_node ::= ALIAS
    #               | properties flow_content?
    #               | flow_content
    # properties ::= TAG ANCHOR? | ANCHOR TAG?
    # block_content ::= block_collection | flow_collection | SCALAR
    # flow_content ::= flow_collection | SCALAR
    # block_collection ::= block_sequence | block_mapping
    # flow_collection ::= flow_sequence | flow_mapping
    def parse_block_node(self):
        return self.parse_node(block=True)
    def parse_flow_node(self):
        return self.parse_node()
    def parse_block_node_or_indentless_sequence(self):
        return self.parse_node(block=True, indentless_sequence=True)
    def parse_node(self, block=False, indentless_sequence=False):
        """Parse one node: an alias, or properties followed by content.

        Emits the node's *start* event and installs the state that will
        parse the remainder of any collection it opens.
        """
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Properties may come as ANCHOR TAG? or TAG ANCHOR?.
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            if tag is not None:
                # A tag token is a (handle, suffix) pair; resolve the handle
                # against the directives collected for this document.
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle,
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == '!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            # No tag, or the non-specific '!', leaves resolution implicit.
            implicit = (tag is None or tag == '!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    # For scalars `implicit` becomes a pair:
                    # (resolvable-as-plain, resolvable-as-quoted).
                    if (token.plain and tag is None) or tag == '!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), '',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event
    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
    def parse_block_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()
    def parse_block_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # A dash with no content: the entry is an empty scalar.
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            # NOTE(review): message reads "expected , but found" — upstream
            # PyYAML says "expected <block end>"; the angle-bracketed text
            # looks stripped. Confirm against the original file.
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected , but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
    def parse_indentless_sequence_entry(self):
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_indentless_sequence_entry)
                return self.parse_block_node()
            else:
                self.state = self.parse_indentless_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        # No BLOCK-END token exists for an indentless sequence; it ends at
        # the next token's position without consuming anything.
        token = self.peek_token()
        event = SequenceEndEvent(token.start_mark, token.start_mark)
        self.state = self.states.pop()
        return event
    # block_mapping ::= BLOCK-MAPPING_START
    #                   ((KEY block_node_or_indentless_sequence?)?
    #                   (VALUE block_node_or_indentless_sequence?)?)*
    #                   BLOCK-END
    def parse_block_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_mapping_key()
    def parse_block_mapping_key(self):
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_value)
                return self.parse_block_node_or_indentless_sequence()
            else:
                # A key indicator with no key node: the key is an empty scalar.
                self.state = self.parse_block_mapping_value
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            # NOTE(review): same truncated "expected ," message as above.
            raise ParserError("while parsing a block mapping", self.marks[-1],
                    "expected , but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_block_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                self.states.append(self.parse_block_mapping_key)
                return self.parse_block_node_or_indentless_sequence()
            else:
                self.state = self.parse_block_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            # A key with no ':' value: the value is an empty scalar.
            self.state = self.parse_block_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    # flow_sequence ::= FLOW-SEQUENCE-START
    #                   (flow_sequence_entry FLOW-ENTRY)*
    #                   flow_sequence_entry?
    #                   FLOW-SEQUENCE-END
    # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    #
    # Note that while production rules for both flow_sequence_entry and
    # flow_mapping_entry are equal, their interpretations are different.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generate an inline mapping (set syntax).
    def parse_flow_sequence_first_entry(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_sequence_entry(first=True)
    def parse_flow_sequence_entry(self, first=False):
        if not self.check_token(FlowSequenceEndToken):
            if not first:
                # Entries after the first must be separated by ','.
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow sequence", self.marks[-1],
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                # 'KEY' inside a flow sequence opens an inline single-pair
                # mapping (set syntax).
                token = self.peek_token()
                event = MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                self.state = self.parse_flow_sequence_entry_mapping_key
                return event
            elif not self.check_token(FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry)
                return self.parse_flow_node()
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_sequence_entry_mapping_key(self):
        token = self.get_token()
        if not self.check_token(ValueToken,
                FlowEntryToken, FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry_mapping_value)
            return self.parse_flow_node()
        else:
            self.state = self.parse_flow_sequence_entry_mapping_value
            return self.process_empty_scalar(token.end_mark)
    def parse_flow_sequence_entry_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                self.states.append(self.parse_flow_sequence_entry_mapping_end)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_sequence_entry_mapping_end
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_sequence_entry_mapping_end(self):
        self.state = self.parse_flow_sequence_entry
        token = self.peek_token()
        # End the inline mapping at the current position without consuming
        # a token.
        return MappingEndEvent(token.start_mark, token.start_mark)
    # flow_mapping ::= FLOW-MAPPING-START
    #                  (flow_mapping_entry FLOW-ENTRY)*
    #                  flow_mapping_entry?
    #                  FLOW-MAPPING-END
    # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
    def parse_flow_mapping_first_key(self):
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)
    def parse_flow_mapping_key(self, first=False):
        if not self.check_token(FlowMappingEndToken):
            if not first:
                if self.check_token(FlowEntryToken):
                    self.get_token()
                else:
                    token = self.peek_token()
                    raise ParserError("while parsing a flow mapping", self.marks[-1],
                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif not self.check_token(FlowMappingEndToken):
                # A bare node in key position: its value is an empty scalar.
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        token = self.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
    def parse_flow_mapping_value(self):
        if self.check_token(ValueToken):
            token = self.get_token()
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_mapping_key
            token = self.peek_token()
            return self.process_empty_scalar(token.start_mark)
    def parse_flow_mapping_empty_value(self):
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)
    def process_empty_scalar(self, mark):
        # An empty plain scalar: implicitly resolvable as plain only.
        return ScalarEvent(None, None, (True, False), '', mark, mark)
venv\Lib\site-packages\yaml\reader.py
# This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, stream.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
    """Raised when the input stream cannot be decoded or contains a
    character outside the printable YAML range."""
    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        # Either the offending raw byte (bytes input) or its code point (int).
        self.character = character
        self.position = position
        self.encoding = encoding
        self.reason = reason
    def __str__(self):
        # Two message shapes: a decode failure (character is bytes) vs. a
        # disallowed decoded character (character is an int code point).
        if isinstance(self.character, bytes):
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.encoding, ord(self.character), self.reason,
                            self.name, self.position)
        else:
            return "unacceptable character #x%04x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.character, self.reason,
                            self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.
    # Reader accepts
    # - a `bytes` object,
    # - a `str` object,
    # - a file-like object with its `read` method returning `str`,
    # - a file-like object with its `read` method returning `unicode`.
    # Yeah, it's ugly and slow.
    def __init__(self, stream):
        # NOTE(review): each `self.name = ""` below renders as an empty
        # string; upstream PyYAML uses "<unicode string>", "<byte string>"
        # and "<file>". Angle-bracketed text looks stripped — confirm
        # against the original file.
        self.name = None
        self.stream = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = ''
        self.pointer = 0
        self.raw_buffer = None
        self.raw_decode = None
        self.encoding = None
        self.index = 0
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            # Already decoded: validate and terminate with the '\0' sentinel.
            self.name = ""
            self.check_printable(stream)
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            # Raw bytes: decode lazily after sniffing the BOM.
            self.name = ""
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # File-like object: read incrementally via update_raw().
            self.stream = stream
            self.name = getattr(stream, 'name', "")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()
    def peek(self, index=0):
        """Return the character `index` positions ahead without advancing."""
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            # Buffer exhausted: decode more input and retry.
            self.update(index+1)
            return self.buffer[self.pointer+index]
    def prefix(self, length=1):
        """Return the next `length` characters without advancing."""
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]
    def forward(self, length=1):
        """Advance by `length` characters, updating index/line/column."""
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # A line break is \n, NEL, LS, PS, or a \r not followed by \n
            # (so a \r\n pair counts as a single break).
            if ch in '\n\x85\u2028\u2029'  \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                # The BOM / zero-width no-break space does not advance column.
                self.column += 1
            length -= 1
    def get_mark(self):
        # Only in-memory input keeps the buffer around for error snippets;
        # for streamed input the Mark carries position info only.
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)
    def determine_encoding(self):
        """Pick a decoder from the first bytes: UTF-16 BOM or UTF-8 default."""
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)
    # Matches any character NOT allowed in a YAML stream: allowed are tab,
    # LF, CR, printable ASCII, NEL, and the non-surrogate unicode ranges.
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
    def check_printable(self, data):
        """Raise ReaderError if `data` contains a disallowed character."""
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            # Absolute position of the offending character in the stream.
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")
    def update(self, length):
        """Decode raw input until at least `length` characters are buffered."""
        if self.raw_buffer is None:
            return
        # Drop the already-consumed prefix of the decoded buffer.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # `self.eof` makes the final decode strict about a
                    # truncated multi-byte sequence at the end of input.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Terminate the stream with the '\0' end-of-input sentinel.
                self.buffer += '\0'
                self.raw_buffer = None
                break
    def update_raw(self, size=4096):
        """Read one more chunk from the stream into `raw_buffer`."""
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
venv\Lib\site-packages\yaml\representer.py
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, copyreg, types, base64, collections
class RepresenterError(YAMLError):
    """Raised when an object cannot be converted to a representation node."""
class BaseRepresenter:
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
self.default_style = default_style
self.sort_keys = sort_keys
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, str(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
@classmethod
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
@classmethod
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
    """Build a MappingNode for *mapping*.

    As in ``represent_sequence``, the node is registered under the current
    alias key before its entries are represented so recursive references
    resolve correctly; the value list is filled in place afterwards.
    """
    value = []
    node = MappingNode(tag, value, flow_style=flow_style)
    if self.alias_key is not None:
        self.represented_objects[self.alias_key] = node
    best_style = True
    if hasattr(mapping, 'items'):
        mapping = list(mapping.items())
        if self.sort_keys:
            try:
                mapping = sorted(mapping)
            except TypeError:
                # Keys of mixed, unorderable types: keep original order.
                pass
    for item_key, item_value in mapping:
        node_key = self.represent_data(item_key)
        node_value = self.represent_data(item_value)
        # Flow style is only the default when all keys and values are
        # plain scalars.
        if not (isinstance(node_key, ScalarNode) and not node_key.style):
            best_style = False
        if not (isinstance(node_value, ScalarNode) and not node_value.style):
            best_style = False
        value.append((node_key, node_value))
    if flow_style is None:
        if self.default_flow_style is not None:
            node.flow_style = self.default_flow_style
        else:
            node.flow_style = best_style
    return node
def ignore_aliases(self, data):
    """Return True when *data* must never receive an anchor/alias.

    The base representer aliases every repeated object.
    """
    return False
class SafeRepresenter(BaseRepresenter):
    """Representer restricted to the standard YAML tags for Python
    built-in types; unknown objects raise RepresenterError."""

    def ignore_aliases(self, data):
        # Immutable scalar-like values never get anchors/aliases.  Falls
        # through (returning None, which is falsy) for everything else.
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # base64.encodestring was the pre-Python-3 spelling; the hasattr
        # check keeps a fallback for very old interpreters.
        if hasattr(base64, 'encodebytes'):
            data = base64.encodebytes(data).decode('ascii')
        else:
            data = base64.encodestring(data).decode('ascii')
        # Literal style ('|') keeps the base64 text block readable.
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        if data:
            value = 'true'
        else:
            value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Compute a float infinity portably: keep squaring until repr stops
    # changing (runs once at class-creation time).
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # `data != data` is the NaN test; the `(data == 0.0 and
        # data == 1.0)` clause is always false and is kept only for
        # compatibility with the original code.
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts. For instance:
            # >>> repr(1e17)
            # '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag. We fix this by adding
            # '.0' before the 'e' symbol.
            if '.' not in value and 'e' in value:
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
        return self.represent_sequence('tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # A !!set is emitted as a mapping from each element to null.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        # Space separator between date and time, per the !!timestamp style.
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # Prefer __getstate__ when the object defines it, else a copy of
        # the instance dict.
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        # Registered under the `None` key as the catch-all: objects with no
        # representer are an error in safe mode.
        raise RepresenterError("cannot represent an object", data)
# Register the safe representers for the Python built-in types.  Lookup is
# by exact type, so order is irrelevant; the final `None` entry is the
# catch-all that rejects everything else.
for _data_type, _representer in [
    (type(None), SafeRepresenter.represent_none),
    (str, SafeRepresenter.represent_str),
    (bytes, SafeRepresenter.represent_binary),
    (bool, SafeRepresenter.represent_bool),
    (int, SafeRepresenter.represent_int),
    (float, SafeRepresenter.represent_float),
    (list, SafeRepresenter.represent_list),
    (tuple, SafeRepresenter.represent_list),
    (dict, SafeRepresenter.represent_dict),
    (set, SafeRepresenter.represent_set),
    (datetime.date, SafeRepresenter.represent_date),
    (datetime.datetime, SafeRepresenter.represent_datetime),
    (None, SafeRepresenter.represent_undefined),
]:
    SafeRepresenter.add_representer(_data_type, _representer)
del _data_type, _representer
class Representer(SafeRepresenter):
    """Full (non-safe) representer: adds Python-specific tags for complex
    numbers, tuples, names, modules, and arbitrary objects via the
    __reduce__ protocol."""

    def represent_complex(self, data):
        # Omit the zero component where possible for a compact form.
        if data.imag == 0.0:
            data = '%r' % data.real
        elif data.real == 0.0:
            data = '%rj' % data.imag
        elif data.imag > 0:
            data = '%r+%rj' % (data.real, data.imag)
        else:
            data = '%r%rj' % (data.real, data.imag)
        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        # Classes and functions are stored by dotted import path, not value.
        name = '%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')

    def represent_module(self, data):
        return self.represent_scalar(
            'tag:yaml.org,2002:python/module:'+data.__name__, '')

    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent an object", data)
        # Pad the reduce tuple to exactly five elements.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            # pickle protocol-2 object creation: args[0].__new__(*args).
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = '%s.%s' % (function.__module__, function.__name__)
        # Compact form: plain !!python/object mapping when only a state
        # dict is involved.
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                'tag:yaml.org,2002:python/object:'+function_name, state)
        # Compact form: just the argument sequence when there is no state.
        if not listitems and not dictitems \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        # General form: explicit args/state/listitems/dictitems mapping.
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)

    def represent_ordered_dict(self, data):
        # Provide uniform representation across different Python versions.
        data_type = type(data)
        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
                % (data_type.__module__, data_type.__name__)
        items = [[key, value] for key, value in data.items()]
        # Single constructor argument: the list of [key, value] pairs.
        return self.represent_sequence(tag, [items])
# Register the extended (non-safe) representers: exact-type entries first,
# then the multi-representers that also match subclasses -- `type` for
# classes and `object` as the __reduce__-based fallback for everything else.
for _data_type, _representer in [
    (complex, Representer.represent_complex),
    (tuple, Representer.represent_tuple),
    (collections.OrderedDict, Representer.represent_ordered_dict),
    (types.FunctionType, Representer.represent_name),
    (types.BuiltinFunctionType, Representer.represent_name),
    (types.ModuleType, Representer.represent_module),
]:
    Representer.add_representer(_data_type, _representer)
del _data_type, _representer
Representer.add_multi_representer(type, Representer.represent_name)
Representer.add_multi_representer(object, Representer.represent_object)
venv\Lib\site-packages\yaml\resolver.py
__all__ = ['BaseResolver', 'Resolver']
from .error import *
from .nodes import *
import re
class ResolverError(YAMLError):
    """Raised for invalid arguments to the path-resolver registration API."""
    pass
class BaseResolver:
    """Maps nodes to tags: implicit resolvers match plain scalar text
    against regexps, path resolvers match a node's position in the
    document tree."""

    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'

    # Class-level registries, copied-on-write into subclasses (see the
    # add_* classmethods below).
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}

    def __init__(self):
        # Per-instance stacks tracking the current position while the
        # composer descends/ascends the node tree.
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []

    @classmethod
    def add_implicit_resolver(cls, tag, regexp, first):
        """Register *regexp* for *tag*; *first* lists the possible first
        characters of a matching scalar (None matches any first char)."""
        # Copy-on-write so subclass registrations don't leak into parents;
        # the per-key lists are copied too.
        if not 'yaml_implicit_resolvers' in cls.__dict__:
            implicit_resolvers = {}
            for key in cls.yaml_implicit_resolvers:
                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
            cls.yaml_implicit_resolvers = implicit_resolvers
        if first is None:
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))

    @classmethod
    def add_path_resolver(cls, tag, path, kind=None):
        """Register *tag* for nodes whose tree position matches *path*
        (and whose node class matches *kind*, when given)."""
        # Note: `add_path_resolver` is experimental. The API could be changed.
        # `new_path` is a pattern that is matched against the path from the
        # root to the node that is being considered. `node_path` elements are
        # tuples `(node_check, index_check)`. `node_check` is a node class:
        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
        # matches any kind of a node. `index_check` could be `None`, a boolean
        # value, a string value, or a number. `None` and `False` match against
        # any _value_ of sequence and mapping nodes. `True` matches against
        # any _key_ of a mapping node. A string `index_check` matches against
        # a mapping value that corresponds to a scalar key which content is
        # equal to the `index_check` value. An integer `index_check` matches
        # against a sequence value with the index equal to `index_check`.
        if not 'yaml_path_resolvers' in cls.__dict__:
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        new_path = []
        for element in path:
            if isinstance(element, (list, tuple)):
                if len(element) == 2:
                    node_check, index_check = element
                elif len(element) == 1:
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError("Invalid path element: %s" % element)
            else:
                node_check = None
                index_check = element
            # Allow the Python built-in types as shorthand for node classes.
            if node_check is str:
                node_check = ScalarNode
            elif node_check is list:
                node_check = SequenceNode
            elif node_check is dict:
                node_check = MappingNode
            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
                    and not isinstance(node_check, str) \
                    and node_check is not None:
                raise ResolverError("Invalid node checker: %s" % node_check)
            if not isinstance(index_check, (str, int))  \
                    and index_check is not None:
                raise ResolverError("Invalid index checker: %s" % index_check)
            new_path.append((node_check, index_check))
        # Same shorthand for the final node kind.
        if kind is str:
            kind = ScalarNode
        elif kind is list:
            kind = SequenceNode
        elif kind is dict:
            kind = MappingNode
        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
                and kind is not None:
            raise ResolverError("Invalid node kind: %s" % kind)
        cls.yaml_path_resolvers[tuple(new_path), kind] = tag

    def descend_resolver(self, current_node, current_index):
        """Push the resolver state for a child of *current_node* at
        *current_index* (called by the composer on entering a node)."""
        if not self.yaml_path_resolvers:
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            depth = len(self.resolver_prefix_paths)
            # Keep only the prefixes that still match at this depth; a
            # fully-consumed path becomes an exact match.
            for path, kind in self.resolver_prefix_paths[-1]:
                if self.check_resolver_prefix(depth, path, kind,
                        current_node, current_index):
                    if len(path) > depth:
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
        else:
            # Root node: seed from all registered paths.
            for path, kind in self.yaml_path_resolvers:
                if not path:
                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)

    def ascend_resolver(self):
        """Pop the resolver state pushed by ``descend_resolver``."""
        if not self.yaml_path_resolvers:
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()

    def check_resolver_prefix(self, depth, path, kind,
            current_node, current_index):
        """Return True if *path* still matches at *depth* for the given
        parent node and index (returns None, falsy, otherwise)."""
        node_check, index_check = path[depth-1]
        if isinstance(node_check, str):
            if current_node.tag != node_check:
                return
        elif node_check is not None:
            if not isinstance(current_node, node_check):
                return
        # True means "match keys only"; None/False mean "match values only".
        if index_check is True and current_index is not None:
            return
        if (index_check is False or index_check is None)    \
                and current_index is None:
            return
        if isinstance(index_check, str):
            if not (isinstance(current_index, ScalarNode)
                    and index_check == current_index.value):
                return
        elif isinstance(index_check, int) and not isinstance(index_check, bool):
            if index_check != current_index:
                return
        return True

    def resolve(self, kind, value, implicit):
        """Return the tag for a node of class *kind* with content *value*.

        *implicit* is a pair of flags; the first allows implicit (regexp)
        resolution for plain scalars.  Falls back to path resolvers, then
        to the per-kind default tag.
        """
        if kind is ScalarNode and implicit[0]:
            # Index the resolver table by the scalar's first character;
            # the None entry holds wildcard resolvers.
            if value == '':
                resolvers = self.yaml_implicit_resolvers.get('', [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
            for tag, regexp in resolvers + wildcard_resolvers:
                if regexp.match(value):
                    return tag
            implicit = implicit[1]
        if self.yaml_path_resolvers:
            exact_paths = self.resolver_exact_paths[-1]
            if kind in exact_paths:
                return exact_paths[kind]
            if None in exact_paths:
                return exact_paths[None]
        if kind is ScalarNode:
            return self.DEFAULT_SCALAR_TAG
        elif kind is SequenceNode:
            return self.DEFAULT_SEQUENCE_TAG
        elif kind is MappingNode:
            return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    """BaseResolver preloaded (below) with the standard YAML 1.1 implicit
    scalar resolvers."""
    pass
# Standard YAML 1.1 implicit scalar resolvers.  Each entry pairs a tag with
# the verbose regexp a plain scalar must match and the list of characters
# that may start such a scalar (used to index the resolver table).
for _tag, _regexp, _first in [
    ('tag:yaml.org,2002:bool',
     re.compile(r'''^(?:yes|Yes|YES|no|No|NO
                    |true|True|TRUE|false|False|FALSE
                    |on|On|ON|off|Off|OFF)$''', re.X),
     list('yYnNtTfFoO')),
    ('tag:yaml.org,2002:float',
     re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
                    |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)?
                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                    |[-+]?\.(?:inf|Inf|INF)
                    |\.(?:nan|NaN|NAN))$''', re.X),
     list('-+0123456789.')),
    ('tag:yaml.org,2002:int',
     re.compile(r'''^(?:[-+]?0b[0-1_]+
                    |[-+]?0[0-7_]+
                    |[-+]?(?:0|[1-9][0-9_]*)
                    |[-+]?0x[0-9a-fA-F_]+
                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
     list('-+0123456789')),
    ('tag:yaml.org,2002:merge',
     re.compile(r'^(?:<<)$'),
     ['<']),
    ('tag:yaml.org,2002:null',
     re.compile(r'''^(?: ~
                    |null|Null|NULL
                    | )$''', re.X),
     ['~', 'n', 'N', '']),
    ('tag:yaml.org,2002:timestamp',
     re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                     (?:[Tt]|[ \t]+)[0-9][0-9]?
                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
     list('0123456789')),
    ('tag:yaml.org,2002:value',
     re.compile(r'^(?:=)$'),
     ['=']),
]:
    Resolver.add_implicit_resolver(_tag, _regexp, _first)
del _tag, _regexp, _first

# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:yaml',
        re.compile(r'^(?:!|&|\*)$'),
        list('!&*'))
venv\Lib\site-packages\yaml\scanner.py
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from .error import MarkedYAMLError
from .tokens import *
class ScannerError(MarkedYAMLError):
    """Raised for ill-formed input at the token level; carries context and
    problem descriptions with their marks (see call sites in Scanner)."""
    pass
class SimpleKey:
    """Record of a position where a simple (single-line) key may start.

    See the simple-keys treatment notes in Scanner for how these records
    are created and invalidated.
    """

    def __init__(self, token_number, required, index, line, column, mark):
        # token_number: where to insert the KEY token if the key confirms;
        # required: a ':' must follow, otherwise it is a scan error.
        self.token_number, self.required = token_number, required
        self.index, self.line, self.column = index, line, column
        self.mark = mark
class Scanner:
def __init__(self):
    """Initialize the scanner."""
    # It is assumed that Scanner and Reader will have a common descendant.
    # Reader do the dirty work of checking for BOM and converting the
    # input data to Unicode. It also adds NUL to the end.
    #
    # Reader supports the following methods
    #   self.peek(i=0)    # peek the next i-th character
    #   self.prefix(l=1)  # peek the next l characters
    #   self.forward(l=1) # read the next l characters and move the pointer.

    # Had we reached the end of the stream?
    self.done = False

    # The number of unclosed '{' and '['. `flow_level == 0` means block
    # context.
    self.flow_level = 0

    # List of processed tokens that are not yet emitted.
    self.tokens = []

    # Add the STREAM-START token.
    self.fetch_stream_start()

    # Number of tokens that were emitted through the `get_token` method.
    self.tokens_taken = 0

    # The current indentation level.
    self.indent = -1

    # Past indentation levels.
    self.indents = []

    # Variables related to simple keys treatment.

    # A simple key is a key that is not denoted by the '?' indicator.
    # Example of simple keys:
    #   ---
    #   block simple key: value
    #   ? not a simple key:
    #   : { flow simple key: value }
    # We emit the KEY token before all keys, so when we find a potential
    # simple key, we try to locate the corresponding ':' indicator.
    # Simple keys should be limited to a single line and 1024 characters.

    # Can a simple key start at the current position? A simple key may
    # start:
    # - at the beginning of the line, not counting indentation spaces
    #       (in block context),
    # - after '{', '[', ',' (in the flow context),
    # - after '?', ':', '-' (in the block context).
    # In the block context, this flag also signifies if a block collection
    # may start at the current position.
    self.allow_simple_key = True

    # Keep track of possible simple keys. This is a dictionary. The key
    # is `flow_level`; there can be no more that one possible simple key
    # for each level. The value is a SimpleKey record:
    #   (token_number, required, index, line, column, mark)
    # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
    # '[', or '{' tokens.
    self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
    """Return True if a next token exists and is an instance of one of
    *choices* (any token counts when no choices are given)."""
    while self.need_more_tokens():
        self.fetch_more_tokens()
    if not self.tokens:
        return False
    if not choices:
        return True
    head = self.tokens[0]
    return any(isinstance(head, choice) for choice in choices)
def peek_token(self):
    """Return the next token without removing it from the queue, or None
    when the stream is exhausted."""
    while self.need_more_tokens():
        self.fetch_more_tokens()
    return self.tokens[0] if self.tokens else None
def get_token(self):
    """Consume and return the next token, or None when none remain."""
    while self.need_more_tokens():
        self.fetch_more_tokens()
    if not self.tokens:
        return None
    self.tokens_taken += 1
    return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
    """Return True while more tokens must be scanned before the head of
    the queue can be emitted.

    NOTE(review): the fall-through path returns None rather than False;
    callers only test truthiness, so this is equivalent in practice.
    """
    if self.done:
        return False
    if not self.tokens:
        return True
    # The current token may be a potential simple key, so we
    # need to look further.
    self.stale_possible_simple_keys()
    if self.next_possible_simple_key() == self.tokens_taken:
        return True
def fetch_more_tokens(self):
    """Scan past whitespace/comments, update indentation, then dispatch
    on the next character to the matching fetch_* handler."""
    # Eat whitespaces and comments until we reach the next token.
    self.scan_to_next_token()

    # Remove obsolete possible simple keys.
    self.stale_possible_simple_keys()

    # Compare the current indentation and column. It may add some tokens
    # and decrease the current indentation level.
    self.unwind_indent(self.column)

    # Peek the next character.
    ch = self.peek()

    # Is it the end of stream?
    if ch == '\0':
        return self.fetch_stream_end()

    # Is it a directive?
    if ch == '%' and self.check_directive():
        return self.fetch_directive()

    # Is it the document start?
    if ch == '-' and self.check_document_start():
        return self.fetch_document_start()

    # Is it the document end?
    if ch == '.' and self.check_document_end():
        return self.fetch_document_end()

    # TODO: support for BOM within a stream.
    #if ch == '\uFEFF':
    #    return self.fetch_bom()    <-- issue BOMToken

    # Note: the order of the following checks is NOT significant.

    # Is it the flow sequence start indicator?
    if ch == '[':
        return self.fetch_flow_sequence_start()

    # Is it the flow mapping start indicator?
    if ch == '{':
        return self.fetch_flow_mapping_start()

    # Is it the flow sequence end indicator?
    if ch == ']':
        return self.fetch_flow_sequence_end()

    # Is it the flow mapping end indicator?
    if ch == '}':
        return self.fetch_flow_mapping_end()

    # Is it the flow entry indicator?
    if ch == ',':
        return self.fetch_flow_entry()

    # Is it the block entry indicator?
    if ch == '-' and self.check_block_entry():
        return self.fetch_block_entry()

    # Is it the key indicator?
    if ch == '?' and self.check_key():
        return self.fetch_key()

    # Is it the value indicator?
    if ch == ':' and self.check_value():
        return self.fetch_value()

    # Is it an alias?
    if ch == '*':
        return self.fetch_alias()

    # Is it an anchor?
    if ch == '&':
        return self.fetch_anchor()

    # Is it a tag?
    if ch == '!':
        return self.fetch_tag()

    # Is it a literal scalar?
    if ch == '|' and not self.flow_level:
        return self.fetch_literal()

    # Is it a folded scalar?
    if ch == '>' and not self.flow_level:
        return self.fetch_folded()

    # Is it a single quoted scalar?
    if ch == '\'':
        return self.fetch_single()

    # Is it a double quoted scalar?
    if ch == '\"':
        return self.fetch_double()

    # It must be a plain scalar then.
    if self.check_plain():
        return self.fetch_plain()

    # No? It's an error. Let's produce a nice error message.
    raise ScannerError("while scanning for the next token", None,
            "found character %r that cannot start any token" % ch,
            self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
    """Return the smallest token number among pending simple-key
    candidates, or None when there are none."""
    return min(
        (candidate.token_number
         for candidate in self.possible_simple_keys.values()),
        default=None)
def stale_possible_simple_keys(self):
    """Drop simple-key candidates that can no longer be valid.

    A simple key must stay on one line and within 1024 characters of its
    start; a *required* key that goes stale is a scan error.
    """
    for level, key in list(self.possible_simple_keys.items()):
        if key.line != self.line or self.index - key.index > 1024:
            if key.required:
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not find expected ':'", self.get_mark())
            del self.possible_simple_keys[level]
def save_possible_simple_key(self):
    """Record the current position as a simple-key candidate for the
    current flow level (called before ALIAS, ANCHOR, TAG, flow scalars,
    '[' and '{')."""
    # A key is *required* when it sits exactly at the block indent column.
    required = not self.flow_level and self.indent == self.column
    if not self.allow_simple_key:
        return
    # Replace any stale candidate at this level first.
    self.remove_possible_simple_key()
    token_number = self.tokens_taken + len(self.tokens)
    self.possible_simple_keys[self.flow_level] = SimpleKey(
            token_number, required,
            self.index, self.line, self.column, self.get_mark())
def remove_possible_simple_key(self):
    """Discard the simple-key candidate at the current flow level; error
    out if that key was required."""
    key = self.possible_simple_keys.get(self.flow_level)
    if key is None:
        return
    if key.required:
        raise ScannerError("while scanning a simple key", key.mark,
                "could not find expected ':'", self.get_mark())
    del self.possible_simple_keys[self.flow_level]
# Indentation functions.
def unwind_indent(self, column):
    """Pop indentation levels deeper than *column*, emitting a BLOCK-END
    token for each one.  No-op inside flow collections, where this
    scanner deliberately ignores indentation (more lenient than the
    spec, which would also require `self.indent >= column` in flow
    context)."""
    if self.flow_level:
        return
    while self.indent > column:
        mark = self.get_mark()
        self.indent = self.indents.pop()
        self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
    """Push a new indentation level if *column* is deeper than the
    current one; return True when a level was pushed."""
    if self.indent >= column:
        return False
    self.indents.append(self.indent)
    self.indent = column
    return True
# Fetchers.
def fetch_stream_start(self):
    """Emit the zero-width STREAM-START token (always the first token)."""
    # We always add STREAM-START as the first token and STREAM-END as the
    # last token.
    # Read the token.
    mark = self.get_mark()
    # Add STREAM-START.
    self.tokens.append(StreamStartToken(mark, mark,
        encoding=self.encoding))
def fetch_stream_end(self):
    """Close every open block, reset simple keys, emit STREAM-END and
    mark scanning as finished."""
    # Set the current indentation to -1.
    self.unwind_indent(-1)
    # Reset simple keys.
    self.remove_possible_simple_key()
    self.allow_simple_key = False
    self.possible_simple_keys = {}
    # Read the token.
    mark = self.get_mark()
    # Add STREAM-END.
    self.tokens.append(StreamEndToken(mark, mark))
    # The steam is finished.
    self.done = True
def fetch_directive(self):
    """Scan a '%' directive line and emit the DIRECTIVE token."""
    # Set the current indentation to -1.
    self.unwind_indent(-1)
    # Reset simple keys.
    self.remove_possible_simple_key()
    self.allow_simple_key = False
    # Scan and add DIRECTIVE.
    self.tokens.append(self.scan_directive())
def fetch_document_start(self):
    """Handle the '---' document-start marker."""
    self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
    """Handle the '...' document-end marker."""
    self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
    """Shared body for '---' and '...': unwind indentation, forbid
    simple keys, consume the three marker characters and emit
    *TokenClass*."""
    self.unwind_indent(-1)
    # No block collection may follow a document indicator.
    self.remove_possible_simple_key()
    self.allow_simple_key = False
    start_mark = self.get_mark()
    self.forward(3)
    self.tokens.append(TokenClass(start_mark, self.get_mark()))
def fetch_flow_sequence_start(self):
    """Handle '['."""
    self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
    """Handle '{'."""
    self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
    """Shared body for '[' and '{': enter a flow level and emit
    *TokenClass*."""
    # The bracket itself may begin a simple key (e.g. `{a: b}: c`).
    self.save_possible_simple_key()
    self.flow_level += 1
    # A simple key may immediately follow the opening bracket.
    self.allow_simple_key = True
    start_mark = self.get_mark()
    self.forward()
    self.tokens.append(TokenClass(start_mark, self.get_mark()))
def fetch_flow_sequence_end(self):
    """Handle ']'."""
    self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
    """Handle '}'."""
    self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
    """Shared body for ']' and '}': leave a flow level and emit
    *TokenClass*."""
    # Any pending simple key at this level is void.
    self.remove_possible_simple_key()
    self.flow_level -= 1
    # No simple key may directly follow a closing bracket.
    self.allow_simple_key = False
    start_mark = self.get_mark()
    self.forward()
    self.tokens.append(TokenClass(start_mark, self.get_mark()))
def fetch_flow_entry(self):
    """Handle ',' inside a flow collection."""
    # ',' re-enables simple keys and invalidates any pending one.
    self.allow_simple_key = True
    self.remove_possible_simple_key()
    start_mark = self.get_mark()
    self.forward()
    self.tokens.append(FlowEntryToken(start_mark, self.get_mark()))
def fetch_block_entry(self):
    """Handle a '-' block-sequence entry indicator."""
    # Block context needs additional checks.
    if not self.flow_level:

        # Are we allowed to start a new entry?
        if not self.allow_simple_key:
            raise ScannerError(None, None,
                    "sequence entries are not allowed here",
                    self.get_mark())

        # We may need to add BLOCK-SEQUENCE-START.
        if self.add_indent(self.column):
            mark = self.get_mark()
            self.tokens.append(BlockSequenceStartToken(mark, mark))

    # It's an error for the block entry to occur in the flow context,
    # but we let the parser detect this.
    else:
        pass

    # Simple keys are allowed after '-'.
    self.allow_simple_key = True

    # Reset possible simple key on the current level.
    self.remove_possible_simple_key()

    # Add BLOCK-ENTRY.
    start_mark = self.get_mark()
    self.forward()
    end_mark = self.get_mark()
    self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
    """Handle an explicit '?' key indicator."""
    if not self.flow_level:
        # In block context an explicit key is only legal where a simple
        # key could also start.
        if not self.allow_simple_key:
            raise ScannerError(None, None,
                    "mapping keys are not allowed here",
                    self.get_mark())
        # Starting a new block mapping at this column?
        if self.add_indent(self.column):
            mark = self.get_mark()
            self.tokens.append(BlockMappingStartToken(mark, mark))
    # After '?' a simple key may start, but only in block context.
    self.allow_simple_key = not self.flow_level
    self.remove_possible_simple_key()
    start_mark = self.get_mark()
    self.forward()
    self.tokens.append(KeyToken(start_mark, self.get_mark()))
def fetch_value(self):
    """Handle ':' — confirm a pending simple key (inserting its KEY
    token retroactively) or start a complex value."""
    # Do we determine a simple key?
    if self.flow_level in self.possible_simple_keys:

        # Add KEY.
        key = self.possible_simple_keys[self.flow_level]
        del self.possible_simple_keys[self.flow_level]
        # Insert at the position recorded when the key candidate was
        # saved, adjusted for tokens already handed out.
        self.tokens.insert(key.token_number-self.tokens_taken,
                KeyToken(key.mark, key.mark))

        # If this key starts a new block mapping, we need to add
        # BLOCK-MAPPING-START.
        if not self.flow_level:
            if self.add_indent(key.column):
                self.tokens.insert(key.token_number-self.tokens_taken,
                        BlockMappingStartToken(key.mark, key.mark))

        # There cannot be two simple keys one after another.
        self.allow_simple_key = False

    # It must be a part of a complex key.
    else:

        # Block context needs additional checks.
        # (Do we really need them? They will be caught by the parser
        # anyway.)
        if not self.flow_level:

            # We are allowed to start a complex value if and only if
            # we can start a simple key.
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping values are not allowed here",
                        self.get_mark())

        # If this value starts a new block mapping, we need to add
        # BLOCK-MAPPING-START.  It will be detected as an error later by
        # the parser.
        if not self.flow_level:
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))

        # Simple keys are allowed after ':' in the block context.
        self.allow_simple_key = not self.flow_level

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

    # Add VALUE.
    start_mark = self.get_mark()
    self.forward()
    end_mark = self.get_mark()
    self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
    """Handle '*alias'."""
    # ALIAS could be a simple key.
    self.save_possible_simple_key()
    # No simple keys after ALIAS.
    self.allow_simple_key = False
    # Scan and add ALIAS.
    self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
    """Handle '&anchor'."""
    # ANCHOR could start a simple key.
    self.save_possible_simple_key()
    # No simple keys after ANCHOR.
    self.allow_simple_key = False
    # Scan and add ANCHOR.
    self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
    """Handle a '!' tag."""
    # TAG could start a simple key.
    self.save_possible_simple_key()
    # No simple keys after TAG.
    self.allow_simple_key = False
    # Scan and add TAG.
    self.tokens.append(self.scan_tag())
def fetch_literal(self):
    """Handle a '|' literal block scalar."""
    self.fetch_block_scalar(style='|')
def fetch_folded(self):
    """Handle a '>' folded block scalar."""
    self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
    """Shared body for literal ('|') and folded ('>') scalars."""
    # A simple key may follow a block scalar.
    self.allow_simple_key = True
    # Reset possible simple key on the current level.
    self.remove_possible_simple_key()
    # Scan and add SCALAR.
    self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
    """Handle a single-quoted flow scalar."""
    self.fetch_flow_scalar(style='\'')
def fetch_double(self):
    """Fetch a double-quoted flow scalar."""
    self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
    """Shared fetch path for single- and double-quoted scalars."""
    # A quoted scalar may itself be a simple key.
    self.save_possible_simple_key()
    # Nothing directly after a quoted scalar can start a simple key.
    self.allow_simple_key = False
    token = self.scan_flow_scalar(style)
    self.tokens.append(token)
def fetch_plain(self):
    """Fetch a plain (unquoted) scalar token."""
    # A plain scalar may be a simple key.
    self.save_possible_simple_key()
    # Disallow simple keys after the scalar; note that scan_plain()
    # re-enables the flag itself when scanning ends at the start of a
    # line.
    self.allow_simple_key = False
    self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
    """Return True when a directive may start here (column 0 only).

    The '%' indicator itself has already been checked by the caller.
    """
    if self.column != 0:
        return None
    return True
def check_document_start(self):
    """Return True when '---' plus space/EOL starts at column 0."""
    if self.column != 0:
        return None
    if self.prefix(3) == '---' and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
        return True
    return None
def check_document_end(self):
    """Return True when '...' plus space/EOL starts at column 0."""
    if self.column != 0:
        return None
    if self.prefix(3) == '...' and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
        return True
    return None
def check_block_entry(self):
    """Return True when '-' is followed by whitespace or end of line."""
    followers = '\0 \t\r\n\x85\u2028\u2029'
    return self.peek(1) in followers
def check_key(self):
    """Return True when '?' can introduce a key in the current context."""
    # In flow context a '?' always introduces a key.
    if self.flow_level:
        return True
    # In block context it must be followed by whitespace or EOL.
    return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
    """Return True when ':' can introduce a value in the current context."""
    # In flow context a ':' always introduces a value.
    if self.flow_level:
        return True
    # In block context it must be followed by whitespace or EOL.
    return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
    """Return True when the current character can start a plain scalar.

    A plain scalar may start with any non-space character except the
    indicators '-?:,[]{}#&*!|>\\'"%@`'.  It may also start with '-',
    '?' or ':' if followed by a non-space character; for '?' and ':'
    this is limited to the block context so that the flow context stays
    whitespace-independent.
    """
    first = self.peek()
    if first not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`':
        return True
    followed = self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
    return followed and (first == '-' or (not self.flow_level and first in '?:'))
# Scanners.
def scan_to_next_token(self):
    """Skip spaces, comments and line breaks up to the next token.

    Crossing a line break in the block context re-enables the
    `allow_simple_key` flag.
    """
    # The byte order mark is stripped only if it is the very first
    # character in the stream.  A BOM inside the stream (which the
    # specification requires us to support) is treated as part of the
    # document instead.
    #
    # TODO: We need to make tab handling rules more sane. A good rule is:
    #   Tabs cannot precede tokens
    #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
    #   KEY(block), VALUE(block), BLOCK-ENTRY.
    # We also need to add the check for `allow_simple_keys == True` to
    # `unwind_indent` before issuing BLOCK-END.
    # Scanners for block, flow, and plain scalars need to be modified.
    if self.index == 0 and self.peek() == '\uFEFF':
        self.forward()
    found = False
    while not found:
        # Consume a run of spaces.
        while self.peek() == ' ':
            self.forward()
        # A comment runs to the end of the line.
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        if self.scan_line_break():
            # Crossed a line break; in block context a simple key may
            # start at the next token.
            if not self.flow_level:
                self.allow_simple_key = True
        else:
            found = True
def scan_directive(self):
    """Scan a '%YAML', '%TAG' or unknown directive into a DirectiveToken."""
    # See the specification for details.
    start_mark = self.get_mark()
    self.forward()  # skip the '%' indicator
    name = self.scan_directive_name(start_mark)
    value = None
    if name == 'YAML':
        value = self.scan_yaml_directive_value(start_mark)
        end_mark = self.get_mark()
    elif name == 'TAG':
        value = self.scan_tag_directive_value(start_mark)
        end_mark = self.get_mark()
    else:
        # Unknown directive: skip its parameters; the token carries
        # value=None.
        end_mark = self.get_mark()
        while self.peek() not in '\0\r\n\x85\u2028\u2029':
            self.forward()
    self.scan_directive_ignored_line(start_mark)
    return DirectiveToken(name, value, start_mark, end_mark)
def scan_directive_name(self, start_mark):
    """Scan the alphanumeric name following the '%' indicator."""
    # See the specification for details.
    count = 0
    ch = self.peek(count)
    while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
            or ch in '-_':
        count += 1
        ch = self.peek(count)
    if count == 0:
        raise ScannerError("while scanning a directive", start_mark,
                "expected alphabetic or numeric character, but found %r"
                % ch, self.get_mark())
    name = self.prefix(count)
    self.forward(count)
    # The name must end at whitespace or end of line.
    ch = self.peek()
    if ch not in '\0 \r\n\x85\u2028\u2029':
        raise ScannerError("while scanning a directive", start_mark,
                "expected alphabetic or numeric character, but found %r"
                % ch, self.get_mark())
    return name
def scan_yaml_directive_value(self, start_mark):
    """Scan the 'major.minor' version pair of a %YAML directive."""
    # See the specification for details.
    while self.peek() == ' ':
        self.forward()
    major = self.scan_yaml_directive_number(start_mark)
    if self.peek() != '.':
        raise ScannerError("while scanning a directive", start_mark,
                "expected a digit or '.', but found %r" % self.peek(),
                self.get_mark())
    self.forward()
    minor = self.scan_yaml_directive_number(start_mark)
    # The version pair must be followed by whitespace or end of line.
    if self.peek() not in '\0 \r\n\x85\u2028\u2029':
        raise ScannerError("while scanning a directive", start_mark,
                "expected a digit or ' ', but found %r" % self.peek(),
                self.get_mark())
    return (major, minor)
def scan_yaml_directive_number(self, start_mark):
    """Scan one decimal integer of a %YAML version pair."""
    # See the specification for details.
    ch = self.peek()
    if not ('0' <= ch <= '9'):
        raise ScannerError("while scanning a directive", start_mark,
                "expected a digit, but found %r" % ch, self.get_mark())
    width = 0
    while '0' <= self.peek(width) <= '9':
        width += 1
    number = int(self.prefix(width))
    self.forward(width)
    return number
def scan_tag_directive_value(self, start_mark):
    """Scan the '(handle, prefix)' payload of a %TAG directive."""
    # See the specification for details.
    while self.peek() == ' ':
        self.forward()
    handle = self.scan_tag_directive_handle(start_mark)
    while self.peek() == ' ':
        self.forward()
    return (handle, self.scan_tag_directive_prefix(start_mark))
def scan_tag_directive_handle(self, start_mark):
    """Scan the handle part of a %TAG directive; a space must follow."""
    # See the specification for details.
    handle = self.scan_tag_handle('directive', start_mark)
    ch = self.peek()
    if ch != ' ':
        raise ScannerError("while scanning a directive", start_mark,
                "expected ' ', but found %r" % ch, self.get_mark())
    return handle
def scan_tag_directive_prefix(self, start_mark):
    """Scan the prefix (URI) part of a %TAG directive."""
    # See the specification for details.
    uri = self.scan_tag_uri('directive', start_mark)
    ch = self.peek()
    if ch not in '\0 \r\n\x85\u2028\u2029':
        raise ScannerError("while scanning a directive", start_mark,
                "expected ' ', but found %r" % ch, self.get_mark())
    return uri
def scan_directive_ignored_line(self, start_mark):
    """Consume trailing spaces and an optional comment after a directive."""
    # See the specification for details.
    line_end = '\0\r\n\x85\u2028\u2029'
    while self.peek() == ' ':
        self.forward()
    if self.peek() == '#':
        # Skip the comment through to the end of the line.
        while self.peek() not in line_end:
            self.forward()
    ch = self.peek()
    if ch not in line_end:
        raise ScannerError("while scanning a directive", start_mark,
                "expected a comment or a line break, but found %r"
                % ch, self.get_mark())
    self.scan_line_break()
def scan_anchor(self, TokenClass):
    """Scan an anchor ('&name') or alias ('*name') into TokenClass.

    The specification does not restrict characters for anchors and
    aliases, which makes a document like `[ *alias, value ]` ambiguous
    (is it `[ "value" ]` or `[ *alias , "value" ]`?).  Names are
    therefore restricted to ASCII letters, digits, '-' and '_'.
    """
    start_mark = self.get_mark()
    indicator = self.peek()
    # '*' introduces an alias, '&' an anchor; the name is used only in
    # error messages.
    if indicator == '*':
        name = 'alias'
    else:
        name = 'anchor'
    self.forward()
    length = 0
    ch = self.peek(length)
    while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
            or ch in '-_':
        length += 1
        ch = self.peek(length)
    if not length:
        raise ScannerError("while scanning an %s" % name, start_mark,
                "expected alphabetic or numeric character, but found %r"
                % ch, self.get_mark())
    value = self.prefix(length)
    self.forward(length)
    # The name must be terminated by whitespace or a flow indicator.
    ch = self.peek()
    if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
        raise ScannerError("while scanning an %s" % name, start_mark,
                "expected alphabetic or numeric character, but found %r"
                % ch, self.get_mark())
    end_mark = self.get_mark()
    return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
    """Scan a tag token: verbatim '!<uri>', bare '!', or '!handle!suffix'."""
    # See the specification for details.
    start_mark = self.get_mark()
    ch = self.peek(1)
    if ch == '<':
        # Verbatim tag: !<...>
        handle = None
        self.forward(2)
        suffix = self.scan_tag_uri('tag', start_mark)
        if self.peek() != '>':
            raise ScannerError("while parsing a tag", start_mark,
                    "expected '>', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
    elif ch in '\0 \t\r\n\x85\u2028\u2029':
        # Bare '!' followed by whitespace/EOL: the non-specific tag.
        handle = None
        suffix = '!'
        self.forward()
    else:
        # Look ahead for a second '!' to distinguish '!handle!suffix'
        # from a simple '!suffix'.
        length = 1
        use_handle = False
        while ch not in '\0 \r\n\x85\u2028\u2029':
            if ch == '!':
                use_handle = True
                break
            length += 1
            ch = self.peek(length)
        handle = '!'
        if use_handle:
            handle = self.scan_tag_handle('tag', start_mark)
        else:
            handle = '!'
            self.forward()
        suffix = self.scan_tag_uri('tag', start_mark)
    # The tag must be followed by whitespace or end of line.
    ch = self.peek()
    if ch not in '\0 \r\n\x85\u2028\u2029':
        raise ScannerError("while scanning a tag", start_mark,
                "expected ' ', but found %r" % ch, self.get_mark())
    value = (handle, suffix)
    end_mark = self.get_mark()
    return TagToken(value, start_mark, end_mark)
def scan_block_scalar(self, style):
    """Scan a literal ('|') or folded ('>') block scalar."""
    # See the specification for details.
    if style == '>':
        folded = True
    else:
        folded = False
    chunks = []
    start_mark = self.get_mark()
    # Scan the header: indicator, chomping/indentation, trailing comment.
    self.forward()
    chomping, increment = self.scan_block_scalar_indicators(start_mark)
    self.scan_block_scalar_ignored_line(start_mark)
    # Determine the indentation level and go to the first non-empty line.
    min_indent = self.indent+1
    if min_indent < 1:
        min_indent = 1
    if increment is None:
        # Auto-detect the indentation from the first non-empty line.
        breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
        indent = max(min_indent, max_indent)
    else:
        indent = min_indent+increment-1
        breaks, end_mark = self.scan_block_scalar_breaks(indent)
    line_break = ''
    # Scan the inner part of the block scalar, line by line.
    while self.column == indent and self.peek() != '\0':
        chunks.extend(breaks)
        leading_non_space = self.peek() not in ' \t'
        length = 0
        while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
            length += 1
        chunks.append(self.prefix(length))
        self.forward(length)
        line_break = self.scan_line_break()
        breaks, end_mark = self.scan_block_scalar_breaks(indent)
        if self.column == indent and self.peek() != '\0':
            # Unfortunately, folding rules are ambiguous.
            #
            # This is the folding according to the specification:
            # a '\n' between two non-indented lines becomes a space
            # (or nothing when blank lines follow).
            if folded and line_break == '\n' \
                    and leading_non_space and self.peek() not in ' \t':
                if not breaks:
                    chunks.append(' ')
            else:
                chunks.append(line_break)
            # This is Clark Evans's interpretation (also in the spec
            # examples):
            #
            #if folded and line_break == '\n':
            #    if not breaks:
            #        if self.peek() not in ' \t':
            #            chunks.append(' ')
            #        else:
            #            chunks.append(line_break)
            #else:
            #    chunks.append(line_break)
        else:
            break
    # Chomp the tail: keep the final break unless chomping is '-',
    # and keep trailing empty lines only for '+'.
    if chomping is not False:
        chunks.append(line_break)
    if chomping is True:
        chunks.extend(breaks)
    # We are done.
    return ScalarToken(''.join(chunks), False, start_mark, end_mark,
            style)
def scan_block_scalar_indicators(self, start_mark):
    """Scan the optional chomping ('+'/'-') and indentation (1-9) indicators.

    The two indicators may appear in either order; returns a
    (chomping, increment) pair where either element may be None.
    """
    # See the specification for details.
    chomping = None
    increment = None
    ch = self.peek()
    if ch in '+-':
        # Chomping indicator first, then an optional indentation digit.
        if ch == '+':
            chomping = True
        else:
            chomping = False
        self.forward()
        ch = self.peek()
        if ch in '0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
    elif ch in '0123456789':
        # Indentation digit first, then an optional chomping indicator.
        increment = int(ch)
        if increment == 0:
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected indentation indicator in the range 1-9, but found 0",
                    self.get_mark())
        self.forward()
        ch = self.peek()
        if ch in '+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
    # Whatever followed must terminate the indicator sequence.
    ch = self.peek()
    if ch not in '\0 \r\n\x85\u2028\u2029':
        raise ScannerError("while scanning a block scalar", start_mark,
                "expected chomping or indentation indicators, but found %r"
                % ch, self.get_mark())
    return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
    """Consume trailing spaces and an optional comment after the header."""
    # See the specification for details.
    line_end = '\0\r\n\x85\u2028\u2029'
    while self.peek() == ' ':
        self.forward()
    if self.peek() == '#':
        # Skip the comment through to the end of the line.
        while self.peek() not in line_end:
            self.forward()
    ch = self.peek()
    if ch not in line_end:
        raise ScannerError("while scanning a block scalar", start_mark,
                "expected a comment or a line break, but found %r" % ch,
                self.get_mark())
    self.scan_line_break()
def scan_block_scalar_indentation(self):
    """Find the maximum indentation over the leading empty lines.

    Returns (collected line breaks, max column reached, end mark);
    used to auto-detect the scalar's indentation level.
    """
    # See the specification for details.
    chunks = []
    max_indent = 0
    end_mark = self.get_mark()
    while self.peek() in ' \r\n\x85\u2028\u2029':
        if self.peek() != ' ':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
        else:
            self.forward()
            if self.column > max_indent:
                max_indent = self.column
    return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
    """Consume line breaks and up to `indent` columns of indentation.

    Returns (collected line breaks, end mark).
    """
    # See the specification for details.
    chunks = []
    end_mark = self.get_mark()
    # Skip indentation on the current line.
    while self.column < indent and self.peek() == ' ':
        self.forward()
    while self.peek() in '\r\n\x85\u2028\u2029':
        chunks.append(self.scan_line_break())
        end_mark = self.get_mark()
        # Skip indentation on the following line.
        while self.column < indent and self.peek() == ' ':
            self.forward()
    return chunks, end_mark
def scan_flow_scalar(self, style):
    """Scan a quoted scalar; `style` is '\\'' or '"'."""
    # Note that we loosen the indentation rules for quoted scalars:
    # the quotes clearly mark the beginning and end, so we are less
    # restrictive than the specification requires.  We only need to
    # check that document separators are not included in scalars
    # (done in scan_flow_scalar_breaks).
    if style == '"':
        double = True
    else:
        double = False
    chunks = []
    start_mark = self.get_mark()
    quote = self.peek()
    self.forward()
    # Alternate between non-space runs and whitespace folding until
    # the closing quote.
    chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
    while self.peek() != quote:
        chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
    self.forward()
    end_mark = self.get_mark()
    return ScalarToken(''.join(chunks), False, start_mark, end_mark,
            style)
# Single-character escape sequences recognized in double-quoted scalars,
# mapped to the character they denote.
ESCAPE_REPLACEMENTS = {
    '0': '\0',
    'a': '\x07',
    'b': '\x08',
    't': '\x09',
    '\t': '\x09',
    'n': '\x0A',
    'v': '\x0B',
    'f': '\x0C',
    'r': '\x0D',
    'e': '\x1B',
    ' ': '\x20',
    '\"': '\"',
    '\\': '\\',
    '/': '/',
    'N': '\x85',
    '_': '\xA0',
    'L': '\u2028',
    'P': '\u2029',
}

# Escapes that introduce a fixed-width hexadecimal code point:
# \xXX, \uXXXX, \UXXXXXXXX (value = number of hex digits).
ESCAPE_CODES = {
    'x': 2,
    'u': 4,
    'U': 8,
}
def scan_flow_scalar_non_spaces(self, double, start_mark):
    """Scan the non-whitespace parts of a quoted scalar, handling escapes."""
    # See the specification for details.
    chunks = []
    while True:
        # Take the longest run of ordinary characters.
        length = 0
        while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
            length += 1
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
        ch = self.peek()
        if not double and ch == '\'' and self.peek(1) == '\'':
            # In single-quoted scalars '' escapes a single quote.
            chunks.append('\'')
            self.forward(2)
        elif (double and ch == '\'') or (not double and ch in '\"\\'):
            # The other style's quote and (single-quoted) backslash are
            # ordinary characters.
            chunks.append(ch)
            self.forward()
        elif double and ch == '\\':
            # Backslash escapes are only recognized in double quotes.
            self.forward()
            ch = self.peek()
            if ch in self.ESCAPE_REPLACEMENTS:
                chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                self.forward()
            elif ch in self.ESCAPE_CODES:
                # \xXX, \uXXXX or \UXXXXXXXX hexadecimal code point.
                length = self.ESCAPE_CODES[ch]
                self.forward()
                for k in range(length):
                    if self.peek(k) not in '0123456789ABCDEFabcdef':
                        raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                "expected escape sequence of %d hexadecimal numbers, but found %r" %
                                (length, self.peek(k)), self.get_mark())
                code = int(self.prefix(length), 16)
                chunks.append(chr(code))
                self.forward(length)
            elif ch in '\r\n\x85\u2028\u2029':
                # Escaped line break: the break itself is removed.
                self.scan_line_break()
                chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
            else:
                raise ScannerError("while scanning a double-quoted scalar", start_mark,
                        "found unknown escape character %r" % ch, self.get_mark())
        else:
            return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
    """Scan whitespace/line breaks inside a quoted scalar, with folding."""
    # See the specification for details.
    chunks = []
    length = 0
    while self.peek(length) in ' \t':
        length += 1
    whitespaces = self.prefix(length)
    self.forward(length)
    ch = self.peek()
    if ch == '\0':
        raise ScannerError("while scanning a quoted scalar", start_mark,
                "found unexpected end of stream", self.get_mark())
    elif ch in '\r\n\x85\u2028\u2029':
        line_break = self.scan_line_break()
        breaks = self.scan_flow_scalar_breaks(double, start_mark)
        # Folding: a lone '\n' becomes a space (unless blank lines
        # follow); other break characters are kept literally.
        if line_break != '\n':
            chunks.append(line_break)
        elif not breaks:
            chunks.append(' ')
        chunks.extend(breaks)
    else:
        # No line break: the whitespace is literal.
        chunks.append(whitespaces)
    return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
    """Consume blank lines inside a quoted scalar, rejecting '---'/'...'."""
    # See the specification for details.
    chunks = []
    while True:
        # Instead of checking indentation, we check for document
        # separators.
        prefix = self.prefix(3)
        if (prefix == '---' or prefix == '...') \
                and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected document separator", self.get_mark())
        while self.peek() in ' \t':
            self.forward()
        if self.peek() in '\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
        else:
            return chunks
def scan_plain(self):
    """Scan a plain (unquoted) scalar; may re-enable `allow_simple_key`."""
    # See the specification for details.
    # We add an additional restriction for the flow context:
    # plain scalars in the flow context cannot contain ',' or '?'.
    # We also keep track of the `allow_simple_key` flag here.
    # Indentation rules are loosed for the flow context.
    chunks = []
    start_mark = self.get_mark()
    end_mark = start_mark
    indent = self.indent+1
    # We allow zero indentation for scalars, but then we need to check for
    # document separators at the beginning of the line.
    #if indent == 0:
    #    indent = 1
    spaces = []
    while True:
        length = 0
        # A '#' at a word boundary starts a comment, ending the scalar.
        if self.peek() == '#':
            break
        # Take the longest run of characters that stay inside the scalar.
        while True:
            ch = self.peek(length)
            if ch in '\0 \t\r\n\x85\u2028\u2029' \
                    or (ch == ':' and
                        self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
                              + (u',[]{}' if self.flow_level else u''))\
                    or (self.flow_level and ch in ',?[]{}'):
                break
            length += 1
        if length == 0:
            break
        self.allow_simple_key = False
        chunks.extend(spaces)
        chunks.append(self.prefix(length))
        self.forward(length)
        end_mark = self.get_mark()
        spaces = self.scan_plain_spaces(indent, start_mark)
        # scan_plain_spaces returns a falsy value (None/[]) when the
        # scalar must end here.
        if not spaces or self.peek() == '#' \
                or (not self.flow_level and self.column < indent):
            break
    return ScalarToken(''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
    """Scan the whitespace between two non-space runs of a plain scalar.

    Returns the folded whitespace chunks, or None (falsy) when a
    document separator is found, which tells scan_plain to stop.
    """
    # See the specification for details.
    # The specification is really confusing about tabs in plain scalars.
    # We just forbid them completely. Do not use tabs in YAML!
    chunks = []
    length = 0
    while self.peek(length) in ' ':
        length += 1
    whitespaces = self.prefix(length)
    self.forward(length)
    ch = self.peek()
    if ch in '\r\n\x85\u2028\u2029':
        line_break = self.scan_line_break()
        # Crossing a line break re-enables simple keys.
        self.allow_simple_key = True
        prefix = self.prefix(3)
        if (prefix == '---' or prefix == '...') \
                and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
            return
        breaks = []
        while self.peek() in ' \r\n\x85\u2028\u2029':
            if self.peek() == ' ':
                self.forward()
            else:
                breaks.append(self.scan_line_break())
                prefix = self.prefix(3)
                if (prefix == '---' or prefix == '...') \
                        and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                    return
        # Folding: a lone '\n' becomes a space unless blank lines follow.
        if line_break != '\n':
            chunks.append(line_break)
        elif not breaks:
            chunks.append(' ')
        chunks.extend(breaks)
    elif whitespaces:
        chunks.append(whitespaces)
    return chunks
def scan_tag_handle(self, name, start_mark):
    """Scan a tag handle: '!', '!!', or '!word!'.

    The specification does not allow '_' in tag handles; it is
    accepted here anyway.
    """
    ch = self.peek()
    if ch != '!':
        raise ScannerError("while scanning a %s" % name, start_mark,
                "expected '!', but found %r" % ch, self.get_mark())
    width = 1
    ch = self.peek(width)
    if ch != ' ':
        # Consume the word characters; a named handle must close with '!'.
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
                or ch in '-_':
            width += 1
            ch = self.peek(width)
        if ch != '!':
            self.forward(width)
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch, self.get_mark())
        width += 1
    handle = self.prefix(width)
    self.forward(width)
    return handle
def scan_tag_uri(self, name, start_mark):
    """Scan a tag URI, decoding %XX escapes as they occur.

    Note: we do not check whether the URI is well-formed.
    """
    # See the specification for details.
    chunks = []
    length = 0
    ch = self.peek(length)
    while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
            or ch in '-;/?:@&=+$,_.!~*\'()[]%':
        if ch == '%':
            # Flush the literal run collected so far, then decode the
            # escape sequence(s).
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
            chunks.append(self.scan_uri_escapes(name, start_mark))
        else:
            length += 1
        ch = self.peek(length)
    if length:
        chunks.append(self.prefix(length))
        self.forward(length)
        length = 0
    if not chunks:
        raise ScannerError("while parsing a %s" % name, start_mark,
                "expected URI, but found %r" % ch, self.get_mark())
    return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
    """Decode a run of %XX escapes into the UTF-8 string they encode."""
    codes = []
    mark = self.get_mark()
    while self.peek() == '%':
        self.forward()
        # Each escape must supply exactly two hexadecimal digits.
        for k in range(2):
            if self.peek(k) not in '0123456789ABCDEFabcdef':
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
                        % self.peek(k), self.get_mark())
        codes.append(int(self.prefix(2), 16))
        self.forward(2)
    try:
        return bytes(codes).decode('utf-8')
    except UnicodeDecodeError as exc:
        raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
def scan_line_break(self):
    """Consume one line break and return its normalized form.

    '\\r\\n', '\\r', '\\n' and '\\x85' all normalize to '\\n';
    '\\u2028' and '\\u2029' are returned unchanged; any other
    character consumes nothing and yields ''.
    """
    ch = self.peek()
    if ch in '\r\n\x85':
        # CRLF counts as a single break.
        self.forward(2 if self.prefix(2) == '\r\n' else 1)
        return '\n'
    if ch in '\u2028\u2029':
        self.forward()
        return ch
    return ''
venv\Lib\site-packages\yaml\serializer.py
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
    """Raised when the Serializer is driven in an invalid order
    (e.g. serialize() before open(), or open() called twice)."""
    pass
class Serializer:
    """Turns representation trees (nodes) into emitter events.

    Works in two passes per document: anchor_node() decides which
    nodes need anchors (those referenced more than once), then
    serialize_node() emits events, replacing already-serialized nodes
    with alias events.  `emit`, `resolve`, `descend_resolver` and
    `ascend_resolver` are provided by sibling mixin classes.
    """

    # Template for generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = 'id%03d'

    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # node -> True once its events have been emitted.
        self.serialized_nodes = {}
        # node -> anchor name, or None while the node was seen only once.
        self.anchors = {}
        self.last_anchor_id = 0
        # None: not opened yet; False: open; True: closed.
        self.closed = None

    def open(self):
        """Emit STREAM-START; may be called exactly once."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        """Emit STREAM-END; idempotent once closed."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Serialize one representation tree as a complete document."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """First pass: assign anchors to nodes that are referenced twice."""
        if node in self.anchors:
            # Second encounter (shared or recursive node): give it an
            # anchor if it does not have one yet.
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        """Produce the next sequential anchor name."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Second pass: emit events for `node`, using aliases for repeats."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            # Already emitted: refer back with an alias event.
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (tag matches plain-style resolution,
                #             tag matches non-plain resolution).
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
venv\Lib\site-packages\yaml\tokens.py
class Token(object):
    """Base class for scanner tokens; records the source position marks."""

    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Show every attribute except the *_mark pair, sorted by name.
        shown = sorted(key for key in self.__dict__
                       if not key.endswith('_mark'))
        args = ', '.join('%s=%r' % (key, getattr(self, key))
                         for key in shown)
        return '%s(%s)' % (type(self).__name__, args)
#class BOMToken(Token):
# id = ''
# NOTE(review): the `id` strings below that contained angle brackets
# (e.g. '<directive>') were stripped to '' by the HTML-tag removal in
# the layout-dump tooling; the canonical values are restored here so
# token ids remain distinguishable in parser error messages.
class DirectiveToken(Token):
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class DocumentStartToken(Token):
    id = '<document start>'

class DocumentEndToken(Token):
    id = '<document end>'

class StreamStartToken(Token):
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None,
            encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding

class StreamEndToken(Token):
    id = '<stream end>'

class BlockSequenceStartToken(Token):
    id = '<block sequence start>'

class BlockMappingStartToken(Token):
    id = '<block mapping start>'

class BlockEndToken(Token):
    id = '<block end>'

class FlowSequenceStartToken(Token):
    id = '['

class FlowMappingStartToken(Token):
    id = '{'

class FlowSequenceEndToken(Token):
    id = ']'

class FlowMappingEndToken(Token):
    id = '}'

class KeyToken(Token):
    id = '?'

class ValueToken(Token):
    id = ':'

class BlockEntryToken(Token):
    id = '-'

class FlowEntryToken(Token):
    id = ','

class AliasToken(Token):
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class AnchorToken(Token):
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class TagToken(Token):
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class ScalarToken(Token):
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
venv\Lib\site-packages\yaml\__init__.py
from .error import *
from .tokens import *
from .events import *
from .nodes import *
from .loader import *
from .dumper import *

# Package version string.
__version__ = '6.0.2'

# Prefer the LibYAML-based C implementation when it is installed;
# fall back to the pure-Python classes otherwise.
try:
    from .cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False

import io
#------------------------------------------------------------------------------
# XXX "Warnings control" is now deprecated. Leaving in the API function to not
# break code that uses it.
#------------------------------------------------------------------------------
def warnings(settings=None):
    """Deprecated no-op retained only for API compatibility.

    Returns an empty dict when called without arguments, and None
    (implicitly) otherwise.
    """
    if settings is None:
        return {}
#------------------------------------------------------------------------------
def scan(stream, Loader=Loader):
    """Scan a YAML stream, yielding scanner tokens one at a time."""
    scanner = Loader(stream)
    try:
        while scanner.check_token():
            yield scanner.get_token()
    finally:
        # Release loader resources even if the generator is closed early.
        scanner.dispose()
def parse(stream, Loader=Loader):
    """Parse a YAML stream, yielding parsing events one at a time."""
    parser = Loader(stream)
    try:
        while parser.check_event():
            yield parser.get_event()
    finally:
        # Release loader resources even if the generator is closed early.
        parser.dispose()
def compose(stream, Loader=Loader):
    """Parse the first YAML document in a stream into a representation tree."""
    composer = Loader(stream)
    try:
        return composer.get_single_node()
    finally:
        composer.dispose()
def compose_all(stream, Loader=Loader):
    """Parse all YAML documents in a stream, yielding one tree per document."""
    composer = Loader(stream)
    try:
        while composer.check_node():
            yield composer.get_node()
    finally:
        # Release loader resources even if the generator is closed early.
        composer.dispose()
def load(stream, Loader):
    """Parse the first YAML document in a stream into a Python object.

    The Loader class is mandatory; see safe_load/full_load for
    pre-configured variants.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()
def load_all(stream, Loader):
    """Parse all YAML documents in a stream, yielding one object each."""
    loader = Loader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        # Release loader resources even if the generator is closed early.
        loader.dispose()
def full_load(stream):
    """Load the first document in a stream as a Python object.

    Resolves all tags except those known to be unsafe on untrusted
    input.
    """
    return load(stream, FullLoader)
def full_load_all(stream):
    """Load every document in a stream as Python objects.

    Resolves all tags except those known to be unsafe on untrusted
    input.
    """
    return load_all(stream, FullLoader)
def safe_load(stream):
    """Load the first document in a stream as a Python object.

    Resolves only basic YAML tags; known to be safe for untrusted
    input.
    """
    return load(stream, SafeLoader)
def safe_load_all(stream):
    """Load every document in a stream as Python objects.

    Resolves only basic YAML tags; known to be safe for untrusted
    input.
    """
    return load_all(stream, SafeLoader)
def unsafe_load(stream):
    """Load the first document in a stream as a Python object.

    Resolves all tags, even those known to be unsafe on untrusted
    input.
    """
    return load(stream, UnsafeLoader)
def unsafe_load_all(stream):
    """Load every document in a stream as Python objects.

    Resolves all tags, even those known to be unsafe on untrusted
    input.
    """
    return load_all(stream, UnsafeLoader)
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """Emit YAML parsing events into a stream.

    When *stream* is None the events are written into an in-memory
    text buffer and the resulting string is returned.
    """
    capture = None
    if stream is None:
        stream = io.StringIO()
        capture = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        dumper.dispose()
    if capture is not None:
        return capture()
def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """Serialize a sequence of representation trees into a YAML stream.

    When *stream* is None the output is collected in an in-memory
    buffer (text when *encoding* is None, bytes otherwise) and
    returned.
    """
    capture = None
    if stream is None:
        # No encoding -> str output; an encoding -> bytes output.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        capture = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        dumper.dispose()
    if capture is not None:
        return capture()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """Serialize a single representation tree into a YAML stream.

    When *stream* is None the produced string is returned.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=False,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None, sort_keys=True):
    """Serialize a sequence of Python objects into a YAML stream.

    When *stream* is None the output is collected in an in-memory
    buffer (text when *encoding* is None, bytes otherwise) and
    returned.
    """
    capture = None
    if stream is None:
        # No encoding -> str output; an encoding -> bytes output.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        capture = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if capture is not None:
        return capture()
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a single Python object into a YAML stream.

    If stream is None, return the produced string instead.
    """
    # Thin wrapper: delegate to dump_all with a one-element sequence.
    return dump_all((data,), stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream,
    producing only basic (safe) YAML tags.

    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream=stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
    """
    Serialize a single Python object into a YAML stream,
    producing only basic (safe) YAML tags.

    If stream is None, return the produced string instead.
    """
    return dump_all((data,), stream=stream, Dumper=SafeDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
        Loader=None, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None:
        # No explicit Loader: register with every built-in loader class.
        loader.Loader.add_implicit_resolver(tag, regexp, first)
        loader.FullLoader.add_implicit_resolver(tag, regexp, first)
        loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
    else:
        Loader.add_implicit_resolver(tag, regexp, first)
    # The Dumper (default or caller-supplied) is registered in both cases.
    Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None:
        # No explicit Loader: register with every built-in loader class.
        loader.Loader.add_path_resolver(tag, path, kind)
        loader.FullLoader.add_path_resolver(tag, path, kind)
        loader.UnsafeLoader.add_path_resolver(tag, path, kind)
    else:
        Loader.add_path_resolver(tag, path, kind)
    # The Dumper (default or caller-supplied) is registered in both cases.
    Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=None):
    """
    Add a constructor for the given tag.

    A constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is not None:
        Loader.add_constructor(tag, constructor)
        return
    # No explicit Loader given: register with all built-in loader classes.
    for loader_class in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
        loader_class.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
    """
    Add a multi-constructor for the given tag prefix.

    The multi-constructor is called for a node if its tag starts with
    tag_prefix.  It accepts a Loader instance, a tag suffix, and a node
    object, and produces the corresponding Python object.
    """
    if Loader is not None:
        Loader.add_multi_constructor(tag_prefix, multi_constructor)
        return
    # No explicit Loader given: register with all built-in loader classes.
    for loader_class in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
        loader_class.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    # Registration is exact-type: subclasses are handled by
    # add_multi_representer instead.
    Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    # Unlike add_representer, this also matches subclasses of data_type.
    Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
    """
    Metaclass for YAMLObject.

    When a subclass is created with a non-None ``yaml_tag`` in its own
    namespace, it is automatically registered: its ``from_yaml`` hook as a
    constructor with the configured loader(s) and its ``to_yaml`` hook as a
    representer with the configured dumper.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only register classes that declare their own non-None tag.
        if kwds.get('yaml_tag') is not None:
            # yaml_loader may be a single loader class or a list of them.
            if isinstance(cls.yaml_loader, list):
                loader_classes = cls.yaml_loader
            else:
                loader_classes = [cls.yaml_loader]
            for loader_class in loader_classes:
                loader_class.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """
    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Loader class(es) and dumper class the subclass is registered with
    # (registration happens in YAMLObjectMetaclass.__init__).
    yaml_loader = [Loader, FullLoader, UnsafeLoader]
    yaml_dumper = Dumper

    # Subclasses must override yaml_tag with a non-None tag to be registered.
    yaml_tag = None
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
            flow_style=cls.yaml_flow_style)
venv\Lib\site-packages\_yaml\__init__.py
# This is a stub package designed to roughly emulate the _yaml
# extension module, which previously existed as a standalone module
# and has been moved into the `yaml` package namespace.
# It does not perfectly mimic its old counterpart, but should get
# close enough for anyone who's relying on it even when they shouldn't.
import yaml

# In some circumstances, the yaml module we imported may be from a different
# version, so we need to tread carefully when poking at it here (it may not
# have the attributes we expect).
if not getattr(yaml, '__with_libyaml__', False):
    from sys import version_info

    # Raise the same error type a genuinely missing C extension would produce
    # on this Python version (ModuleNotFoundError exists from 3.6 onward).
    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
    raise exc("No module named '_yaml'")
else:
    from yaml._yaml import *
    import warnings
    warnings.warn(
        'The _yaml extension module is now located at yaml._yaml'
        ' and its location is subject to change. To use the'
        ' LibYAML-based parser and emitter, import from `yaml`:'
        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
        DeprecationWarning
    )
    del warnings
    # Don't `del yaml` here because yaml is actually an existing
    # namespace member of _yaml.

__name__ = '_yaml'
# If the module is top-level (i.e. not a part of any specific package)
# then the attribute should be set to ''.
# https://docs.python.org/3.8/library/types.html
__package__ = ''
from __future__ import annotations
import io
from typing import TYPE_CHECKING, Any
from bokeh.io import export_png, export_svg, show
from bokeh.io.export import get_screenshot_as_png
from bokeh.layouts import gridplot
from bokeh.models.annotations.labels import Label
from bokeh.palettes import Category10
from bokeh.plotting import figure
import numpy as np
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.util.bokeh_util import filled_to_bokeh, lines_to_bokeh
from contourpy.util.renderer import Renderer
if TYPE_CHECKING:
from bokeh.core.enums import OutputBackendType
from bokeh.models import GridPlot
from bokeh.palettes import Palette
from numpy.typing import ArrayLike
from selenium.webdriver.remote.webdriver import WebDriver
from contourpy import FillType, LineType
from contourpy._contourpy import FillReturn, LineReturn
class BokehRenderer(Renderer):
    """Utility renderer using Bokeh to render a grid of plots over the same (x, y) range.

    Args:
        nrows (int, optional): Number of rows of plots, default ``1``.
        ncols (int, optional): Number of columns of plots, default ``1``.
        figsize (tuple(float, float), optional): Figure size in inches (assuming 100 dpi), default
            ``(9, 9)``.
        show_frame (bool, optional): Whether to show frame and axes ticks, default ``True``.
        want_svg (bool, optional): Whether output is required in SVG format or not, default
            ``False``.

    Warning:
        :class:`~.BokehRenderer`, unlike :class:`~.MplRenderer`, needs to be told in advance if
        output to SVG format will be required later, otherwise it will assume PNG output.
    """
    _figures: list[figure]   # One Bokeh figure per grid cell, row-major order.
    _layout: GridPlot        # Grid layout wrapping all figures.
    _palette: Palette        # Palette backing the "C0".."C9" color shorthand.
    _want_svg: bool          # Whether figures use the "svg" output backend.

    def __init__(
        self,
        nrows: int = 1,
        ncols: int = 1,
        figsize: tuple[float, float] = (9, 9),
        show_frame: bool = True,
        want_svg: bool = False,
    ) -> None:
        self._want_svg = want_svg
        self._palette = Category10[10]
        # NOTE(review): dtype=int truncates fractional figsize values before
        # the 100x scaling (e.g. 9.5 -> 9) — confirm this is intended.
        total_size = 100*np.asarray(figsize, dtype=int)  # Assuming 100 dpi.
        nfigures = nrows*ncols
        self._figures = []
        # SVG output must be selected per-figure at creation time.
        backend: OutputBackendType = "svg" if self._want_svg else "canvas"
        for _ in range(nfigures):
            fig = figure(output_backend=backend)
            fig.xgrid.visible = False
            fig.ygrid.visible = False
            self._figures.append(fig)
            if not show_frame:
                fig.outline_line_color = None
                fig.axis.visible = False
        self._layout = gridplot(
            self._figures, ncols=ncols, toolbar_location=None,  # type: ignore[arg-type]
            width=total_size[0] // ncols, height=total_size[1] // nrows)

    def _convert_color(self, color: str) -> str:
        # Map "C0".."C9" shorthand to the corresponding Category10 palette
        # entry; any other string is passed through unchanged.
        if isinstance(color, str) and color[0] == "C":
            index = int(color[1:])
            color = self._palette[index]
        return color

    def _get_figure(self, ax: figure | int) -> figure:
        # Accept either a figure object or an index into self._figures.
        if isinstance(ax, int):
            ax = self._figures[ax]
        return ax

    def filled(
        self,
        filled: FillReturn,
        fill_type: FillType | str,
        ax: figure | int = 0,
        color: str = "C0",
        alpha: float = 0.7,
    ) -> None:
        """Plot filled contours on a single plot.

        Args:
            filled (sequence of arrays): Filled contour data as returned by
                :meth:`~.ContourGenerator.filled`.
            fill_type (FillType or str): Type of :meth:`~.ContourGenerator.filled` data as returned
                by :attr:`~.ContourGenerator.fill_type`, or a string equivalent.
            ax (int or Bokeh Figure, optional): Which plot to use, default ``0``.
            color (str, optional): Color to plot with. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``Category10`` palette. Default ``"C0"``.
            alpha (float, optional): Opacity to plot with, default ``0.7``.
        """
        fill_type = as_fill_type(fill_type)
        fig = self._get_figure(ax)
        color = self._convert_color(color)
        xs, ys = filled_to_bokeh(filled, fill_type)
        if len(xs) > 0:
            fig.multi_polygons(xs=[xs], ys=[ys], color=color, fill_alpha=alpha, line_width=0)  # type: ignore[arg-type]

    def grid(
        self,
        x: ArrayLike,
        y: ArrayLike,
        ax: figure | int = 0,
        color: str = "black",
        alpha: float = 0.1,
        point_color: str | None = None,
        quad_as_tri_alpha: float = 0,
    ) -> None:
        """Plot quad grid lines on a single plot.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            ax (int or Bokeh Figure, optional): Which plot to use, default ``0``.
            color (str, optional): Color to plot grid lines, default ``"black"``.
            alpha (float, optional): Opacity to plot lines with, default ``0.1``.
            point_color (str, optional): Color to plot grid points or ``None`` if grid points
                should not be plotted, default ``None``.
            quad_as_tri_alpha (float, optional): Opacity to plot ``quad_as_tri`` grid, default
                ``0``.

        Colors may be a string color or the letter ``"C"`` followed by an integer in the range
        ``"C0"`` to ``"C9"`` to use a color from the ``Category10`` palette.

        Warning:
            ``quad_as_tri_alpha > 0`` plots all quads as though they are unmasked.
        """
        fig = self._get_figure(ax)
        x, y = self._grid_as_2d(x, y)
        # Grid lines: all rows of the grid followed by all columns.
        xs = list(x) + list(x.T)
        ys = list(y) + list(y.T)
        kwargs = {"line_color": color, "alpha": alpha}
        fig.multi_line(xs, ys, **kwargs)
        if quad_as_tri_alpha > 0:
            # Assumes no quad mask.
            # Quad centre points; each diagonal is drawn as a 3-point line
            # through the centre.
            xmid = (0.25*(x[:-1, :-1] + x[1:, :-1] + x[:-1, 1:] + x[1:, 1:])).ravel()
            ymid = (0.25*(y[:-1, :-1] + y[1:, :-1] + y[:-1, 1:] + y[1:, 1:])).ravel()
            # NOTE(review): these lines reuse ``alpha`` rather than
            # ``quad_as_tri_alpha`` — confirm that is intended.
            fig.multi_line(
                list(np.stack((x[:-1, :-1].ravel(), xmid, x[1:, 1:].ravel()), axis=1)),
                list(np.stack((y[:-1, :-1].ravel(), ymid, y[1:, 1:].ravel()), axis=1)),
                **kwargs)
            fig.multi_line(
                list(np.stack((x[:-1, 1:].ravel(), xmid, x[1:, :-1].ravel()), axis=1)),
                list(np.stack((y[:-1, 1:].ravel(), ymid, y[1:, :-1].ravel()), axis=1)),
                **kwargs)
        if point_color is not None:
            # NOTE(review): fills with ``color`` rather than ``point_color``
            # — verify against MplRenderer.grid which uses point_color.
            fig.scatter(
                x=x.ravel(), y=y.ravel(), fill_color=color, line_color=None, alpha=alpha,
                marker="circle", size=8)

    def lines(
        self,
        lines: LineReturn,
        line_type: LineType | str,
        ax: figure | int = 0,
        color: str = "C0",
        alpha: float = 1.0,
        linewidth: float = 1,
    ) -> None:
        """Plot contour lines on a single plot.

        Args:
            lines (sequence of arrays): Contour line data as returned by
                :meth:`~.ContourGenerator.lines`.
            line_type (LineType or str): Type of :meth:`~.ContourGenerator.lines` data as returned
                by :attr:`~.ContourGenerator.line_type`, or a string equivalent.
            ax (int or Bokeh Figure, optional): Which plot to use, default ``0``.
            color (str, optional): Color to plot lines. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``Category10`` palette. Default ``"C0"``.
            alpha (float, optional): Opacity to plot lines with, default ``1.0``.
            linewidth (float, optional): Width of lines, default ``1``.

        Note:
            Assumes all lines are open line strips not closed line loops.
        """
        line_type = as_line_type(line_type)
        fig = self._get_figure(ax)
        color = self._convert_color(color)
        xs, ys = lines_to_bokeh(lines, line_type)
        if xs is not None:
            assert ys is not None
            fig.line(xs, ys, line_color=color, line_alpha=alpha, line_width=linewidth)

    def mask(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike | np.ma.MaskedArray[Any, Any],
        ax: figure | int = 0,
        color: str = "black",
    ) -> None:
        """Plot masked out grid points as circles on a single plot.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            z (masked array of shape (ny, nx): z-values.
            ax (int or Bokeh Figure, optional): Which plot to use, default ``0``.
            color (str, optional): Circle color, default ``"black"``.
        """
        mask = np.ma.getmask(z)
        # Nothing to draw when z carries no mask at all.
        if mask is np.ma.nomask:
            return
        fig = self._get_figure(ax)
        color = self._convert_color(color)
        x, y = self._grid_as_2d(x, y)
        fig.scatter(x[mask], y[mask], fill_color=color, marker="circle", size=10)

    def save(
        self,
        filename: str,
        transparent: bool = False,
        *,
        webdriver: WebDriver | None = None,
    ) -> None:
        """Save plots to SVG or PNG file.

        Args:
            filename (str): Filename to save to.
            transparent (bool, optional): Whether background should be transparent, default
                ``False``.
            webdriver (WebDriver, optional): Selenium WebDriver instance to use to create the image.

                .. versionadded:: 1.1.1

        Warning:
            To output to SVG file, ``want_svg=True`` must have been passed to the constructor.
        """
        if transparent:
            # Clear both the plot background and the surrounding border fill.
            for fig in self._figures:
                fig.background_fill_color = None
                fig.border_fill_color = None
        if self._want_svg:
            export_svg(self._layout, filename=filename, webdriver=webdriver)
        else:
            export_png(self._layout, filename=filename, webdriver=webdriver)

    def save_to_buffer(self, *, webdriver: WebDriver | None = None) -> io.BytesIO:
        """Save plots to an ``io.BytesIO`` buffer.

        Args:
            webdriver (WebDriver, optional): Selenium WebDriver instance to use to create the image.

                .. versionadded:: 1.1.1

        Return:
            BytesIO: PNG image buffer.
        """
        image = get_screenshot_as_png(self._layout, driver=webdriver)
        buffer = io.BytesIO()
        image.save(buffer, "png")
        return buffer

    def show(self) -> None:
        """Show plots in web browser, in usual Bokeh manner.
        """
        show(self._layout)

    def title(self, title: str, ax: figure | int = 0, color: str | None = None) -> None:
        """Set the title of a single plot.

        Args:
            title (str): Title text.
            ax (int or Bokeh Figure, optional): Which plot to set the title of, default ``0``.
            color (str, optional): Color to set title. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``Category10`` palette. Default ``None`` which is ``black``.
        """
        fig = self._get_figure(ax)
        fig.title = title
        fig.title.align = "center"  # type: ignore[attr-defined]
        if color is not None:
            fig.title.text_color = self._convert_color(color)  # type: ignore[attr-defined]

    def z_values(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        ax: figure | int = 0,
        color: str = "green",
        fmt: str = ".1f",
        quad_as_tri: bool = False,
    ) -> None:
        """Show ``z`` values on a single plot.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            z (array-like of shape (ny, nx): z-values.
            ax (int or Bokeh Figure, optional): Which plot to use, default ``0``.
            color (str, optional): Color of added text. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``Category10`` palette. Default ``"green"``.
            fmt (str, optional): Format to display z-values, default ``".1f"``.
            quad_as_tri (bool, optional): Whether to show z-values at the ``quad_as_tri`` centres
                of quads.

        Warning:
            ``quad_as_tri=True`` shows z-values for all quads, even if masked.
        """
        fig = self._get_figure(ax)
        color = self._convert_color(color)
        x, y = self._grid_as_2d(x, y)
        z = np.asarray(z)
        ny, nx = z.shape
        kwargs = {"text_color": color, "text_align": "center", "text_baseline": "middle"}
        # One label per grid point.
        for j in range(ny):
            for i in range(nx):
                label = Label(x=x[j, i], y=y[j, i], text=f"{z[j, i]:{fmt}}", **kwargs)  # type: ignore[arg-type]
                fig.add_layout(label)
        if quad_as_tri:
            # Additional label at the mean position/value of each quad.
            for j in range(ny-1):
                for i in range(nx-1):
                    xx = np.mean(x[j:j+2, i:i+2])
                    yy = np.mean(y[j:j+2, i:i+2])
                    zz = np.mean(z[j:j+2, i:i+2])
                    fig.add_layout(Label(x=xx, y=yy, text=f"{zz:{fmt}}", **kwargs))  # type: ignore[arg-type]
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from contourpy import FillType, LineType
from contourpy.array import offsets_from_codes
from contourpy.convert import convert_lines
from contourpy.dechunk import dechunk_lines
if TYPE_CHECKING:
from contourpy._contourpy import (
CoordinateArray,
FillReturn,
LineReturn,
LineReturn_ChunkCombinedNan,
)
def filled_to_bokeh(
    filled: FillReturn,
    fill_type: FillType,
) -> tuple[list[list[CoordinateArray]], list[list[CoordinateArray]]]:
    """Convert filled contour data to the nested coordinate lists used by
    Bokeh's ``multi_polygons``: one outer-list entry per polygon, each
    containing one coordinate array per boundary (outer boundary followed
    by its holes).

    Raises RuntimeError for fill types with no implemented conversion.
    """
    xs: list[list[CoordinateArray]] = []
    ys: list[list[CoordinateArray]] = []
    if fill_type in (FillType.OuterOffset, FillType.ChunkCombinedOffset,
                     FillType.OuterCode, FillType.ChunkCombinedCode):
        # Single level of offsets (or codes) per chunk: each consecutive
        # offset pair delimits one boundary.
        have_codes = fill_type in (FillType.OuterCode, FillType.ChunkCombinedCode)
        for points, offsets in zip(*filled):
            if points is None:
                continue
            if have_codes:
                # Convert Matplotlib-style path codes to boundary offsets.
                offsets = offsets_from_codes(offsets)
            xs.append([])  # New outer with zero or more holes.
            ys.append([])
            for i in range(len(offsets)-1):
                xys = points[offsets[i]:offsets[i+1]]
                xs[-1].append(xys[:, 0])
                ys[-1].append(xys[:, 1])
    elif fill_type in (FillType.ChunkCombinedCodeOffset, FillType.ChunkCombinedOffsetOffset):
        # Two levels: outer_offsets groups the codes/offsets by outer polygon.
        for points, codes_or_offsets, outer_offsets in zip(*filled):
            if points is None:
                continue
            for j in range(len(outer_offsets)-1):
                if fill_type == FillType.ChunkCombinedCodeOffset:
                    # Slice this outer's codes, then shift the derived offsets
                    # back into the coordinates of the full points array.
                    codes = codes_or_offsets[outer_offsets[j]:outer_offsets[j+1]]
                    offsets = offsets_from_codes(codes) + outer_offsets[j]
                else:
                    # +1 so the slice includes this outer's closing offset.
                    offsets = codes_or_offsets[outer_offsets[j]:outer_offsets[j+1]+1]
                xs.append([])  # New outer with zero or more holes.
                ys.append([])
                for k in range(len(offsets)-1):
                    xys = points[offsets[k]:offsets[k+1]]
                    xs[-1].append(xys[:, 0])
                    ys[-1].append(xys[:, 1])
    else:
        raise RuntimeError(f"Conversion of FillType {fill_type} to Bokeh is not implemented")
    return xs, ys
def lines_to_bokeh(
    lines: LineReturn,
    line_type: LineType,
) -> tuple[CoordinateArray | None, CoordinateArray | None]:
    """Convert contour line data to the x and y arrays used by Bokeh's
    ``line`` glyph, or ``(None, None)`` when there are no lines.
    """
    # Normalize to a single chunk of nan-separated line strips.
    combined = convert_lines(lines, line_type, LineType.ChunkCombinedNan)
    combined = dechunk_lines(combined, LineType.ChunkCombinedNan)
    if TYPE_CHECKING:
        combined = cast(LineReturn_ChunkCombinedNan, combined)
    points = combined[0][0]
    if points is None:
        return None, None
    return points[:, 0], points[:, 1]
venv\Lib\site-packages\contourpy\util\data.py
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import numpy as np
if TYPE_CHECKING:
from contourpy._contourpy import CoordinateArray
def simple(
shape: tuple[int, int], want_mask: bool = False,
) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
"""Return simple test data consisting of the sum of two gaussians.
Args:
shape (tuple(int, int)): 2D shape of data to return.
want_mask (bool, optional): Whether test data should be masked or not, default ``False``.
Return:
Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
``want_mask=True``.
"""
ny, nx = shape
x = np.arange(nx, dtype=np.float64)
y = np.arange(ny, dtype=np.float64)
x, y = np.meshgrid(x, y)
xscale = nx - 1.0
yscale = ny - 1.0
# z is sum of 2D gaussians.
amp = np.asarray([1.0, -1.0, 0.8, -0.9, 0.7])
mid = np.asarray([[0.4, 0.2], [0.3, 0.8], [0.9, 0.75], [0.7, 0.3], [0.05, 0.7]])
width = np.asarray([0.4, 0.2, 0.2, 0.2, 0.1])
z = np.zeros_like(x)
for i in range(len(amp)):
z += amp[i]*np.exp(-((x/xscale - mid[i, 0])**2 + (y/yscale - mid[i, 1])**2) / width[i]**2)
if want_mask:
mask = np.logical_or(
((x/xscale - 1.0)**2 / 0.2 + (y/yscale - 0.0)**2 / 0.1) < 1.0,
((x/xscale - 0.2)**2 / 0.02 + (y/yscale - 0.45)**2 / 0.08) < 1.0,
)
z = np.ma.array(z, mask=mask) # type: ignore[no-untyped-call]
return x, y, z
def random(
shape: tuple[int, int], seed: int = 2187, mask_fraction: float = 0.0,
) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
"""Return random test data in the range 0 to 1.
Args:
shape (tuple(int, int)): 2D shape of data to return.
seed (int, optional): Seed for random number generator, default 2187.
mask_fraction (float, optional): Fraction of elements to mask, default 0.
Return:
Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
``mask_fraction`` is greater than zero.
"""
ny, nx = shape
x = np.arange(nx, dtype=np.float64)
y = np.arange(ny, dtype=np.float64)
x, y = np.meshgrid(x, y)
rng = np.random.default_rng(seed)
z = rng.uniform(size=shape)
if mask_fraction > 0.0:
mask_fraction = min(mask_fraction, 0.99)
mask = rng.uniform(size=shape) < mask_fraction
z = np.ma.array(z, mask=mask) # type: ignore[no-untyped-call]
return x, y, z
from __future__ import annotations
import io
from itertools import pairwise
from typing import TYPE_CHECKING, Any, cast
import matplotlib.collections as mcollections
import matplotlib.pyplot as plt
import numpy as np
from contourpy import FillType, LineType
from contourpy.convert import convert_filled, convert_lines
from contourpy.enum_util import as_fill_type, as_line_type
from contourpy.util.mpl_util import filled_to_mpl_paths, lines_to_mpl_paths
from contourpy.util.renderer import Renderer
if TYPE_CHECKING:
from collections.abc import Sequence
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from numpy.typing import ArrayLike
import contourpy._contourpy as cpy
class MplRenderer(Renderer):
    """Utility renderer using Matplotlib to render a grid of plots over the same (x, y) range.

    Args:
        nrows (int, optional): Number of rows of plots, default ``1``.
        ncols (int, optional): Number of columns of plots, default ``1``.
        figsize (tuple(float, float), optional): Figure size in inches, default ``(9, 9)``.
        show_frame (bool, optional): Whether to show frame and axes ticks, default ``True``.
        backend (str, optional): Matplotlib backend to use or ``None`` for default backend.
            Default ``None``.
        gridspec_kw (dict, optional): Gridspec keyword arguments to pass to ``plt.subplots``,
            default None.
    """
    _axes: Sequence[Axes]   # Flattened axes, row-major order.
    _fig: Figure            # The figure owning all axes.
    _want_tight: bool       # Whether _autoscale applies tight_layout to multi-axes figures.

    def __init__(
        self,
        nrows: int = 1,
        ncols: int = 1,
        figsize: tuple[float, float] = (9, 9),
        show_frame: bool = True,
        backend: str | None = None,
        gridspec_kw: dict[str, Any] | None = None,
    ) -> None:
        if backend is not None:
            # Backend must be selected before the figure is created.
            import matplotlib as mpl
            mpl.use(backend)
        kwargs: dict[str, Any] = {"figsize": figsize, "squeeze": False,
                                  "sharex": True, "sharey": True}
        if gridspec_kw is not None:
            kwargs["gridspec_kw"] = gridspec_kw
        else:
            # Equal aspect by default when the caller has no layout overrides.
            kwargs["subplot_kw"] = {"aspect": "equal"}
        self._fig, axes = plt.subplots(nrows, ncols, **kwargs)
        self._axes = axes.flatten()
        if not show_frame:
            for ax in self._axes:
                ax.axis("off")
        self._want_tight = True

    def __del__(self) -> None:
        # Close the figure so pyplot does not keep it alive indefinitely.
        if hasattr(self, "_fig"):
            plt.close(self._fig)

    def _autoscale(self) -> None:
        # Using axes._need_autoscale attribute if need to autoscale before rendering after adding
        # lines/filled. Only want to autoscale once per axes regardless of how many lines/filled
        # added.
        for ax in self._axes:
            if getattr(ax, "_need_autoscale", False):
                ax.autoscale_view(tight=True)
                ax._need_autoscale = False  # type: ignore[attr-defined]
        if self._want_tight and len(self._axes) > 1:
            self._fig.tight_layout()

    def _get_ax(self, ax: Axes | int) -> Axes:
        # Accept either an Axes object or an index into self._axes.
        if isinstance(ax, int):
            ax = self._axes[ax]
        return ax

    def filled(
        self,
        filled: cpy.FillReturn,
        fill_type: FillType | str,
        ax: Axes | int = 0,
        color: str = "C0",
        alpha: float = 0.7,
    ) -> None:
        """Plot filled contours on a single Axes.

        Args:
            filled (sequence of arrays): Filled contour data as returned by
                :meth:`~.ContourGenerator.filled`.
            fill_type (FillType or str): Type of :meth:`~.ContourGenerator.filled` data as returned
                by :attr:`~.ContourGenerator.fill_type`, or string equivalent
            ax (int or Maplotlib Axes, optional): Which axes to plot on, default ``0``.
            color (str, optional): Color to plot with. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``tab10`` colormap. Default ``"C0"``.
            alpha (float, optional): Opacity to plot with, default ``0.7``.
        """
        fill_type = as_fill_type(fill_type)
        ax = self._get_ax(ax)
        paths = filled_to_mpl_paths(filled, fill_type)
        collection = mcollections.PathCollection(
            paths, facecolors=color, edgecolors="none", lw=0, alpha=alpha)
        ax.add_collection(collection)
        # Defer autoscaling until render time (see _autoscale).
        ax._need_autoscale = True  # type: ignore[attr-defined]

    def grid(
        self,
        x: ArrayLike,
        y: ArrayLike,
        ax: Axes | int = 0,
        color: str = "black",
        alpha: float = 0.1,
        point_color: str | None = None,
        quad_as_tri_alpha: float = 0,
    ) -> None:
        """Plot quad grid lines on a single Axes.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``.
            color (str, optional): Color to plot grid lines, default ``"black"``.
            alpha (float, optional): Opacity to plot lines with, default ``0.1``.
            point_color (str, optional): Color to plot grid points or ``None`` if grid points
                should not be plotted, default ``None``.
            quad_as_tri_alpha (float, optional): Opacity to plot ``quad_as_tri`` grid, default 0.

        Colors may be a string color or the letter ``"C"`` followed by an integer in the range
        ``"C0"`` to ``"C9"`` to use a color from the ``tab10`` colormap.

        Warning:
            ``quad_as_tri_alpha > 0`` plots all quads as though they are unmasked.
        """
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        kwargs: dict[str, Any] = {"color": color, "alpha": alpha}
        # Rows of the grid, then columns.
        ax.plot(x, y, x.T, y.T, **kwargs)
        if quad_as_tri_alpha > 0:
            # Assumes no quad mask.
            # Quad centre points; each diagonal drawn as a 3-point line.
            xmid = 0.25*(x[:-1, :-1] + x[1:, :-1] + x[:-1, 1:] + x[1:, 1:])
            ymid = 0.25*(y[:-1, :-1] + y[1:, :-1] + y[:-1, 1:] + y[1:, 1:])
            kwargs["alpha"] = quad_as_tri_alpha
            ax.plot(
                np.stack((x[:-1, :-1], xmid, x[1:, 1:])).reshape((3, -1)),
                np.stack((y[:-1, :-1], ymid, y[1:, 1:])).reshape((3, -1)),
                np.stack((x[1:, :-1], xmid, x[:-1, 1:])).reshape((3, -1)),
                np.stack((y[1:, :-1], ymid, y[:-1, 1:])).reshape((3, -1)),
                **kwargs)
        if point_color is not None:
            ax.plot(x, y, color=point_color, alpha=alpha, marker="o", lw=0)
        ax._need_autoscale = True  # type: ignore[attr-defined]

    def lines(
        self,
        lines: cpy.LineReturn,
        line_type: LineType | str,
        ax: Axes | int = 0,
        color: str = "C0",
        alpha: float = 1.0,
        linewidth: float = 1,
    ) -> None:
        """Plot contour lines on a single Axes.

        Args:
            lines (sequence of arrays): Contour line data as returned by
                :meth:`~.ContourGenerator.lines`.
            line_type (LineType or str): Type of :meth:`~.ContourGenerator.lines` data as returned
                by :attr:`~.ContourGenerator.line_type`, or string equivalent.
            ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``.
            color (str, optional): Color to plot lines. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``tab10`` colormap. Default ``"C0"``.
            alpha (float, optional): Opacity to plot lines with, default ``1.0``.
            linewidth (float, optional): Width of lines, default ``1``.
        """
        line_type = as_line_type(line_type)
        ax = self._get_ax(ax)
        paths = lines_to_mpl_paths(lines, line_type)
        collection = mcollections.PathCollection(
            paths, facecolors="none", edgecolors=color, lw=linewidth, alpha=alpha)
        ax.add_collection(collection)
        ax._need_autoscale = True  # type: ignore[attr-defined]

    def mask(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike | np.ma.MaskedArray[Any, Any],
        ax: Axes | int = 0,
        color: str = "black",
    ) -> None:
        """Plot masked out grid points as circles on a single Axes.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            z (masked array of shape (ny, nx): z-values.
            ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``.
            color (str, optional): Circle color, default ``"black"``.
        """
        mask = np.ma.getmask(z)
        # Nothing to draw when z carries no mask at all.
        if mask is np.ma.nomask:
            return
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        ax.plot(x[mask], y[mask], "o", c=color)

    def save(self, filename: str, transparent: bool = False) -> None:
        """Save plots to SVG or PNG file.

        Args:
            filename (str): Filename to save to.
            transparent (bool, optional): Whether background should be transparent, default
                ``False``.
        """
        self._autoscale()
        self._fig.savefig(filename, transparent=transparent)

    def save_to_buffer(self) -> io.BytesIO:
        """Save plots to an ``io.BytesIO`` buffer.

        Return:
            BytesIO: PNG image buffer.
        """
        self._autoscale()
        buf = io.BytesIO()
        self._fig.savefig(buf, format="png")
        buf.seek(0)
        return buf

    def show(self) -> None:
        """Show plots in an interactive window, in the usual Matplotlib manner.
        """
        self._autoscale()
        plt.show()

    def title(self, title: str, ax: Axes | int = 0, color: str | None = None) -> None:
        """Set the title of a single Axes.

        Args:
            title (str): Title text.
            ax (int or Matplotlib Axes, optional): Which Axes to set the title of, default ``0``.
            color (str, optional): Color to set title. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``tab10`` colormap. Default is ``None`` which uses Matplotlib's default title color
                that depends on the stylesheet in use.
        """
        if color:
            self._get_ax(ax).set_title(title, color=color)
        else:
            self._get_ax(ax).set_title(title)

    def z_values(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        ax: Axes | int = 0,
        color: str = "green",
        fmt: str = ".1f",
        quad_as_tri: bool = False,
    ) -> None:
        """Show ``z`` values on a single Axes.

        Args:
            x (array-like of shape (ny, nx) or (nx,)): The x-coordinates of the grid points.
            y (array-like of shape (ny, nx) or (ny,)): The y-coordinates of the grid points.
            z (array-like of shape (ny, nx): z-values.
            ax (int or Matplotlib Axes, optional): Which Axes to plot on, default ``0``.
            color (str, optional): Color of added text. May be a string color or the letter ``"C"``
                followed by an integer in the range ``"C0"`` to ``"C9"`` to use a color from the
                ``tab10`` colormap. Default ``"green"``.
            fmt (str, optional): Format to display z-values, default ``".1f"``.
            quad_as_tri (bool, optional): Whether to show z-values at the ``quad_as_tri`` centers
                of quads.

        Warning:
            ``quad_as_tri=True`` shows z-values for all quads, even if masked.
        """
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        z = np.asarray(z)
        ny, nx = z.shape
        # One text label per grid point.
        for j in range(ny):
            for i in range(nx):
                ax.text(x[j, i], y[j, i], f"{z[j, i]:{fmt}}", ha="center", va="center",
                        color=color, clip_on=True)
        if quad_as_tri:
            # Additional label at the mean position/value of each quad.
            for j in range(ny-1):
                for i in range(nx-1):
                    xx = np.mean(x[j:j+2, i:i+2], dtype=np.float64)
                    yy = np.mean(y[j:j+2, i:i+2], dtype=np.float64)
                    zz = np.mean(z[j:j+2, i:i+2])
                    ax.text(xx, yy, f"{zz:{fmt}}", ha="center", va="center", color=color,
                            clip_on=True)
class MplTestRenderer(MplRenderer):
    """Test renderer implemented using Matplotlib.

    No whitespace around plots and no spines/ticks displayed.
    Uses Agg backend, so can only save to file/buffer, cannot call ``show()``.
    """
    def __init__(
        self,
        nrows: int = 1,
        ncols: int = 1,
        figsize: tuple[float, float] = (9, 9),
    ) -> None:
        # Squeeze the subplots right up to the figure edges.
        margins = dict(
            left=0.01,
            right=0.99,
            top=0.99,
            bottom=0.01,
            wspace=0.01,
            hspace=0.01,
        )
        super().__init__(
            nrows, ncols, figsize, show_frame=True, backend="Agg", gridspec_kw=margins,
        )
        # Strip margins and tick marks from every axes.
        for axes in self._axes:
            axes.set_xmargin(0.0)
            axes.set_ymargin(0.0)
            axes.set_xticks([])
            axes.set_yticks([])
        self._want_tight = False
class MplDebugRenderer(MplRenderer):
    """Debug renderer implemented using Matplotlib.

    Extends ``MplRenderer`` to add extra information to help in debugging such as markers, arrows,
    text, etc.
    """
    def __init__(
        self,
        nrows: int = 1,
        ncols: int = 1,
        figsize: tuple[float, float] = (9, 9),
        show_frame: bool = True,
    ) -> None:
        super().__init__(nrows, ncols, figsize, show_frame)

    def _arrow(
        self,
        ax: Axes,
        line_start: cpy.CoordinateArray,
        line_end: cpy.CoordinateArray,
        color: str,
        alpha: float,
        arrow_size: float,
    ) -> None:
        # Draw a 3-point arrow head centered on the midpoint of the segment,
        # oriented along the direction of travel.
        mid = 0.5*(line_start + line_end)
        along = line_end - line_start
        along /= np.sqrt(np.dot(along, along))  # Unit vector.
        right = np.asarray((along[1], -along[0]))  # Perpendicular to ``along``.
        arrow = np.stack((
            mid - (along*0.5 - right)*arrow_size,
            mid + along*0.5*arrow_size,
            mid - (along*0.5 + right)*arrow_size,
        ))
        ax.plot(arrow[:, 0], arrow[:, 1], "-", c=color, alpha=alpha)

    def filled(
        self,
        filled: cpy.FillReturn,
        fill_type: FillType | str,
        ax: Axes | int = 0,
        color: str = "C1",
        alpha: float = 0.7,
        line_color: str = "C0",
        line_alpha: float = 0.7,
        point_color: str = "C0",
        start_point_color: str = "red",
        arrow_size: float = 0.1,
    ) -> None:
        # Plot filled contours as usual, then overlay boundary lines, direction
        # arrows and points to aid debugging. Pass ``None`` for ``line_color``
        # and ``point_color`` to skip the overlays.
        fill_type = as_fill_type(fill_type)
        super().filled(filled, fill_type, ax, color, alpha)

        if line_color is None and point_color is None:
            return

        ax = self._get_ax(ax)
        # Normalize to a single representation to simplify the loops below.
        filled = convert_filled(filled, fill_type, FillType.ChunkCombinedOffset)

        # Lines.
        if line_color is not None:
            for points, offsets in zip(*filled):
                if points is None:
                    continue
                # Each (start, end) pair of offsets delimits one boundary.
                for start, end in pairwise(offsets):
                    xys = points[start:end]
                    ax.plot(xys[:, 0], xys[:, 1], c=line_color, alpha=line_alpha)

                    if arrow_size > 0.0:
                        n = len(xys)
                        for i in range(n-1):
                            self._arrow(ax, xys[i], xys[i+1], line_color, line_alpha, arrow_size)

        # Points.
        if point_color is not None:
            for points, offsets in zip(*filled):
                if points is None:
                    continue
                # Mask out each boundary's final (duplicate) point; optionally
                # mask the start points too so they can be drawn separately.
                mask = np.ones(offsets[-1], dtype=bool)
                mask[offsets[1:]-1] = False  # Exclude end points.
                if start_point_color is not None:
                    start_indices = offsets[:-1]
                    mask[start_indices] = False  # Exclude start points.
                ax.plot(
                    points[:, 0][mask], points[:, 1][mask], "o", c=point_color, alpha=line_alpha)

                if start_point_color is not None:
                    ax.plot(points[:, 0][start_indices], points[:, 1][start_indices], "o",
                            c=start_point_color, alpha=line_alpha)

    def lines(
        self,
        lines: cpy.LineReturn,
        line_type: LineType | str,
        ax: Axes | int = 0,
        color: str = "C0",
        alpha: float = 1.0,
        linewidth: float = 1,
        point_color: str = "C0",
        start_point_color: str = "red",
        arrow_size: float = 0.1,
    ) -> None:
        # Plot contour lines as usual, then overlay direction arrows and
        # points. Start points may be highlighted in a separate color.
        line_type = as_line_type(line_type)
        super().lines(lines, line_type, ax, color, alpha, linewidth)

        if arrow_size == 0.0 and point_color is None:
            return

        ax = self._get_ax(ax)
        separate_lines = convert_lines(lines, line_type, LineType.Separate)
        if TYPE_CHECKING:
            separate_lines = cast(cpy.LineReturn_Separate, separate_lines)

        if arrow_size > 0.0:
            for line in separate_lines:
                for i in range(len(line)-1):
                    self._arrow(ax, line[i], line[i+1], color, alpha, arrow_size)

        if point_color is not None:
            for line in separate_lines:
                start_index = 0
                end_index = len(line)
                if start_point_color is not None:
                    ax.plot(line[0, 0], line[0, 1], "o", c=start_point_color, alpha=alpha)
                    start_index = 1
                # Closed loop: final point duplicates the first, do not redraw it.
                if line[0][0] == line[-1][0] and line[0][1] == line[-1][1]:
                    end_index -= 1
                ax.plot(line[start_index:end_index, 0], line[start_index:end_index, 1], "o",
                        c=color, alpha=alpha)

    def point_numbers(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        ax: Axes | int = 0,
        color: str = "red",
    ) -> None:
        # Label every grid point with its flat (row-major) index.
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        z = np.asarray(z)
        ny, nx = z.shape
        for j in range(ny):
            for i in range(nx):
                quad = i + j*nx
                ax.text(x[j, i], y[j, i], str(quad), ha="right", va="top", color=color,
                        clip_on=True)

    def quad_numbers(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        ax: Axes | int = 0,
        color: str = "blue",
    ) -> None:
        # Label every quad with its index, placed at the mean of its 4 corners.
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        z = np.asarray(z)
        ny, nx = z.shape
        for j in range(1, ny):
            for i in range(1, nx):
                quad = i + j*nx
                xmid = x[j-1:j+1, i-1:i+1].mean()
                ymid = y[j-1:j+1, i-1:i+1].mean()
                ax.text(xmid, ymid, str(quad), ha="center", va="center", color=color, clip_on=True)

    def z_levels(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        lower_level: float,
        upper_level: float | None = None,
        ax: Axes | int = 0,
        color: str = "green",
    ) -> None:
        # Label each grid point with 0, 1 or 2 according to where its z-value
        # lies relative to the contour level(s): 0 at or below ``lower_level``,
        # 1 between the levels, 2 above ``upper_level`` (when given).
        ax = self._get_ax(ax)
        x, y = self._grid_as_2d(x, y)
        z = np.asarray(z)
        ny, nx = z.shape
        for j in range(ny):
            for i in range(nx):
                zz = z[j, i]
                if upper_level is not None and zz > upper_level:
                    z_level = 2
                elif zz > lower_level:
                    z_level = 1
                else:
                    z_level = 0
                ax.text(x[j, i], y[j, i], str(z_level), ha="left", va="bottom", color=color,
                        clip_on=True)
venv\Lib\site-packages\contourpy\util\mpl_util.py
from __future__ import annotations
from itertools import pairwise
from typing import TYPE_CHECKING, cast
import matplotlib.path as mpath
import numpy as np
from contourpy import FillType, LineType
from contourpy.array import codes_from_offsets
if TYPE_CHECKING:
from contourpy._contourpy import FillReturn, LineReturn, LineReturn_Separate
def filled_to_mpl_paths(filled: FillReturn, fill_type: FillType) -> list[mpath.Path]:
    # Convert filled contours (in any FillType representation) into a list of
    # Matplotlib Paths, with holes encoded via Path codes.
    if fill_type in (FillType.OuterCode, FillType.ChunkCombinedCode):
        # Points are already paired with MPL-style codes.
        paths = [mpath.Path(points, codes) for points, codes in zip(*filled) if points is not None]
    elif fill_type in (FillType.OuterOffset, FillType.ChunkCombinedOffset):
        # Boundary offsets must first be converted to MPL codes.
        paths = [mpath.Path(points, codes_from_offsets(offsets))
                 for points, offsets in zip(*filled) if points is not None]
    elif fill_type == FillType.ChunkCombinedCodeOffset:
        paths = []
        for points, codes, outer_offsets in zip(*filled):
            if points is None:
                continue
            # Split combined arrays into one (points, codes) pair per outer
            # boundary; endpoints of outer_offsets are implicit split bounds.
            points = np.split(points, outer_offsets[1:-1])
            codes = np.split(codes, outer_offsets[1:-1])
            paths += [mpath.Path(p, c) for p, c in zip(points, codes)]
    elif fill_type == FillType.ChunkCombinedOffsetOffset:
        paths = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is None:
                continue
            for i in range(len(outer_offsets)-1):
                # Offsets of the boundaries belonging to outer boundary i,
                # rebased to the start of this polygon's points.
                offs = offsets[outer_offsets[i]:outer_offsets[i+1]+1]
                pts = points[offs[0]:offs[-1]]
                paths += [mpath.Path(pts, codes_from_offsets(offs - offs[0]))]
    else:
        raise RuntimeError(f"Conversion of FillType {fill_type} to MPL Paths is not implemented")
    return paths
def lines_to_mpl_paths(lines: LineReturn, line_type: LineType) -> list[mpath.Path]:
    """Convert contour lines of any ``LineType`` into a list of Matplotlib Paths."""
    def _path(line_points):
        # Drawing as Paths so that they can be closed correctly: mark the Path
        # closed when its first and last points coincide.
        is_closed = (line_points[0, 0] == line_points[-1, 0] and
                     line_points[0, 1] == line_points[-1, 1])
        return mpath.Path(line_points, closed=is_closed)

    if line_type == LineType.Separate:
        if TYPE_CHECKING:
            lines = cast(LineReturn_Separate, lines)
        return [_path(line) for line in lines]

    if line_type in (LineType.SeparateCode, LineType.ChunkCombinedCode):
        # Points already paired with MPL-style codes.
        return [mpath.Path(points, codes) for points, codes in zip(*lines) if points is not None]

    if line_type == LineType.ChunkCombinedOffset:
        paths = []
        for points, offsets in zip(*lines):
            if points is None:
                continue
            # Consecutive offsets delimit the individual lines.
            for start, end in pairwise(offsets):
                paths.append(_path(points[start:end]))
        return paths

    if line_type == LineType.ChunkCombinedNan:
        paths = []
        for points in lines[0]:
            if points is None:
                continue
            # Lines are separated by NaN rows; add sentinels at both ends so
            # that each (s, e) pair brackets one line.
            nan_idx = np.nonzero(np.isnan(points[:, 0]))[0]
            nan_idx = np.concatenate([[-1], nan_idx, [len(points)]])
            paths.extend(_path(points[s + 1:e]) for s, e in pairwise(nan_idx))
        return paths

    raise RuntimeError(f"Conversion of LineType {line_type} to MPL Paths is not implemented")
venv\Lib\site-packages\contourpy\util\renderer.py
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
import numpy as np
if TYPE_CHECKING:
import io
from numpy.typing import ArrayLike
from contourpy._contourpy import CoordinateArray, FillReturn, FillType, LineReturn, LineType
class Renderer(ABC):
    """Abstract base class for renderers."""

    def _grid_as_2d(self, x: ArrayLike, y: ArrayLike) -> tuple[CoordinateArray, CoordinateArray]:
        # Accept either 1D coordinate vectors or pre-built 2D grids; always
        # hand back a pair of 2D arrays.
        gx = np.asarray(x)
        gy = np.asarray(y)
        if gx.ndim == 1:
            gx, gy = np.meshgrid(gx, gy)
        return gx, gy

    @abstractmethod
    def filled(
        self,
        filled: FillReturn,
        fill_type: FillType | str,
        ax: Any = 0,
        color: str = "C0",
        alpha: float = 0.7,
    ) -> None:
        ...

    @abstractmethod
    def grid(
        self,
        x: ArrayLike,
        y: ArrayLike,
        ax: Any = 0,
        color: str = "black",
        alpha: float = 0.1,
        point_color: str | None = None,
        quad_as_tri_alpha: float = 0,
    ) -> None:
        ...

    @abstractmethod
    def lines(
        self,
        lines: LineReturn,
        line_type: LineType | str,
        ax: Any = 0,
        color: str = "C0",
        alpha: float = 1.0,
        linewidth: float = 1,
    ) -> None:
        ...

    @abstractmethod
    def mask(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike | np.ma.MaskedArray[Any, Any],
        ax: Any = 0,
        color: str = "black",
    ) -> None:
        ...

    def multi_filled(
        self,
        multi_filled: list[FillReturn],
        fill_type: FillType | str,
        ax: Any = 0,
        color: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Plot multiple sets of filled contours on a single axes.

        Args:
            multi_filled (list of filled contour arrays): Multiple filled contour sets as
                returned by :meth:`.ContourGenerator.multi_filled`.
            fill_type (FillType or str): Type of filled data as returned by
                :attr:`~.ContourGenerator.fill_type`, or string equivalent.
            ax (int or Renderer-specific axes or figure object, optional): Which axes to
                plot on, default ``0``.
            color (str or None, optional): A string color to use for every filled contour
                set, or ``None`` (the default) to cycle through the ``tab10`` colormap in
                order, wrapping around after 10 sets.
            kwargs: All other keyword arguments are passed on to
                :meth:`.Renderer.filled` unchanged.

        .. versionadded:: 1.3.0
        """
        for index, single_filled in enumerate(multi_filled):
            kwargs["color"] = color if color is not None else f"C{index % 10}"
            self.filled(single_filled, fill_type, ax, **kwargs)

    def multi_lines(
        self,
        multi_lines: list[LineReturn],
        line_type: LineType | str,
        ax: Any = 0,
        color: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Plot multiple sets of contour lines on a single axes.

        Args:
            multi_lines (list of contour line arrays): Multiple contour line sets as
                returned by :meth:`.ContourGenerator.multi_lines`.
            line_type (LineType or str): Type of line data as returned by
                :attr:`~.ContourGenerator.line_type`, or string equivalent.
            ax (int or Renderer-specific axes or figure object, optional): Which axes to
                plot on, default ``0``.
            color (str or None, optional): A string color to use for every line set, or
                ``None`` (the default) to cycle through the ``tab10`` colormap in order,
                wrapping around after 10 sets.
            kwargs: All other keyword arguments are passed on to
                :meth:`Renderer.lines` unchanged.

        .. versionadded:: 1.3.0
        """
        for index, single_lines in enumerate(multi_lines):
            kwargs["color"] = color if color is not None else f"C{index % 10}"
            self.lines(single_lines, line_type, ax, **kwargs)

    @abstractmethod
    def save(self, filename: str, transparent: bool = False) -> None:
        ...

    @abstractmethod
    def save_to_buffer(self) -> io.BytesIO:
        ...

    @abstractmethod
    def show(self) -> None:
        ...

    @abstractmethod
    def title(self, title: str, ax: Any = 0, color: str | None = None) -> None:
        ...

    @abstractmethod
    def z_values(
        self,
        x: ArrayLike,
        y: ArrayLike,
        z: ArrayLike,
        ax: Any = 0,
        color: str = "green",
        fmt: str = ".1f",
        quad_as_tri: bool = False,
    ) -> None:
        ...
# -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings
It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.
.. versionadded:: 2.7.0
"""
from datetime import datetime, timedelta, time, date
import calendar
from dateutil import tz
from functools import wraps
import re
import six
__all__ = ["isoparse", "isoparser"]
def _takes_ascii(f):
    """Decorator: normalize the wrapped method's first argument to ASCII bytes.

    Accepts bytes, text, or a file-like object (whose entire contents are
    read). Text is encoded to ASCII, raising ``ValueError`` on non-ASCII
    input, since ISO-8601 covers only ASCII characters.
    """
    @wraps(f)
    def func(self, str_in, *args, **kwargs):
        # Drain streams up front so the parser sees a plain string.
        if hasattr(str_in, 'read'):
            str_in = str_in.read()

        if isinstance(str_in, six.text_type):
            # ASCII is the same in UTF-8
            try:
                str_in = str_in.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)

        return f(self, str_in, *args, **kwargs)

    return func
class isoparser(object):
    def __init__(self, sep=None):
        """
        :param sep:
            A single character that separates date and time portions. If
            ``None``, the parser will accept any single character.
            For strict ISO-8601 adherence, pass ``'T'``.
        """
        if sep is not None:
            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
                raise ValueError('Separator must be a single, non-numeric ' +
                                 'ASCII character')

            # All parsing is done on bytes, so store the separator encoded.
            sep = sep.encode('ascii')

        self._sep = sep

    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.

        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.

        Supported date formats are:

        Common:

        - ``YYYY``
        - ``YYYY-MM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``

        Uncommon:

        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day

        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.

        Supported time formats are:

        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)

        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation. The decimal separator can be
        either a dot or a comma.

        .. caution::

            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.

        Supported time zone offset formats are:

        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`

        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.

        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string

        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.

        .. warning::

            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.

        .. versionadded:: 2.7.0
        """
        components, pos = self._parse_isodate(dt_str)

        # Anything left after the date must be a separator followed by a time.
        if len(dt_str) > pos:
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')

        # 24:00 is normalized to 00:00 of the following day.
        if len(components) > 3 and components[3] == 24:
            components[3] = 0
            return datetime(*components) + timedelta(days=1)

        return datetime(*components)

    @_takes_ascii
    def parse_isodate(self, datestr):
        """
        Parse the date portion of an ISO string.

        :param datestr:
            The string portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.date` object
        """
        components, pos = self._parse_isodate(datestr)
        if pos < len(datestr):
            raise ValueError('String contains unknown ISO ' +
                             'components: {!r}'.format(datestr.decode('ascii')))
        return date(*components)

    @_takes_ascii
    def parse_isotime(self, timestr):
        """
        Parse the time portion of an ISO string.

        :param timestr:
            The time portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.time` object
        """
        components = self._parse_isotime(timestr)
        # 24:00 is normalized to 00:00 (no date to roll over to here).
        if components[0] == 24:
            components[0] = 0
        return time(*components)

    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.

        See :func:`isoparser.isoparse` for details on supported formats.

        :param tzstr:
            A string representing an ISO time zone offset

        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones

        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)

    # Constants
    _DATE_SEP = b'-'
    _TIME_SEP = b':'
    _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')

    def _parse_isodate(self, dt_str):
        # Try the common calendar forms first, fall back to week/ordinal dates.
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)

    def _parse_isodate_common(self, dt_str):
        # Parses YYYY, YYYY-MM, YYYY-MM-DD and YYYYMMDD.
        # Returns ([year, month, day], chars consumed); omitted parts default to 1.
        len_str = len(dt_str)
        components = [1, 1, 1]

        if len_str < 4:
            raise ValueError('ISO string too short')

        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos

        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1

        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')

        components[1] = int(dt_str[pos:pos + 2])
        pos += 2

        if pos >= len_str:
            if has_sep:
                return components, pos
            else:
                # YYYYMM (no separator, no day) is not a valid ISO form.
                raise ValueError('Invalid ISO format')

        # Separator use must be consistent between month and day.
        if has_sep:
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1

        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')

        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2

    def _parse_isodate_uncommon(self, dt_str):
        # Parses ISO week dates (YYYY-Www[-D]) and ordinal dates (YYYY-DDD).
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')

        # All ISO formats start with the year
        year = int(dt_str[0:4])

        has_sep = dt_str[4:5] == self._DATE_SEP
        pos = 4 + has_sep       # Skip '-' if it's there

        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2

            dayno = 1
            if len(dt_str) > pos:
                # Dash usage must match between year/week and week/day.
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')

                pos += has_sep

                dayno = int(dt_str[pos:pos + 1])
                pos += 1

            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')

            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3

            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))

            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)

        components = [base_date.year, base_date.month, base_date.day]
        return components, pos

    def _calculate_weekdate(self, year, week, day):
        """
        Calculate the day of corresponding to the ISO year-week-day calendar.

        This function is effectively the inverse of
        :func:`datetime.date.isocalendar`.

        :param year:
            The year in the ISO calendar

        :param week:
            The week in the ISO calendar - range is [1, 53]

        :param day:
            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]

        :return:
            Returns a :class:`datetime.date`
        """
        if not 0 < week < 54:
            raise ValueError('Invalid week: {}'.format(week))

        if not 0 < day < 8:     # Range is 1-7
            raise ValueError('Invalid weekday: {}'.format(day))

        # Get week 1 for the specific year:
        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)

        # Now add the specific number of weeks and days to get what we want
        week_offset = (week - 1) * 7 + (day - 1)
        return week_1 + timedelta(days=week_offset)

    def _parse_isotime(self, timestr):
        # Returns [hour, minute, second, microsecond, tzinfo-or-None].
        len_str = len(timestr)
        components = [0, 0, 0, 0, None]
        pos = 0
        comp = -1

        if len_str < 2:
            raise ValueError('ISO time too short')

        has_sep = False

        while pos < len_str and comp < 5:
            comp += 1

            if timestr[pos:pos + 1] in b'-+Zz':
                # Detect time zone boundary
                components[-1] = self._parse_tzstr(timestr[pos:])
                pos = len_str
                break

            # Colon usage must be consistent across all components.
            if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
                has_sep = True
                pos += 1
            elif comp == 2 and has_sep:
                if timestr[pos:pos+1] != self._TIME_SEP:
                    raise ValueError('Inconsistent use of colon separator')
                pos += 1

            if comp < 3:
                # Hour, minute, second
                components[comp] = int(timestr[pos:pos + 2])
                pos += 2

            if comp == 3:
                # Fraction of a second
                frac = self._FRACTION_REGEX.match(timestr[pos:])
                if not frac:
                    continue

                us_str = frac.group(1)[:6]  # Truncate to microseconds
                components[comp] = int(us_str) * 10**(6 - len(us_str))
                pos += len(frac.group())

        if pos < len_str:
            raise ValueError('Unused components in ISO string')

        if components[0] == 24:
            # Standard supports 00:00 and 24:00 as representations of midnight
            if any(component != 0 for component in components[1:4]):
                raise ValueError('Hour may only be 24 at 24:00:00.000')

        return components

    def _parse_tzstr(self, tzstr, zero_as_utc=True):
        # 'Z'/'z' is UTC; otherwise a signed offset of 3, 5 or 6 characters
        # (±HH, ±HHMM or ±HH:MM).
        if tzstr == b'Z' or tzstr == b'z':
            return tz.UTC

        if len(tzstr) not in {3, 5, 6}:
            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')

        if tzstr[0:1] == b'-':
            mult = -1
        elif tzstr[0:1] == b'+':
            mult = 1
        else:
            raise ValueError('Time zone offset requires sign')

        hours = int(tzstr[1:3])
        if len(tzstr) == 3:
            minutes = 0
        else:
            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])

        if zero_as_utc and hours == 0 and minutes == 0:
            return tz.UTC
        else:
            # Note: range validation only applies to non-zero offsets.
            if minutes > 59:
                raise ValueError('Invalid minutes in time zone offset')

            if hours > 23:
                raise ValueError('Invalid hours in time zone offset')

            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
# Module-level convenience instance: `isoparse` is the bound method of a
# default-configured parser (accepts any single date/time separator character).
DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
venv\Lib\site-packages\dateutil\parser\_parser.py
# -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
`_
- `W3C Date and Time Formats `_
- `Time Formats (Planetary Rings Node) `_
- `CPAN ParseDate module
`_
- `Java SimpleDateFormat Class
`_
"""
from __future__ import unicode_literals
import datetime
import re
import string
import time
import warnings
from calendar import monthrange
from io import StringIO
import six
from six import integer_types, text_type
from decimal import Decimal
from warnings import warn
from .. import relativedelta
from .. import tz
__all__ = ["parse", "parserinfo", "ParserError"]
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
    """Lexer that splits a date/time string into word, number and punctuation tokens."""
    # Fractional seconds are sometimes split by a comma
    _split_decimal = re.compile("([.,])")

    def __init__(self, instream):
        # Accept bytes, text or a file-like object; normalize to a text stream.
        if isinstance(instream, (bytes, bytearray)):
            instream = instream.decode()

        if isinstance(instream, text_type):
            instream = StringIO(instream)
        elif getattr(instream, 'read', None) is None:
            raise TypeError('Parser must be a string or character stream, not '
                            '{itype}'.format(itype=instream.__class__.__name__))

        self.instream = instream
        self.charstack = []   # Characters read ahead but not yet consumed.
        self.tokenstack = []  # Tokens already split off but not yet returned.
        self.eof = False

    def get_token(self):
        """
        This function breaks the time string into lexical units (tokens), which
        can be parsed by the parser. Lexical units are demarcated by changes in
        the character set, so any continuous string of letters is considered
        one unit, any continuous string of numbers is considered one unit.

        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking it into tokens; as such, this
        function maintains a "token stack", for when the ambiguous context
        demands that multiple tokens be parsed at once.
        """
        if self.tokenstack:
            return self.tokenstack.pop(0)

        seenletters = False
        token = None
        # state is one of: None (no token yet), 'a' (word), '0' (number),
        # 'a.' (dotted word), '0.' (dotted number).
        state = None

        while not self.eof:
            # We only realize that we've reached the end of a token when we
            # find a character that's not part of the current token - since
            # that character may be part of the next token, it's stored in the
            # charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                # Skip embedded NUL characters.
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)

            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character of the token - determines if we're starting
                # to parse a word, a number or something else.
                token = nextchar
                if self.isword(nextchar):
                    state = 'a'
                elif self.isnum(nextchar):
                    state = '0'
                elif self.isspace(nextchar):
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                # If we've already started reading a word, we keep reading
                # letters until we find something that's not part of a word.
                seenletters = True
                if self.isword(nextchar):
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                # If we've already started reading a number, we keep reading
                # numbers until we find something that doesn't fit.
                if self.isnum(nextchar):
                    token += nextchar
                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                # If we've seen some letters and a dot separator, continue
                # parsing, and the tokens will be broken up later.
                seenletters = True
                if nextchar == '.' or self.isword(nextchar):
                    token += nextchar
                elif self.isnum(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                # If we've seen at least one dot separator, keep going, we'll
                # break up the tokens later.
                if nextchar == '.' or self.isnum(nextchar):
                    token += nextchar
                elif self.isword(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token

        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                       token[-1] in '.,')):
            # Ambiguous dotted token (e.g. "Sep.20.2009"): split on dots and
            # commas, return the first piece and queue the rest.
            l = self._split_decimal.split(token)
            token = l[0]
            for tok in l[1:]:
                if tok:
                    self.tokenstack.append(tok)

        if state == '0.' and token.count('.') == 0:
            # Pure decimal number written with a comma: normalize to a dot.
            token = token.replace(',', '.')

        return token

    def __iter__(self):
        return self

    def __next__(self):
        token = self.get_token()
        if token is None:
            raise StopIteration

        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    @classmethod
    def split(cls, s):
        return list(cls(s))

    @classmethod
    def isword(cls, nextchar):
        """ Whether or not the next character is part of a word """
        return nextchar.isalpha()

    @classmethod
    def isnum(cls, nextchar):
        """ Whether the next character is part of a number """
        return nextchar.isdigit()

    @classmethod
    def isspace(cls, nextchar):
        """ Whether the next character is whitespace """
        return nextchar.isspace()
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
    """
    Class which handles what inputs are accepted. Subclass this to customize
    the language and acceptable values for each parameter.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. Default is ``False``.

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        Default is ``False``.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]
    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),  # TODO: "Tues"
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),  # TODO: "Thurs"
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),  # TODO: "Febr"
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z", "z"]
    PERTAIN = ["of"]
    TZOFFSET = {}
    # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
    #              "Anno Domini", "Year of Our Lord"]

    def __init__(self, dayfirst=False, yearfirst=False):
        # Lower-cased lookup tables mapping each accepted spelling to its
        # index in the class-level lists above.
        self._jump = self._convert(self.JUMP)
        self._weekdays = self._convert(self.WEEKDAYS)
        self._months = self._convert(self.MONTHS)
        self._hms = self._convert(self.HMS)
        self._ampm = self._convert(self.AMPM)
        self._utczone = self._convert(self.UTCZONE)
        self._pertain = self._convert(self.PERTAIN)

        self.dayfirst = dayfirst
        self.yearfirst = yearfirst

        self._year = time.localtime().tm_year
        self._century = self._year // 100 * 100

    def _convert(self, lst):
        # Map every accepted (lower-cased) spelling to its index in ``lst``.
        dct = {}
        for i, v in enumerate(lst):
            names = v if isinstance(v, tuple) else (v,)
            for name in names:
                dct[name.lower()] = i
        return dct

    def jump(self, name):
        """Whether ``name`` is a filler token to be skipped over."""
        return name.lower() in self._jump

    def weekday(self, name):
        """Weekday index for ``name`` (0=Monday), or None if not a weekday."""
        return self._weekdays.get(name.lower())

    def month(self, name):
        """Month number for ``name`` (1=January), or None if not a month."""
        index = self._months.get(name.lower())
        return None if index is None else index + 1

    def hms(self, name):
        """0/1/2 for an hour/minute/second designator, or None."""
        return self._hms.get(name.lower())

    def ampm(self, name):
        """0 for an AM designator, 1 for PM, or None."""
        return self._ampm.get(name.lower())

    def pertain(self, name):
        """Whether ``name`` is a pertain word (e.g. the "of" in "25th of Sep")."""
        return name.lower() in self._pertain

    def utczone(self, name):
        """Whether ``name`` (case-insensitively) denotes UTC."""
        return name.lower() in self._utczone

    def tzoffset(self, name):
        # NOTE: this membership test is case-sensitive against the lower-cased
        # keys (historical behavior), unlike utczone() above.
        if name in self._utczone:
            return 0

        return self.TZOFFSET.get(name)

    def convertyear(self, year, century_specified=False):
        """
        Converts two-digit years to year within [-50, 49]
        range of self._year (current local time)
        """

        # Function contract is that the year is always positive
        assert year >= 0

        if year < 100 and not century_specified:
            # assume current century to start
            year += self._century

            if year >= self._year + 50:  # if too far in future
                year -= 100
            elif year < self._year - 50:  # if too far in past
                year += 100

        return year

    def validate(self, res):
        # Normalize the parsed result in place: resolve two-digit years and
        # canonicalize UTC-equivalent time zone names/offsets.
        if res.year is not None:
            res.year = self.convertyear(res.year, res.century_specified)

        if ((res.tzoffset == 0 and not res.tzname) or
                (res.tzname == 'Z' or res.tzname == 'z')):
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            res.tzoffset = 0

        return True
class _ymd(list):
    """A list of up to three year/month/day numbers.

    Tracks which positions (if any) are *known* to be the year, month or
    day (``ystridx``/``mstridx``/``dstridx``), and whether any value
    carried explicit century information; ``resolve_ymd`` applies the
    disambiguation heuristics.
    """

    def __init__(self, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)
        # True once a value unambiguously carries a century (e.g. "1999").
        self.century_specified = False
        # Indices into this list of the day/month/year tokens, when known.
        self.dstridx = None
        self.mstridx = None
        self.ystridx = None

    @property
    def has_year(self):
        return self.ystridx is not None

    @property
    def has_month(self):
        return self.mstridx is not None

    @property
    def has_day(self):
        return self.dstridx is not None

    def could_be_day(self, value):
        # A candidate day must fit the known month (and year, if any).
        if self.has_day:
            return False
        elif not self.has_month:
            return 1 <= value <= 31
        elif not self.has_year:
            # Be permissive, assume leap year
            month = self[self.mstridx]
            return 1 <= value <= monthrange(2000, month)[1]
        else:
            month = self[self.mstridx]
            year = self[self.ystridx]
            return 1 <= value <= monthrange(year, month)[1]

    def append(self, val, label=None):
        """Append a value, optionally tagged as 'Y', 'M' or 'D'.

        String values longer than two digits, or ints greater than 100,
        are forced to be years and mark the century as explicit.
        """
        if hasattr(val, '__len__'):
            if val.isdigit() and len(val) > 2:
                self.century_specified = True
                if label not in [None, 'Y']:  # pragma: no cover
                    raise ValueError(label)
                label = 'Y'
        elif val > 100:
            self.century_specified = True
            if label not in [None, 'Y']:  # pragma: no cover
                raise ValueError(label)
            label = 'Y'

        super(self.__class__, self).append(int(val))

        if label == 'M':
            if self.has_month:
                raise ValueError('Month is already set')
            self.mstridx = len(self) - 1
        elif label == 'D':
            if self.has_day:
                raise ValueError('Day is already set')
            self.dstridx = len(self) - 1
        elif label == 'Y':
            if self.has_year:
                raise ValueError('Year is already set')
            self.ystridx = len(self) - 1

    def _resolve_from_stridxs(self, strids):
        """
        Try to resolve the identities of year/month/day elements using
        ystridx, mstridx, and dstridx, if enough of these are specified.
        """
        if len(self) == 3 and len(strids) == 2:
            # we can back out the remaining stridx value
            missing = [x for x in range(3) if x not in strids.values()]
            key = [x for x in ['y', 'm', 'd'] if x not in strids]
            assert len(missing) == len(key) == 1
            key = key[0]
            val = missing[0]
            strids[key] = val

        assert len(self) == len(strids)  # otherwise this should not be called
        out = {key: self[strids[key]] for key in strids}
        return (out.get('y'), out.get('m'), out.get('d'))

    def resolve_ymd(self, yearfirst, dayfirst):
        """Return (year, month, day), disambiguating unlabeled values via
        value ranges and the *yearfirst*/*dayfirst* preferences."""
        len_ymd = len(self)
        year, month, day = (None, None, None)

        strids = (('y', self.ystridx),
                  ('m', self.mstridx),
                  ('d', self.dstridx))

        strids = {key: val for key, val in strids if val is not None}
        if (len(self) == len(strids) > 0 or
                (len(self) == 3 and len(strids) == 2)):
            # Enough labels are known to resolve positionally.
            return self._resolve_from_stridxs(strids)

        mstridx = self.mstridx

        if len_ymd > 3:
            raise ValueError("More than three YMD values")
        elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
            # One member, or two members with a month string
            if mstridx is not None:
                month = self[mstridx]
                # since mstridx is 0 or 1, self[mstridx-1] always
                # looks up the other element
                other = self[mstridx - 1]
            else:
                other = self[0]

            if len_ymd > 1 or mstridx is None:
                if other > 31:
                    year = other
                else:
                    day = other

        elif len_ymd == 2:
            # Two members with numbers
            if self[0] > 31:
                # 99-01
                year, month = self
            elif self[1] > 31:
                # 01-99
                month, year = self
            elif dayfirst and self[1] <= 12:
                # 13-01
                day, month = self
            else:
                # 01-13
                month, day = self

        elif len_ymd == 3:
            # Three members
            if mstridx == 0:
                if self[1] > 31:
                    # Apr-2003-25
                    month, year, day = self
                else:
                    month, day, year = self
            elif mstridx == 1:
                if self[0] > 31 or (yearfirst and self[2] <= 31):
                    # 99-Jan-01
                    year, month, day = self
                else:
                    # 01-Jan-01
                    # Give precedence to day-first, since
                    # two-digit years is usually hand-written.
                    day, month, year = self
            elif mstridx == 2:
                # WTF!?
                if self[1] > 31:
                    # 01-99-Jan
                    day, year, month = self
                else:
                    # 99-01-Jan
                    year, day, month = self
            else:
                # No month label at all: decide purely on ranges and the
                # yearfirst/dayfirst preferences.
                if (self[0] > 31 or
                    self.ystridx == 0 or
                        (yearfirst and self[1] <= 12 and self[2] <= 31)):
                    # 99-01-01
                    if dayfirst and self[2] <= 12:
                        year, day, month = self
                    else:
                        year, month, day = self
                elif self[0] > 12 or (dayfirst and self[1] <= 12):
                    # 13-01-01
                    day, month, year = self
                else:
                    # 01-13-01
                    month, day, year = self

        return year, month, day
class parser(object):
    def __init__(self, info=None):
        # Use the supplied parserinfo (token tables and dayfirst/yearfirst
        # defaults) or fall back to a default-constructed one.
        self.info = info or parserinfo()
def parse(self, timestr, default=None,
          ignoretz=False, tzinfos=None, **kwargs):
    """
    Parse the date/time string into a :class:`datetime.datetime` object.

    :param timestr:
        Any date/time string using the supported formats.

    :param default:
        The default datetime object, if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in the
        default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a
        naive :class:`datetime.datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. This argument maps time zone names (and optionally offsets
        from those time zones) to time zones. This parameter can be a
        dictionary with timezone aliases mapping time zone names to time
        zones or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone.

        The timezones to which the names are mapped can be an integer
        offset from UTC in seconds or a :class:`tzinfo` object.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

        This parameter is ignored if ``ignoretz`` is set.

    :param \\*\\*kwargs:
        Keyword arguments as passed to ``_parse()``.

    :return:
        Returns a :class:`datetime.datetime` object or, if the
        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
        first element being a :class:`datetime.datetime` object, the second
        a tuple containing the fuzzy tokens.

    :raises ParserError:
        Raised for invalid or unknown string format, if the provided
        :class:`tzinfo` is not in a valid format, or if an invalid date
        would be created.

    :raises TypeError:
        Raised for non-string or character stream input.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    if default is None:
        # Today at midnight: unparsed fields fall back to these values.
        default = datetime.datetime.now().replace(hour=0, minute=0,
                                                  second=0, microsecond=0)

    res, skipped_tokens = self._parse(timestr, **kwargs)

    if res is None:
        raise ParserError("Unknown string format: %s", timestr)

    if len(res) == 0:
        raise ParserError("String does not contain a date: %s", timestr)

    try:
        ret = self._build_naive(res, default)
    except ValueError as e:
        # Re-raise as ParserError but keep the original cause chained.
        six.raise_from(ParserError(str(e) + ": %s", timestr), e)

    if not ignoretz:
        ret = self._build_tzaware(ret, res, tzinfos)

    if kwargs.get('fuzzy_with_tokens', False):
        return ret, skipped_tokens
    else:
        return ret
# Mutable bag of everything _parse() extracts from the string; fields
# left as None were not present in the input.
class _result(_resultbase):
    __slots__ = ["year", "month", "day", "weekday",
                 "hour", "minute", "second", "microsecond",
                 "tzname", "tzoffset", "ampm","any_unused_tokens"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
           fuzzy_with_tokens=False):
    """
    Private method which performs the heavy lifting of parsing, called from
    ``parse()``, which passes on its ``kwargs`` to this function.

    :param timestr:
        The string to parse.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. If set to ``None``, this value is retrieved from the
        current :class:`parserinfo` object (which itself defaults to
        ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        If this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        A ``(res, skipped_tokens)`` pair; ``(None, None)`` on any failure.
    """
    if fuzzy_with_tokens:
        fuzzy = True

    info = self.info

    if dayfirst is None:
        dayfirst = info.dayfirst

    if yearfirst is None:
        yearfirst = info.yearfirst

    res = self._result()
    l = _timelex.split(timestr)         # Splits the timestr into tokens

    skipped_idxs = []

    # year/month/day list
    ymd = _ymd()

    len_l = len(l)
    i = 0
    try:
        # Single pass over the tokens; each branch consumes one or more
        # tokens and advances ``i`` accordingly.
        while i < len_l:

            # Check if it's a number
            value_repr = l[i]
            try:
                value = float(value_repr)
            except ValueError:
                value = None

            if value is not None:
                # Numeric token
                i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)

            # Check weekday
            elif info.weekday(l[i]) is not None:
                value = info.weekday(l[i])
                res.weekday = value

            # Check month name
            elif info.month(l[i]) is not None:
                value = info.month(l[i])
                ymd.append(value, 'M')

                if i + 1 < len_l:
                    if l[i + 1] in ('-', '/'):
                        # Jan-01[-99]
                        sep = l[i + 1]
                        ymd.append(l[i + 2])

                        if i + 3 < len_l and l[i + 3] == sep:
                            # Jan-01-99
                            ymd.append(l[i + 4])
                            i += 2

                        i += 2

                    elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
                          info.pertain(l[i + 2])):
                        # Jan of 01
                        # In this case, 01 is clearly year
                        if l[i + 4].isdigit():
                            # Convert it here to become unambiguous
                            value = int(l[i + 4])
                            year = str(info.convertyear(value))
                            ymd.append(year, 'Y')
                        else:
                            # Wrong guess
                            pass
                            # TODO: not hit in tests
                        i += 4

            # Check am/pm
            elif info.ampm(l[i]) is not None:
                value = info.ampm(l[i])
                val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)

                if val_is_ampm:
                    res.hour = self._adjust_ampm(res.hour, value)
                    res.ampm = value

                elif fuzzy:
                    skipped_idxs.append(i)

            # Check for a timezone name
            elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
                res.tzname = l[i]
                res.tzoffset = info.tzoffset(res.tzname)

                # Check for something like GMT+3, or BRST+3. Notice
                # that it doesn't mean "I am 3 hours after GMT", but
                # "my time +3 is GMT". If found, we reverse the
                # logic so that timezone parsing code will get it
                # right.
                if i + 1 < len_l and l[i + 1] in ('+', '-'):
                    l[i + 1] = ('+', '-')[l[i + 1] == '+']
                    res.tzoffset = None
                    if info.utczone(res.tzname):
                        # With something like GMT+3, the timezone
                        # is *not* GMT.
                        res.tzname = None

            # Check for a numbered timezone
            elif res.hour is not None and l[i] in ('+', '-'):
                signal = (-1, 1)[l[i] == '+']
                len_li = len(l[i + 1])

                # TODO: check that l[i + 1] is integer?
                if len_li == 4:
                    # -0300
                    hour_offset = int(l[i + 1][:2])
                    min_offset = int(l[i + 1][2:])
                elif i + 2 < len_l and l[i + 2] == ':':
                    # -03:00
                    hour_offset = int(l[i + 1])
                    min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
                    i += 2
                elif len_li <= 2:
                    # -[0]3
                    hour_offset = int(l[i + 1][:2])
                    min_offset = 0
                else:
                    raise ValueError(timestr)

                res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)

                # Look for a timezone name between parenthesis
                if (i + 5 < len_l and
                        info.jump(l[i + 2]) and l[i + 3] == '(' and
                        l[i + 5] == ')' and
                        3 <= len(l[i + 4]) and
                        self._could_be_tzname(res.hour, res.tzname,
                                              None, l[i + 4])):
                    # -0300 (BRST)
                    res.tzname = l[i + 4]
                    i += 4

                i += 1

            # Check jumps
            elif not (info.jump(l[i]) or fuzzy):
                raise ValueError(timestr)

            else:
                skipped_idxs.append(i)
            i += 1

        # Process year/month/day
        year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)

        res.century_specified = ymd.century_specified
        res.year = year
        res.month = month
        res.day = day

    except (IndexError, ValueError):
        return None, None

    if not info.validate(res):
        return None, None

    if fuzzy_with_tokens:
        skipped_tokens = self._recombine_skipped(l, skipped_idxs)
        return res, tuple(skipped_tokens)
    else:
        return res, None
def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
    """Interpret the numeric token at *idx*, updating *ymd*/*res* in
    place; returns the index of the last token consumed."""
    # Token is a number
    value_repr = tokens[idx]
    try:
        value = self._to_decimal(value_repr)
    except Exception as e:
        six.raise_from(ValueError('Unknown numeric token'), e)

    len_li = len(value_repr)

    len_l = len(tokens)

    if (len(ymd) == 3 and len_li in (2, 4) and
        res.hour is None and
            (idx + 1 >= len_l or
             (tokens[idx + 1] != ':' and
              info.hms(tokens[idx + 1]) is None))):
        # 19990101T23[59]: the date is complete, so this must be a time.
        s = tokens[idx]
        res.hour = int(s[:2])

        if len_li == 4:
            res.minute = int(s[2:])

    elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
        # YYMMDD or HHMMSS[.ss]
        s = tokens[idx]

        if not ymd and '.' not in tokens[idx]:
            ymd.append(s[:2])
            ymd.append(s[2:4])
            ymd.append(s[4:])
        else:
            # 19990101T235959[.59]

            # TODO: Check if res attributes already set.
            res.hour = int(s[:2])
            res.minute = int(s[2:4])
            res.second, res.microsecond = self._parsems(s[4:])

    elif len_li in (8, 12, 14):
        # YYYYMMDD[hhmm[ss]]
        s = tokens[idx]
        ymd.append(s[:4], 'Y')
        ymd.append(s[4:6])
        ymd.append(s[6:8])

        if len_li > 8:
            res.hour = int(s[8:10])
            res.minute = int(s[10:12])

            if len_li > 12:
                res.second = int(s[12:])

    elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
        # HH[ ]h or MM[ ]m or SS[.ss][ ]s
        hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
        (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
        if hms is not None:
            # TODO: checking that hour/minute/second are not
            # already set?
            self._assign_hms(res, value_repr, hms)

    elif idx + 2 < len_l and tokens[idx + 1] == ':':
        # HH:MM[:SS[.ss]]
        res.hour = int(value)
        value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
        (res.minute, res.second) = self._parse_min_sec(value)

        if idx + 4 < len_l and tokens[idx + 3] == ':':
            res.second, res.microsecond = self._parsems(tokens[idx + 4])

            idx += 2

        idx += 2

    elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
        # Separator-delimited date such as 01-01[-01] or 01-Jan[-01].
        sep = tokens[idx + 1]
        ymd.append(value_repr)

        if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
            if tokens[idx + 2].isdigit():
                # 01-01[-01]
                ymd.append(tokens[idx + 2])
            else:
                # 01-Jan[-01]
                value = info.month(tokens[idx + 2])

                if value is not None:
                    ymd.append(value, 'M')
                else:
                    raise ValueError()

            if idx + 3 < len_l and tokens[idx + 3] == sep:
                # We have three members
                value = info.month(tokens[idx + 4])

                if value is not None:
                    ymd.append(value, 'M')
                else:
                    ymd.append(tokens[idx + 4])
                idx += 2

            idx += 1
        idx += 1

    elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
        if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
            # 12 am
            hour = int(value)
            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
            idx += 1
        else:
            # Year, month or day
            ymd.append(value)
        idx += 1

    elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
        # 12am
        hour = int(value)
        res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
        idx += 1

    elif ymd.could_be_day(value):
        ymd.append(value)

    elif not fuzzy:
        raise ValueError()

    return idx
def _find_hms_idx(self, idx, tokens, info, allow_jump):
    """Locate the index of an h/m/s unit label that applies to the
    numeric token at *idx*, or None if there isn't one."""
    len_l = len(tokens)

    if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
        # There is an "h", "m", or "s" label following this token.  We
        # assign the upcoming label to the current token.
        # e.g. the "12" in 12h"
        hms_idx = idx + 1

    elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
          info.hms(tokens[idx+2]) is not None):
        # There is a space and then an "h", "m", or "s" label.
        # e.g. the "12" in "12 h"
        hms_idx = idx + 2

    elif idx > 0 and info.hms(tokens[idx-1]) is not None:
        # There is a "h", "m", or "s" preceding this token.  Since neither
        # of the previous cases was hit, there is no label following this
        # token, so we use the previous label.
        # e.g. the "04" in "12h04"
        hms_idx = idx-1

    elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
          info.hms(tokens[idx-2]) is not None):
        # If we are looking at the final token, we allow for a
        # backward-looking check to skip over a space.
        # TODO: Are we sure this is the right condition here?
        hms_idx = idx - 2

    else:
        hms_idx = None

    return hms_idx
def _assign_hms(self, res, value_repr, hms):
    """Store *value_repr* into the hour/minute/second field of *res*
    selected by *hms* (0=hour, 1=minute, 2=second), spilling any
    fractional part into the next-smaller unit."""
    # Parse through Decimal to avoid float rounding (see GH issue #427).
    value = self._to_decimal(value_repr)

    if hms == 0:
        res.hour = int(value)
        fraction = value % 1
        if fraction:
            res.minute = int(60 * fraction)
    elif hms == 1:
        res.minute, res.second = self._parse_min_sec(value)
    elif hms == 2:
        res.second, res.microsecond = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
    """Heuristic check that *token* may be a timezone name: a time must
    already have been parsed, no tz info seen yet, and the token must be
    at most 5 uppercase ASCII letters (or a known UTC alias)."""
    if hour is None or tzname is not None or tzoffset is not None:
        return False
    if len(token) > 5:
        return False
    return (all(ch in string.ascii_uppercase for ch in token)
            or token in self.info.UTCZONE)
def _ampm_valid(self, hour, ampm, fuzzy):
    """
    For fuzzy parsing, 'a' or 'am' (both valid English words)
    may erroneously trigger the AM/PM flag. Deal with that
    here.
    """
    # An AM/PM flag needs an hour to modify.
    if hour is None:
        if fuzzy:
            return False
        raise ValueError('No hour specified with AM or PM flag.')

    # AM/PM implies a 12-hour clock, so the hour must be in range.
    if not 0 <= hour <= 12:
        if fuzzy:
            return False
        raise ValueError('Invalid hour specified for 12-hour clock.')

    # In fuzzy mode, a second AM/PM flag is treated as ordinary text.
    return not (fuzzy and ampm is not None)
def _adjust_ampm(self, hour, ampm):
    """Map a 12-hour clock *hour* to 24-hour time given *ampm*
    (0 = AM, 1 = PM)."""
    if ampm == 1 and hour < 12:
        return hour + 12
    if ampm == 0 and hour == 12:
        return 0  # 12 AM is midnight
    return hour
def _parse_min_sec(self, value):
    """Split a possibly-fractional minutes *value* into (minute, second).

    The seconds component is None when there is no fractional part.
    """
    # TODO: Every usage of this function sets res.second to the return
    # value. Are there any cases where second will be returned as None and
    # we *don't* want to set res.second = None?
    minute = int(value)
    fraction = value % 1
    second = int(60 * fraction) if fraction else None
    return (minute, second)
def _parse_hms(self, idx, tokens, info, hms_idx):
    """Resolve the h/m/s label at *hms_idx* for the value at *idx*.

    Returns ``(new_idx, hms)`` where *hms* is 0/1/2 or None.
    """
    # TODO: Is this going to admit a lot of false-positives for when we
    # just happen to have digits and "h", "m" or "s" characters in non-date
    # text? I guess hex hashes won't have that problem, but there's plenty
    # of random junk out there.
    if hms_idx is None:
        return (idx, None)
    if hms_idx > idx:
        # Label follows the value ("12h"): consume through the label.
        return (hms_idx, info.hms(tokens[hms_idx]))
    # Label precedes the value ("12h04": the 04 is one unit smaller than
    # the preceding label), so bump the unit by one.
    return (idx, info.hms(tokens[hms_idx]) + 1)
# ------------------------------------------------------------------
# Handling for individual tokens. These are kept as methods instead
# of functions for the sake of customizability via subclassing.

def _parsems(self, value):
    """Parse an I[.F] seconds string into (seconds, microseconds)."""
    if "." not in value:
        return int(value), 0
    whole, frac = value.split(".")
    # Right-pad (or truncate) the fraction to exactly six digits.
    return int(whole), int(frac.ljust(6, "0")[:6])
def _to_decimal(self, val):
    """Convert *val* to a finite Decimal, raising ValueError otherwise."""
    try:
        decimal_value = Decimal(val)
        # See GH 662, edge case, infinite value should not be converted
        # via `_to_decimal`
        if not decimal_value.is_finite():
            raise ValueError("Converted decimal value is infinite or NaN")
    except Exception as e:
        msg = "Could not convert %s to decimal" % val
        six.raise_from(ValueError(msg), e)
    else:
        return decimal_value
# ------------------------------------------------------------------
# Post-Parsing construction of datetime output. These are kept as
# methods instead of functions for the sake of customizability via
# subclassing.

def _build_tzinfo(self, tzinfos, tzname, tzoffset):
    """Turn a user-supplied ``tzinfos`` mapping/callable into a tzinfo
    for *tzname*/*tzoffset*; raises TypeError for unsupported values."""
    if callable(tzinfos):
        tzdata = tzinfos(tzname, tzoffset)
    else:
        tzdata = tzinfos.get(tzname)
    # handle case where tzinfo is passed an option that returns None
    # eg tzinfos = {'BRST' : None}
    if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
        tzinfo = tzdata
    elif isinstance(tzdata, text_type):
        # tz string such as "BRST3" -> parsed by tz.tzstr
        tzinfo = tz.tzstr(tzdata)
    elif isinstance(tzdata, integer_types):
        # plain UTC offset in seconds
        tzinfo = tz.tzoffset(tzname, tzdata)
    else:
        raise TypeError("Offset must be tzinfo subclass, tz string, "
                        "or int offset.")
    return tzinfo
def _build_tzaware(self, naive, res, tzinfos):
    """Attach timezone information from the parse result *res* (and the
    user-supplied *tzinfos*) to the naive datetime *naive*."""
    if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
        # User-supplied tzinfos take precedence over everything else.
        tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
        aware = naive.replace(tzinfo=tzinfo)
        aware = self._assign_tzname(aware, res.tzname)

    elif res.tzname and res.tzname in time.tzname:
        # The name matches the local zone's abbreviations.
        aware = naive.replace(tzinfo=tz.tzlocal())

        # Handle ambiguous local datetime
        aware = self._assign_tzname(aware, res.tzname)

        # This is mostly relevant for winter GMT zones parsed in the UK
        if (aware.tzname() != res.tzname and
                res.tzname in self.info.UTCZONE):
            aware = aware.replace(tzinfo=tz.UTC)

    elif res.tzoffset == 0:
        aware = naive.replace(tzinfo=tz.UTC)

    elif res.tzoffset:
        aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

    elif not res.tzname and not res.tzoffset:
        # i.e. no timezone information was found.
        aware = naive

    elif res.tzname:
        # tz-like string was parsed but we don't know what to do
        # with it
        warnings.warn("tzname {tzname} identified but not understood.  "
                      "Pass `tzinfos` argument in order to correctly "
                      "return a timezone-aware datetime.  In a future "
                      "version, this will raise an "
                      "exception.".format(tzname=res.tzname),
                      category=UnknownTimezoneWarning)
        aware = naive

    return aware
def _build_naive(self, res, default):
    """Combine the parsed fields in *res* with *default*, producing a
    naive datetime; unparsed fields keep the default's values."""
    repl = {}
    for attr in ("year", "month", "day", "hour",
                 "minute", "second", "microsecond"):
        value = getattr(res, attr)
        if value is not None:
            repl[attr] = value

    if 'day' not in repl:
        # If the default day exceeds the last day of the month, fall back
        # to the end of the month.
        cyear = default.year if res.year is None else res.year
        cmonth = default.month if res.month is None else res.month
        cday = default.day if res.day is None else res.day

        if cday > monthrange(cyear, cmonth)[1]:
            repl['day'] = monthrange(cyear, cmonth)[1]

    naive = default.replace(**repl)

    if res.weekday is not None and not res.day:
        # Only a weekday was given: advance to the next matching weekday.
        naive = naive + relativedelta.relativedelta(weekday=res.weekday)

    return naive
def _assign_tzname(self, dt, tzname):
    """For an ambiguous *dt*, pick the fold whose tzname matches
    *tzname*; otherwise return *dt* unchanged."""
    if dt.tzname() == tzname:
        return dt
    folded = tz.enfold(dt, fold=1)
    return folded if folded.tzname() == tzname else dt
def _recombine_skipped(self, tokens, skipped_idxs):
    """Merge runs of adjacent skipped token indices back into strings.

    >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
    >>> skipped_idxs = [0, 1, 2, 5]
    >>> _recombine_skipped(tokens, skipped_idxs)
    ["foo bar", "baz"]
    """
    # NOTE(review): the adjacency check reads skipped_idxs[i - 1], so
    # callers are expected to pass an already-sorted list — confirm.
    skipped_tokens = []
    for i, idx in enumerate(sorted(skipped_idxs)):
        if i > 0 and idx - 1 == skipped_idxs[i - 1]:
            # Consecutive index: extend the previous run.
            skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
        else:
            skipped_tokens.append(tokens[idx])

    return skipped_tokens
# Shared module-level parser used by the top-level parse() function.
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
    """

    Parse a string in one of the supported formats, using the
    ``parserinfo`` parameters.

    :param timestr:
        A string containing a date/time stamp.

    :param parserinfo:
        A :class:`parserinfo` object containing parameters for the parser.
        If ``None``, the default arguments to the :class:`parserinfo`
        constructor are used.

    The ``**kwargs`` parameter takes the following keyword arguments:

    :param default:
        The default datetime object, if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in the
        default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a naive
        :class:`datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. This argument maps time zone names (and optionally offsets
        from those time zones) to time zones. This parameter can be a
        dictionary with timezone aliases mapping time zone names to time
        zones or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone.

        The timezones to which the names are mapped can be an integer
        offset from UTC in seconds or a :class:`tzinfo` object.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

        This parameter is ignored if ``ignoretz`` is set.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM and
        YMD. If set to ``None``, this value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
        be the year, otherwise the last number is taken to be the year. If
        this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        Returns a :class:`datetime.datetime` object or, if the
        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
        first element being a :class:`datetime.datetime` object, the second
        a tuple containing the fuzzy tokens.

    :raises ParserError:
        Raised for invalid or unknown string formats, if the provided
        :class:`tzinfo` is not in a valid format, or if an invalid date would
        be created.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    # Build a throwaway parser only when custom parserinfo is supplied;
    # otherwise reuse the shared module-level instance.
    if parserinfo:
        return parser(parserinfo).parse(timestr, **kwargs)
    else:
        return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
    """Parser for TZ-environment-variable-style timezone strings
    (e.g. ``EST5EDT,M3.2.0/2,M11.1.0/2``), plus a deprecated
    dateutil-specific comma-separated format."""

    class _result(_resultbase):
        # stdabbr/stdoffset: standard-time name and offset (seconds);
        # dstabbr/dstoffset: DST equivalents; start/end: transition rules.
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One DST transition rule: when (month/week/weekday, Julian
            # day, or year day) and at what time of day.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Parse *tzstr* into a ``_result``; returns None on failure."""
        res = self._result()
        # Tokenize into punctuation, alphabetic runs and digit runs.
        l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
        used_idxs = list()
        try:
            len_l = len(l)

            i = 0
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                # Scan forward over the alphabetic abbreviation.
                while j < len_l and not [x for x in l[j]
                                         if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])

                    for ii in range(j):
                        used_idxs.append(ii)
                    i = j
                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
                                       "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right.  See the TZ variable
                            # documentation.
                            signal = (1, -1)[l[i] == '+']
                            used_idxs.append(i)
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr, (int(l[i][:2]) * 3600 +
                                                   int(l[i][2:]) * 60) * signal)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i]) * 3600 +
                                     int(l[i + 2]) * 60) * signal)
                            used_idxs.append(i)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2]) * 3600 * signal)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break

            if i < len_l:
                # Normalize ';' rule separators to ',' before rule parsing.
                for j in range(i, len_l):
                    if l[j] == ';':
                        l[j] = ','

                assert l[i] == ','

                i += 1

            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789+-"]):
                # Deprecated dateutil-specific format:
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    if l[i] == '-':
                        value = int(l[i + 1]) * -1
                        used_idxs.append(i)
                        i += 1
                    else:
                        value = int(l[i])
                        used_idxs.append(i)
                    i += 2
                    if value:
                        x.week = value
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        x.day = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    x.time = int(l[i])
                    used_idxs.append(i)
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        used_idxs.append(i)
                        i += 1
                    else:
                        signal = 1
                    used_idxs.append(i)
                    res.dstoffset = (res.stdoffset + int(l[i]) * signal)

                # This was a made-up format that is not in normal use
                warn(('Parsed time zone "%s"' % tzstr) +
                     'is in a non-standard dateutil-specific format, which ' +
                     'is now deprecated; support for parsing this format ' +
                     'will be removed in future versions. It is recommended ' +
                     'that you switch to a standard format like the GNU ' +
                     'TZ variable format.', tz.DeprecatedTzFormatWarning)
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                     '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                # Standard GNU TZ rule section: start-rule,end-rule.
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        used_idxs.append(i)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        used_idxs.append(i)
                        i += 1
                        x.month = int(l[i])
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            # "week 5" means the last such weekday.
                            x.week = -1
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i]) + 1

                    used_idxs.append(i)
                    i += 1

                    if i < len_l and l[i] == '/':
                        used_idxs.append(i)
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2]) * 3600 +
                                      int(l[i][2:]) * 60)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
                            used_idxs.append(i)
                            i += 2
                            if i + 1 < len_l and l[i + 1] == ':':
                                used_idxs.append(i)
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2]) * 3600)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1

                    assert i == len_l or l[i] == ','

                    i += 1
                assert i >= len_l

        except (IndexError, ValueError, AssertionError):
            return None

        # Any leftover token that is not mere punctuation means the string
        # was not fully understood.
        unused_idxs = set(range(len_l)).difference(used_idxs)
        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
        return res
# Shared TZ-string parser used by the private _parsetz() helper.
DEFAULTTZPARSER = _tzparser()


def _parsetz(tzstr):
    """Parse a TZ-variable-style string via the shared _tzparser."""
    return DEFAULTTZPARSER.parse(tzstr)
class ParserError(ValueError):
    """Exception subclass used for any failure to parse a datetime string.

    This is a subclass of :py:exc:`ValueError`, and should be raised any time
    earlier versions of ``dateutil`` would have raised ``ValueError``.

    .. versionadded:: 2.8.1
    """
    def __str__(self):
        # args are stored printf-style: (template, *values).  Fall back to
        # the plain ValueError rendering if they don't interpolate.
        if self.args:
            try:
                return self.args[0] % self.args[1:]
            except TypeError:
                pass
        return super(ParserError, self).__str__()

    def __repr__(self):
        quoted = ["'%s'" % arg for arg in self.args]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(quoted))
class UnknownTimezoneWarning(RuntimeWarning):
    """Raised when the parser finds a timezone it cannot parse into a tzinfo.

    .. versionadded:: 2.7.0
    """
# vim:ts=4:sw=4:et
# -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo, ParserError
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning
from ._parser import __doc__
from .isoparser import isoparser, isoparse
# Public API of the dateutil.parser package.
__all__ = ['parse', 'parser', 'parserinfo',
           'isoparse', 'isoparser',
           'ParserError',
           'UnknownTimezoneWarning']
###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.


def __deprecated_private_func(f):
    """Wrap *f* so that every call emits a DeprecationWarning."""
    from functools import wraps
    import warnings

    warning_text = ('{name} is a private function and may break without warning, '
                    'it will be moved and or renamed in future versions.').format(name=f.__name__)

    @wraps(f)
    def deprecated_func(*args, **kwargs):
        warnings.warn(warning_text, DeprecationWarning)
        return f(*args, **kwargs)

    return deprecated_func
def __deprecate_private_class(c):
    """Return a subclass of *c* whose instantiation emits a
    DeprecationWarning; it keeps the original name and docstring."""
    import warnings

    warning_text = ('{name} is a private class and may break without warning, '
                    'it will be moved and or renamed in future versions.').format(name=c.__name__)

    class private_class(c):
        __doc__ = c.__doc__

        def __init__(self, *args, **kwargs):
            warnings.warn(warning_text, DeprecationWarning)
            super(private_class, self).__init__(*args, **kwargs)

    private_class.__name__ = c.__name__

    return private_class
from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz

# Rebind the private helpers to deprecation-warning wrappers so downstream
# code that (improperly) relies on them gets some notice before they move.
_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
venv\Lib\site-packages\dateutil\tz\tz.py
# -*- coding: utf-8 -*-
"""
This module offers timezone implementations subclassing the abstract
:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
etc), TZ environment string (in all known formats), given ranges (with help
from relative deltas), local machine timezone, fixed offset timezone, and UTC
timezone.
"""
import datetime
import struct
import time
import sys
import os
import bisect
import weakref
from collections import OrderedDict
import six
from six import string_types
from six.moves import _thread
from ._common import tzname_in_python2, _tzinfo
from ._common import tzrangebase, enfold
from ._common import _validate_fromutc_inputs
from ._factories import _TzSingleton, _TzOffsetFactory
from ._factories import _TzStrFactory
try:
from .win import tzwin, tzwinlocal
except ImportError:
tzwin = tzwinlocal = None
# For warning about rounding tzinfo
from warnings import warn
# Shared zero-length interval, returned by the various utcoffset()/dst()
# implementations below.
ZERO = datetime.timedelta(0)
# The Unix epoch as a naive datetime, and its proleptic-Gregorian ordinal;
# used to convert datetimes to POSIX-style timestamps without relying on
# platform time functions.
EPOCH = datetime.datetime(1970, 1, 1, 0, 0)
EPOCHORDINAL = EPOCH.toordinal()
@six.add_metaclass(_TzSingleton)
class tzutc(datetime.tzinfo):
    """
    This is a tzinfo object that represents the UTC time zone.

    **Examples:**

    .. doctest::

        >>> from datetime import *
        >>> from dateutil.tz import *

        >>> datetime.now()
        datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)

        >>> datetime.now(tzutc())
        datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())

        >>> datetime.now(tzutc()).tzname()
        'UTC'

    .. versionchanged:: 2.7.0
        ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will
        always return the same object.

        .. doctest::

            >>> from dateutil.tz import tzutc, UTC
            >>> tzutc() is tzutc()
            True
            >>> tzutc() is UTC
            True
    """
    def utcoffset(self, dt):
        # UTC is a fixed zero-offset zone.
        return ZERO

    def dst(self, dt):
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return "UTC"

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        # A fixed-offset zone has no transitions, hence no ambiguity.
        return False

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Fast track version of fromutc() returns the original ``dt`` object for
        any valid :py:class:`datetime.datetime` object.
        """
        return dt

    def __eq__(self, other):
        # UTC compares equal to other tzutc instances and to any tzoffset
        # whose offset is exactly zero.
        if not isinstance(other, (tzutc, tzoffset)):
            return NotImplemented

        return (isinstance(other, tzutc) or
                (isinstance(other, tzoffset) and other._offset == ZERO))

    # Defining __eq__ without __hash__ would make the class unhashable on
    # Python 3 only; setting __hash__ = None makes that explicit everywhere.
    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
#: Convenience constant providing a :class:`tzutc()` instance
#:
#: .. versionadded:: 2.7.0
UTC = tzutc()  # same object as any other tzutc() call (singleton metaclass)
@six.add_metaclass(_TzOffsetFactory)
class tzoffset(datetime.tzinfo):
    """
    A simple class for representing a fixed offset from UTC.

    :param name:
        The timezone name, to be returned when ``tzname()`` is called.
    :param offset:
        The time zone offset in seconds, or (since version 2.6.0, represented
        as a :py:class:`datetime.timedelta` object).
    """
    def __init__(self, name, offset):
        self._name = name

        try:
            # Allow a timedelta
            offset = offset.total_seconds()
        except (TypeError, AttributeError):
            pass

        self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # Fixed-offset zones never observe DST.
        return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._name

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        return dt + self._offset

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        # No transitions, so never ambiguous.
        return False

    def __eq__(self, other):
        if not isinstance(other, tzoffset):
            return NotImplemented

        # Equality is by offset only; the name is ignored.
        return self._offset == other._offset

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name),
                               int(self._offset.total_seconds()))

    __reduce__ = object.__reduce__
class tzlocal(_tzinfo):
    """
    A :class:`tzinfo` subclass built around the ``time`` timezone functions.
    """
    def __init__(self):
        super(tzlocal, self).__init__()

        # Snapshot the platform's standard and DST offsets at construction
        # time. time.timezone/time.altzone are seconds *west* of UTC, hence
        # the negation.
        self._std_offset = datetime.timedelta(seconds=-time.timezone)
        if time.daylight:
            self._dst_offset = datetime.timedelta(seconds=-time.altzone)
        else:
            self._dst_offset = self._std_offset

        self._dst_saved = self._dst_offset - self._std_offset
        self._hasdst = bool(self._dst_saved)
        # (standard name, DST name) pair from the C library.
        self._tznames = tuple(time.tzname)

    def utcoffset(self, dt):
        if dt is None and self._hasdst:
            # Without a concrete datetime the offset is undecidable in a
            # zone that observes DST.
            return None

        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if dt is None and self._hasdst:
            return None

        if self._isdst(dt):
            return self._dst_offset - self._std_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        # _isdst() returns a bool, used here as an index into the
        # (std, dst) name pair.
        return self._tznames[self._isdst(dt)]

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        # A wall time is ambiguous if it is not DST itself but the same wall
        # time shifted back by the DST delta *was* DST (i.e. it falls in the
        # repeated hour after a DST-to-standard transition).
        naive_dst = self._naive_is_dst(dt)
        return (not naive_dst and
                (naive_dst != self._naive_is_dst(dt - self._dst_saved)))

    def _naive_is_dst(self, dt):
        # Ask the C library whether this wall time is DST.
        timestamp = _datetime_to_timestamp(dt)
        return time.localtime(timestamp + time.timezone).tm_isdst

    def _isdst(self, dt, fold_naive=True):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # The code above yields the following result:
        #
        # >>> import tz, datetime
        # >>> t = tz.tzlocal()
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRDT'
        # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
        # 'BRST'
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRST'
        # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
        # 'BRDT'
        # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        # 'BRDT'
        #
        # Here is a more stable implementation:
        #
        if not self._hasdst:
            return False

        # Check for ambiguous times:
        dstval = self._naive_is_dst(dt)
        fold = getattr(dt, 'fold', None)

        if self.is_ambiguous(dt):
            if fold is not None:
                # In the ambiguous hour, fold=0 selects the first (DST)
                # occurrence, fold=1 the second (standard) one.
                return not self._fold(dt)
            else:
                return True

        return dstval

    def __eq__(self, other):
        if isinstance(other, tzlocal):
            return (self._std_offset == other._std_offset and
                    self._dst_offset == other._dst_offset)
        elif isinstance(other, tzutc):
            # Only equal to UTC when this zone is a fixed UTC/GMT zone.
            return (not self._hasdst and
                    self._tznames[0] in {'UTC', 'GMT'} and
                    self._std_offset == ZERO)
        elif isinstance(other, tzoffset):
            return (not self._hasdst and
                    self._tznames[0] == other._name and
                    self._std_offset == other._offset)
        else:
            return NotImplemented

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
    """Value object holding a single ttinfo record from a binary tzfile."""

    __slots__ = ["offset", "delta", "isdst", "abbr",
                 "isstd", "isgmt", "dstoffset"]

    def __init__(self):
        # Every slot starts out as None; the tzfile reader fills them in.
        for name in self.__slots__:
            setattr(self, name, None)

    def __repr__(self):
        # Only show the attributes that have been populated.
        parts = ["%s=%r" % (name, getattr(self, name))
                 for name in self.__slots__
                 if getattr(self, name) is not None]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(parts))

    def __eq__(self, other):
        if not isinstance(other, _ttinfo):
            return NotImplemented

        # Slot-wise comparison; __slots__ lists exactly the record fields.
        return all(getattr(self, name) == getattr(other, name)
                   for name in self.__slots__)

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickling needs explicit
        # state handling.
        return {name: getattr(self, name, None) for name in self.__slots__}

    def __setstate__(self, state):
        for name in self.__slots__:
            if name in state:
                setattr(self, name, state[name])
class _tzfile(object):
    """
    Lightweight class for holding the relevant transition and time zone
    information read from binary tzfiles.
    """
    attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
             'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']

    def __init__(self, **kwargs):
        # Any attribute not supplied via keyword defaults to None.
        for name in self.attrs:
            setattr(self, name, kwargs.get(name))
class tzfile(_tzinfo):
    """
    This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
    format timezone files to extract current and historical zone information.

    :param fileobj:
        This can be an opened file stream or a file name that the time zone
        information can be read from.

    :param filename:
        This is an optional parameter specifying the source of the time zone
        information in the event that ``fileobj`` is a file object. If omitted
        and ``fileobj`` is a file stream, this parameter will be set either to
        ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.

    See `Sources for Time Zone and Daylight Saving Time Data
    <https://data.iana.org/time-zones/tz-link.html>`_ for more information.
    Time zone files can be compiled from the `IANA Time Zone database files
    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_

    .. note::

        Only construct a ``tzfile`` directly if you have a specific timezone
        file on disk that you want to read into a Python ``tzinfo`` object.
        If you want to get a ``tzfile`` representing a specific IANA zone,
        (e.g. ``'America/New_York'``), you should call
        :func:`dateutil.tz.gettz` with the zone identifier.

    **Examples:**

    Using the US Eastern time zone as an example, we can see that a ``tzfile``
    provides time zone information for the standard Daylight Saving offsets:

    .. testsetup:: tzfile

        from dateutil.tz import gettz
        from datetime import datetime

    .. doctest:: tzfile

        >>> NYC = gettz('America/New_York')
        >>> NYC
        tzfile('/usr/share/zoneinfo/America/New_York')

        >>> print(datetime(2016, 1, 3, tzinfo=NYC))     # EST
        2016-01-03 00:00:00-05:00

        >>> print(datetime(2016, 7, 7, tzinfo=NYC))     # EDT
        2016-07-07 00:00:00-04:00

    The ``tzfile`` structure contains a full history of the time zone,
    so historical dates will also have the right offsets. For example, before
    the adoption of the UTC standards, New York used local solar mean time:

    .. doctest:: tzfile

        >>> print(datetime(1901, 4, 12, tzinfo=NYC))    # LMT
        1901-04-12 00:00:00-04:56

    And during World War II, New York was on "Eastern War Time", which was a
    state of permanent daylight saving time:

    .. doctest:: tzfile

        >>> print(datetime(1944, 2, 7, tzinfo=NYC))    # EWT
        1944-02-07 00:00:00-04:00
    """

    def __init__(self, fileobj, filename=None):
        super(tzfile, self).__init__()

        file_opened_here = False
        if isinstance(fileobj, string_types):
            # Given a path: open it ourselves (and therefore close it).
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        if fileobj is not None:
            if not file_opened_here:
                # Caller owns the stream; don't close it on exit.
                fileobj = _nullcontext(fileobj)

            with fileobj as file_stream:
                tzobj = self._read_tzfile(file_stream)

            self._set_tzdata(tzobj)

    def _set_tzdata(self, tzobj):
        """ Set the time zone data of this object from a _tzfile object """
        # Copy the relevant attributes over as private attributes
        for attr in _tzfile.attrs:
            setattr(self, '_' + attr, getattr(tzobj, attr))

    def _read_tzfile(self, fileobj):
        """Parse a TZif stream into a populated ``_tzfile`` record."""
        out = _tzfile()

        # From tzfile(5):
        #
        #   The time zone information files used by tzset(3)
        #   begin with the magic characters "TZif" to identify
        #   them as time zone information files, followed by
        #   sixteen bytes reserved for future use, followed by
        #   six four-byte values of type long, written in a
        #   ``standard'' byte order (the high-order byte
        #   of the value is written first).
        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The number of characters of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values of type long, sorted in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as returned by
        # time(2)) at which the rules for computing local time
        # change.
        if timecnt:
            out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
                                                    fileobj.read(timecnt*4)))
        else:
            out.trans_list_utc = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        if timecnt:
            out.trans_idx = struct.unpack(">%dB" % timecnt,
                                          fileobj.read(timecnt))
        else:
            out.trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff of type long, in a standard byte
        # order, followed by a one-byte value for tt_isdst
        # and a one-byte value for tt_abbrind. In each
        # structure, tt_gmtoff gives the number of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by localtime(3), and
        # tt_abbrind serves as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.
        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in standard byte order; the
        # first value of each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs; the second gives the total number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now (but seek for correct file position)
        if leapcnt:
            fileobj.seek(leapcnt * 8, os.SEEK_CUR)

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.
        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.
        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # Build ttinfo list
        out.ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            gmtoff = _get_supported_offset(gmtoff)
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.dstoffset = datetime.timedelta(0)
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviations are NUL-terminated strings within the abbr blob.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            out.ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        out.ttinfo_std = None
        out.ttinfo_dst = None
        out.ttinfo_before = None
        if out.ttinfo_list:
            if not out.trans_list_utc:
                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
            else:
                # Walk transitions newest-first to find the most recent
                # standard and DST types.
                for i in range(timecnt-1, -1, -1):
                    tti = out.trans_idx[i]
                    if not out.ttinfo_std and not tti.isdst:
                        out.ttinfo_std = tti
                    elif not out.ttinfo_dst and tti.isdst:
                        out.ttinfo_dst = tti

                    if out.ttinfo_std and out.ttinfo_dst:
                        break
                else:
                    if out.ttinfo_dst and not out.ttinfo_std:
                        out.ttinfo_std = out.ttinfo_dst

                for tti in out.ttinfo_list:
                    if not tti.isdst:
                        out.ttinfo_before = tti
                        break
                else:
                    out.ttinfo_before = out.ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        lastdst = None
        lastoffset = None
        lastdstoffset = None
        lastbaseoffset = None
        out.trans_list = []

        for i, tti in enumerate(out.trans_idx):
            offset = tti.offset
            dstoffset = 0

            if lastdst is not None:
                if tti.isdst:
                    if not lastdst:
                        dstoffset = offset - lastoffset

                    if not dstoffset and lastdstoffset:
                        dstoffset = lastdstoffset

                    tti.dstoffset = datetime.timedelta(seconds=dstoffset)
                    lastdstoffset = dstoffset

            # If a time zone changes its base offset during a DST transition,
            # then you need to adjust by the previous base offset to get the
            # transition time in local time. Otherwise you use the current
            # base offset. Ideally, I would have some mathematical proof of
            # why this is true, but I haven't really thought about it enough.
            baseoffset = offset - dstoffset
            adjustment = baseoffset
            if (lastbaseoffset is not None and baseoffset != lastbaseoffset
                    and tti.isdst != lastdst):
                # The base DST has changed
                adjustment = lastbaseoffset

            lastdst = tti.isdst
            lastoffset = offset
            lastbaseoffset = baseoffset

            out.trans_list.append(out.trans_list_utc[i] + adjustment)

        out.trans_idx = tuple(out.trans_idx)
        out.trans_list = tuple(out.trans_list)
        out.trans_list_utc = tuple(out.trans_list_utc)

        return out

    def _find_last_transition(self, dt, in_utc=False):
        # If there's no list, there are no transitions to find
        if not self._trans_list:
            return None

        timestamp = _datetime_to_timestamp(dt)

        # Find where the timestamp fits in the transition list - if the
        # timestamp is a transition time, it's part of the "after" period.
        trans_list = self._trans_list_utc if in_utc else self._trans_list
        idx = bisect.bisect_right(trans_list, timestamp)

        # We want to know when the previous transition was, so subtract off 1
        return idx - 1

    def _get_ttinfo(self, idx):
        # For no list or after the last transition, default to _ttinfo_std
        if idx is None or (idx + 1) >= len(self._trans_list):
            return self._ttinfo_std

        # If there is a list and the time is before it, return _ttinfo_before
        if idx < 0:
            return self._ttinfo_before

        return self._trans_idx[idx]

    def _find_ttinfo(self, dt):
        # Resolve the (possibly ambiguous) wall time to a transition index.
        idx = self._resolve_ambiguous_time(dt)

        return self._get_ttinfo(idx)

    def fromutc(self, dt):
        """
        The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.

        :param dt:
            A :py:class:`datetime.datetime` object.

        :raises TypeError:
            Raised if ``dt`` is not a :py:class:`datetime.datetime` object.

        :raises ValueError:
            Raised if this is called with a ``dt`` which does not have this
            ``tzinfo`` attached.

        :return:
            Returns a :py:class:`datetime.datetime` object representing the
            wall time in ``self``'s time zone.
        """
        # These isinstance checks are in datetime.tzinfo, so we'll preserve
        # them, even if we don't care about duck typing.
        if not isinstance(dt, datetime.datetime):
            raise TypeError("fromutc() requires a datetime argument")

        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        # First treat UTC as wall time and get the transition we're in.
        idx = self._find_last_transition(dt, in_utc=True)
        tti = self._get_ttinfo(idx)

        dt_out = dt + datetime.timedelta(seconds=tti.offset)

        fold = self.is_ambiguous(dt_out, idx=idx)

        return enfold(dt_out, fold=int(fold))

    def is_ambiguous(self, dt, idx=None):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if idx is None:
            idx = self._find_last_transition(dt)

        # Calculate the difference in offsets from current to previous
        timestamp = _datetime_to_timestamp(dt)
        tti = self._get_ttinfo(idx)

        if idx is None or idx <= 0:
            return False

        # Ambiguous iff dt falls inside the window after the transition that
        # is replayed when the offset decreases.
        od = self._get_ttinfo(idx - 1).offset - tti.offset
        tt = self._trans_list[idx]          # Transition time

        return timestamp < tt + od

    def _resolve_ambiguous_time(self, dt):
        idx = self._find_last_transition(dt)

        # If we have no transitions, return the index
        _fold = self._fold(dt)
        if idx is None or idx == 0:
            return idx

        # If it's ambiguous and we're in a fold, shift to a different index.
        idx_offset = int(not _fold and self.is_ambiguous(dt, idx))

        return idx - idx_offset

    def utcoffset(self, dt):
        if dt is None:
            return None

        if not self._ttinfo_std:
            return ZERO

        return self._find_ttinfo(dt).delta

    def dst(self, dt):
        if dt is None:
            return None

        if not self._ttinfo_dst:
            return ZERO

        tti = self._find_ttinfo(dt)

        if not tti.isdst:
            return ZERO

        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.dstoffset

    @tzname_in_python2
    def tzname(self, dt):
        if not self._ttinfo_std or dt is None:
            return None

        return self._find_ttinfo(dt).abbr

    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return NotImplemented

        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._filename))

    def __reduce__(self):
        return self.__reduce_ex__(None)

    def __reduce_ex__(self, protocol):
        # Pickle by filename; the zone data is re-read when unpickling.
        return (self.__class__, (None, self._filename), self.__dict__)
class tzrange(tzrangebase):
    """
    The ``tzrange`` object is a time zone specified by a set of offsets and
    abbreviations, equivalent to the way the ``TZ`` variable can be specified
    in POSIX-like systems, but using Python delta objects to specify DST
    start, end and offsets.

    :param stdabbr:
        The abbreviation for standard time (e.g. ``'EST'``).

    :param stdoffset:
        An integer or :class:`datetime.timedelta` object or equivalent
        specifying the base offset from UTC.

        If unspecified, +00:00 is used.

    :param dstabbr:
        The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).

        If specified, with no other DST information, DST is assumed to occur
        and the default behavior or ``dstoffset``, ``start`` and ``end`` is
        used. If unspecified and no other DST information is specified, it
        is assumed that this zone has no DST.

        If this is unspecified and other DST information is *is* specified,
        DST occurs in the zone but the time zone abbreviation is left
        unchanged.

    :param dstoffset:
        A an integer or :class:`datetime.timedelta` object or equivalent
        specifying the UTC offset during DST. If unspecified and any other DST
        information is specified, it is assumed to be the STD offset +1 hour.

    :param start:
        A :class:`relativedelta.relativedelta` object or equivalent specifying
        the time and time of year that daylight savings time starts. To
        specify, for example, that DST starts at 2AM on the 2nd Sunday in
        March, pass:

            ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``

        If unspecified and any other DST information is specified, the default
        value is 2 AM on the first Sunday in April.

    :param end:
        A :class:`relativedelta.relativedelta` object or equivalent
        representing the time and time of year that daylight savings time
        ends, with the same specification method as in ``start``. One note is
        that this should point to the first time in the *standard* zone, so if
        a transition occurs at 2AM in the DST zone and the clocks are set back
        1 hour to 1AM, set the ``hours`` parameter to +1.

    **Examples:**

    .. testsetup:: tzrange

        from dateutil.tz import tzrange, tzstr

    .. doctest:: tzrange

        >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
        True

        >>> from dateutil.relativedelta import *
        >>> range1 = tzrange("EST", -18000, "EDT")
        >>> range2 = tzrange("EST", -18000, "EDT", -14400,
        ...                  relativedelta(hours=+2, month=4, day=1,
        ...                                weekday=SU(+1)),
        ...                  relativedelta(hours=+1, month=10, day=31,
        ...                                weekday=SU(-1)))
        >>> tzstr('EST5EDT') == range1 == range2
        True
    """
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):

        # Lazy import published as a module global to avoid a circular
        # import at module load time.
        global relativedelta
        from dateutil import relativedelta

        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr

        # Accept timedeltas as well as raw second counts.
        try:
            stdoffset = stdoffset.total_seconds()
        except (TypeError, AttributeError):
            pass

        try:
            dstoffset = dstoffset.total_seconds()
        except (TypeError, AttributeError):
            pass

        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO

        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # Default DST offset is standard offset plus one hour.
            self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO

        if dstabbr and start is None:
            # Default DST start: 2 AM on the first Sunday in April.
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start

        if dstabbr and end is None:
            # Default DST end (standard side): last Sunday in October.
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = bool(self._start_delta)

    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.

        :param year:
            The year whose transitions you would like to query.

        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """
        if not self.hasdst:
            return None

        base_year = datetime.datetime(year, 1, 1)

        start = base_year + self._start_delta
        end = base_year + self._end_delta

        return (start, end)

    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return NotImplemented

        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)

    @property
    def _dst_base_offset(self):
        # DST-minus-standard delta consumed by the tzrangebase machinery.
        return self._dst_base_offset_
@six.add_metaclass(_TzStrFactory)
class tzstr(tzrange):
    """
    ``tzstr`` objects are time zone objects specified by a time-zone string as
    it would be passed to a ``TZ`` variable on POSIX-style systems (see
    the `GNU C Library: TZ Variable`_ for more details).

    There is one notable exception, which is that POSIX-style time zones use an
    inverted offset format, so normally ``GMT+3`` would be parsed as an offset
    3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
    offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
    behavior, pass a ``True`` value to ``posix_offset``.

    The :class:`tzrange` object provides the same functionality, but is
    specified using :class:`relativedelta.relativedelta` objects. rather than
    strings.

    :param s:
        A time zone string in ``TZ`` variable format. This can be a
        :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
        :class:`unicode`) or a stream emitting unicode characters
        (e.g. :class:`StringIO`).

    :param posix_offset:
        Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
        ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
        POSIX standard.

    .. caution::

        Prior to version 2.7.0, this function also supported time zones
        in the format:

            * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
            * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``

        This format is non-standard and has been deprecated; this function
        will raise a :class:`DeprecatedTZFormatWarning` until
        support is removed in a future version.

    .. _`GNU C Library: TZ Variable`:
        https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
    """
    def __init__(self, s, posix_offset=False):
        # Lazy import published as a module global to avoid a circular
        # import at module load time.
        global parser
        from dateutil.parser import _parser as parser

        self._s = s

        res = parser._parsetz(s)
        if res is None or res.any_unused_tokens:
            raise ValueError("unknown string format")

        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC") and not posix_offset:
            res.stdoffset *= -1

        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)

        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)

        self.hasdst = bool(self._start_delta)

    def _delta(self, x, isend=0):
        # Convert a parsed transition spec (month/week/day/yday + time) into
        # a relativedelta expressing that transition within a year. *isend*
        # marks the DST-to-standard transition, which must be expressed on
        # the standard-time side.
        from dateutil import relativedelta
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    # Nth weekday from the start of the month.
                    kwargs["day"] = 1
                else:
                    # Nth weekday from the end of the month.
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday

        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)

        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200

        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset - self._std_offset
            kwargs["seconds"] -= delta.seconds + delta.days * 86400

        return relativedelta.relativedelta(**kwargs)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
    """One VTIMEZONE component (a STANDARD or DAYLIGHT section)."""

    def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
                 tzname=None, rrule=None):
        # Offsets arrive as second counts; store them as timedeltas.
        offset_from = datetime.timedelta(seconds=tzoffsetfrom)
        offset_to = datetime.timedelta(seconds=tzoffsetto)

        self.tzoffsetfrom = offset_from
        self.tzoffsetto = offset_to
        self.tzoffsetdiff = offset_to - offset_from
        self.isdst = isdst
        self.tzname = tzname
        self.rrule = rrule
class _tzicalvtz(_tzinfo):
    """
    A :class:`tzinfo` for a single iCalendar ``VTIMEZONE`` definition, built
    from one or more ``STANDARD``/``DAYLIGHT`` components.

    :param tzid:
        The ``TZID`` of the zone (used for ``repr`` only).
    :param comps:
        A list of :class:`_tzicalvtzcomp` objects describing the zone's
        components.
    """
    def __init__(self, tzid, comps=None):
        super(_tzicalvtz, self).__init__()

        self._tzid = tzid
        # Use a None sentinel rather than a mutable default argument so the
        # default list is not shared between instances.
        self._comps = [] if comps is None else comps

        # Small MRU cache mapping (naive datetime, fold) -> component.
        self._cachedate = []
        self._cachecomp = []
        self._cache_lock = _thread.allocate_lock()

    def _find_comp(self, dt):
        """Return the component in effect at naive wall time ``dt``."""
        if len(self._comps) == 1:
            return self._comps[0]

        dt = dt.replace(tzinfo=None)

        try:
            with self._cache_lock:
                return self._cachecomp[self._cachedate.index(
                    (dt, self._fold(dt)))]
        except ValueError:
            pass

        lastcompdt = None
        lastcomp = None

        for comp in self._comps:
            compdt = self._find_compdt(comp, dt)

            if compdt and (not lastcompdt or lastcompdt < compdt):
                lastcompdt = compdt
                lastcomp = comp

        if not lastcomp:
            # RFC says nothing about what to do when a given
            # time is before the first onset date. We'll look for the
            # first standard component, or the first component, if
            # none is found.
            for comp in self._comps:
                if not comp.isdst:
                    lastcomp = comp
                    break
            else:
                # Bug fix: this previously read ``comp[0]``, indexing a
                # component object (TypeError). Per the comment above, the
                # intended fallback is the first component overall.
                lastcomp = self._comps[0]

        with self._cache_lock:
            self._cachedate.insert(0, (dt, self._fold(dt)))
            self._cachecomp.insert(0, lastcomp)

            if len(self._cachedate) > 10:
                self._cachedate.pop()
                self._cachecomp.pop()

        return lastcomp

    def _find_compdt(self, comp, dt):
        # For a backward (negative) shift during a fold, move the probe time
        # so the rrule search lands on the earlier occurrence.
        if comp.tzoffsetdiff < ZERO and self._fold(dt):
            dt -= comp.tzoffsetdiff

        compdt = comp.rrule.before(dt, inc=True)

        return compdt

    def utcoffset(self, dt):
        if dt is None:
            return None

        return self._find_comp(dt).tzoffsetto

    def dst(self, dt):
        comp = self._find_comp(dt)
        if comp.isdst:
            return comp.tzoffsetdiff
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        return self._find_comp(dt).tzname

    def __repr__(self):
        # Restored literal: the previous line had its "<tzicalvtz %s>" text
        # stripped, leaving "" % repr(...) which raises TypeError.
        return "<tzicalvtz %s>" % repr(self._tzid)

    __reduce__ = object.__reduce__
class tzical(object):
    """
    This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
    as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.

    :param `fileobj`:
        A file or stream in iCalendar format, which should be UTF-8 encoded
        with CRLF endings.

    .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
    """
    def __init__(self, fileobj):
        global rrule
        # Imported lazily (and published as a module global) to avoid a
        # circular import between dateutil.tz and dateutil.rrule.
        from dateutil import rrule

        if isinstance(fileobj, string_types):
            self._s = fileobj
            # ical should be encoded in UTF-8 with CRLF
            fileobj = open(fileobj, 'r')
        else:
            self._s = getattr(fileobj, 'name', repr(fileobj))
            fileobj = _nullcontext(fileobj)

        self._vtz = {}

        with fileobj as fobj:
            self._parse_rfc(fobj.read())

    def keys(self):
        """
        Retrieves the available time zones as a list.
        """
        return list(self._vtz.keys())

    def get(self, tzid=None):
        """
        Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.

        :param tzid:
            If there is exactly one time zone available, omitting ``tzid``
            or passing :py:const:`None` value returns it. Otherwise a valid
            key (which can be retrieved from :func:`keys`) is required.

        :raises ValueError:
            Raised if ``tzid`` is not specified but there are either more
            or fewer than 1 zone defined.

        :returns:
            Returns either a :py:class:`datetime.tzinfo` object representing
            the relevant time zone or :py:const:`None` if the ``tzid`` was
            not found.
        """
        if tzid is None:
            if len(self._vtz) == 0:
                raise ValueError("no timezones defined")
            elif len(self._vtz) > 1:
                raise ValueError("more than one timezone available")
            tzid = next(iter(self._vtz))

        return self._vtz.get(tzid)

    def _parse_offset(self, s):
        # Parse an iCalendar UTC offset (``[+-]HHMM`` or ``[+-]HHMMSS``)
        # into a signed number of seconds.
        s = s.strip()
        if not s:
            raise ValueError("empty offset")
        if s[0] in ('+', '-'):
            signal = (-1, +1)[s[0] == '+']
            s = s[1:]
        else:
            signal = +1
        if len(s) == 4:
            return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
        elif len(s) == 6:
            return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
        else:
            raise ValueError("invalid offset: " + s)

    def _parse_rfc(self, s):
        # Parse iCalendar text, populating self._vtz with one _tzicalvtz
        # per VTIMEZONE block found.
        lines = s.splitlines()
        if not lines:
            raise ValueError("empty string")

        # Unfold: RFC 5545 folds long lines by continuing them on the next
        # line with a leading space; also drop blank lines.
        i = 0
        while i < len(lines):
            line = lines[i].rstrip()
            if not line:
                del lines[i]
            elif i > 0 and line[0] == " ":
                lines[i-1] += line[1:]
                del lines[i]
            else:
                i += 1

        tzid = None
        comps = []
        invtz = False       # inside a BEGIN:VTIMEZONE block
        comptype = None     # "STANDARD"/"DAYLIGHT" while inside a component
        for line in lines:
            if not line:
                continue
            name, value = line.split(':', 1)
            parms = name.split(';')
            if not parms:
                raise ValueError("empty property name")
            name = parms[0].upper()
            parms = parms[1:]
            if invtz:
                if name == "BEGIN":
                    if value in ("STANDARD", "DAYLIGHT"):
                        # Process component
                        pass
                    else:
                        raise ValueError("unknown component: "+value)
                    # Reset the per-component accumulator state.
                    comptype = value
                    founddtstart = False
                    tzoffsetfrom = None
                    tzoffsetto = None
                    rrulelines = []
                    tzname = None
                elif name == "END":
                    if value == "VTIMEZONE":
                        if comptype:
                            raise ValueError("component not closed: "+comptype)
                        if not tzid:
                            raise ValueError("mandatory TZID not found")
                        if not comps:
                            raise ValueError(
                                "at least one component is needed")
                        # Process vtimezone
                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
                        invtz = False
                    elif value == comptype:
                        if not founddtstart:
                            raise ValueError("mandatory DTSTART not found")
                        if tzoffsetfrom is None:
                            raise ValueError(
                                "mandatory TZOFFSETFROM not found")
                        if tzoffsetto is None:
                            # Fixed copy-paste bug: this error previously
                            # named TZOFFSETFROM, hiding the real missing
                            # property.
                            raise ValueError(
                                "mandatory TZOFFSETTO not found")
                        # Process component
                        rr = None
                        if rrulelines:
                            rr = rrule.rrulestr("\n".join(rrulelines),
                                                compatible=True,
                                                ignoretz=True,
                                                cache=True)
                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
                                              (comptype == "DAYLIGHT"),
                                              tzname, rr)
                        comps.append(comp)
                        comptype = None
                    else:
                        raise ValueError("invalid component end: "+value)
                elif comptype:
                    if name == "DTSTART":
                        # DTSTART in VTIMEZONE takes a subset of valid RRULE
                        # values under RFC 5545.
                        for parm in parms:
                            if parm != 'VALUE=DATE-TIME':
                                msg = ('Unsupported DTSTART param in ' +
                                       'VTIMEZONE: ' + parm)
                                raise ValueError(msg)
                        rrulelines.append(line)
                        founddtstart = True
                    elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
                        rrulelines.append(line)
                    elif name == "TZOFFSETFROM":
                        if parms:
                            raise ValueError(
                                "unsupported %s parm: %s " % (name, parms[0]))
                        tzoffsetfrom = self._parse_offset(value)
                    elif name == "TZOFFSETTO":
                        if parms:
                            raise ValueError(
                                "unsupported TZOFFSETTO parm: "+parms[0])
                        tzoffsetto = self._parse_offset(value)
                    elif name == "TZNAME":
                        if parms:
                            raise ValueError(
                                "unsupported TZNAME parm: "+parms[0])
                        tzname = value
                    elif name == "COMMENT":
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
                else:
                    if name == "TZID":
                        if parms:
                            raise ValueError(
                                "unsupported TZID parm: "+parms[0])
                        tzid = value
                    elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
                        pass
                    else:
                        raise ValueError("unsupported property: "+name)
            elif name == "BEGIN" and value == "VTIMEZONE":
                tzid = None
                comps = []
                invtz = True

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
    # Candidate tzfile(5) locations for "local time" and the directories
    # searched for named IANA zones, in priority order.
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo",
               "/usr/lib/zoneinfo",
               "/usr/share/lib/zoneinfo",
               "/etc/zoneinfo"]
else:
    # On Windows there are no tzfiles to search; zone data comes from the
    # registry instead.
    TZFILES = []
    TZPATHS = []
def __get_gettz():
    # Factory that builds the public ``gettz`` callable.  Wrapping it in a
    # function keeps the caching machinery (weak/strong caches, lock) private
    # to the returned instance.
    tzlocal_classes = (tzlocal,)
    if tzwinlocal is not None:
        tzlocal_classes += (tzwinlocal,)

    class GettzFunc(object):
        """
        Retrieve a time zone object from a string representation

        This function is intended to retrieve the :py:class:`tzinfo` subclass
        that best represents the time zone that would be used if a POSIX
        `TZ variable`_ were set to the same value.

        If no argument or an empty string is passed to ``gettz``, local time
        is returned:

        .. code-block:: python3

            >>> gettz()
            tzfile('/etc/localtime')

        This function is also the preferred way to map IANA tz database keys
        to :class:`tzfile` objects:

        .. code-block:: python3

            >>> gettz('Pacific/Kiritimati')
            tzfile('/usr/share/zoneinfo/Pacific/Kiritimati')

        On Windows, the standard is extended to include the Windows-specific
        zone names provided by the operating system:

        .. code-block:: python3

            >>> gettz('Egypt Standard Time')
            tzwin('Egypt Standard Time')

        Passing a GNU ``TZ`` style string time zone specification returns a
        :class:`tzstr` object:

        .. code-block:: python3

            >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
            tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')

        :param name:
            A time zone name (IANA, or, on Windows, Windows keys), location of
            a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone
            specifier. An empty string, no argument or ``None`` is interpreted
            as local time.

        :return:
            Returns an instance of one of ``dateutil``'s :py:class:`tzinfo`
            subclasses.

        .. versionchanged:: 2.7.0

            After version 2.7.0, any two calls to ``gettz`` using the same
            input strings will return the same object:

            .. code-block:: python3

                >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago')
                True

            In addition to improving performance, this ensures that
            `"same zone" semantics`_ are used for datetimes in the same zone.

        .. _`TZ variable`:
            https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html

        .. _`"same zone" semantics`:
            https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
        """
        def __init__(self):
            # Weak cache of every constructed zone plus a bounded strong
            # cache (LRU via OrderedDict) of the most recently used ones.
            self.__instances = weakref.WeakValueDictionary()
            self.__strong_cache_size = 8
            self.__strong_cache = OrderedDict()
            self._cache_lock = _thread.allocate_lock()

        def __call__(self, name=None):
            with self._cache_lock:
                rv = self.__instances.get(name, None)

                if rv is None:
                    rv = self.nocache(name=name)
                    if not (name is None
                            or isinstance(rv, tzlocal_classes)
                            or rv is None):
                        # tzlocal is slightly more complicated than the other
                        # time zone providers because it depends on environment
                        # at construction time, so don't cache that.
                        #
                        # We also cannot store weak references to None, so we
                        # will also not store that.
                        self.__instances[name] = rv
                    else:
                        # No need for strong caching, return immediately
                        return rv

                # Move (or insert) ``name`` to the MRU end of the strong cache.
                self.__strong_cache[name] = self.__strong_cache.pop(name, rv)

                if len(self.__strong_cache) > self.__strong_cache_size:
                    # Evict the least recently used entry.
                    self.__strong_cache.popitem(last=False)

            return rv

        def set_cache_size(self, size):
            # Resize the strong cache, evicting LRU entries as needed.
            with self._cache_lock:
                self.__strong_cache_size = size
                while len(self.__strong_cache) > size:
                    self.__strong_cache.popitem(last=False)

        def cache_clear(self):
            # Drop both caches; existing zone objects stay alive only while
            # callers hold references to them.
            with self._cache_lock:
                self.__instances = weakref.WeakValueDictionary()
                self.__strong_cache.clear()

        @staticmethod
        def nocache(name=None):
            """A non-cached version of gettz"""
            tz = None
            if not name:
                # Fall back to the TZ environment variable for "local time".
                try:
                    name = os.environ["TZ"]
                except KeyError:
                    pass

            if name is None or name in ("", ":"):
                # Local time: probe the well-known tzfile locations.
                for filepath in TZFILES:
                    if not os.path.isabs(filepath):
                        filename = filepath

                        for path in TZPATHS:
                            filepath = os.path.join(path, filename)

                            if os.path.isfile(filepath):
                                break
                        else:
                            continue

                    if os.path.isfile(filepath):
                        try:
                            tz = tzfile(filepath)
                            break
                        except (IOError, OSError, ValueError):
                            pass
                else:
                    tz = tzlocal()
            else:
                try:
                    # A leading ":" is POSIX syntax for "this is a pathname".
                    if name.startswith(":"):
                        name = name[1:]
                except TypeError as e:
                    if isinstance(name, bytes):
                        new_msg = "gettz argument should be str, not bytes"
                        six.raise_from(TypeError(new_msg), e)
                    else:
                        raise

                if os.path.isabs(name):
                    if os.path.isfile(name):
                        tz = tzfile(name)
                    else:
                        tz = None
                else:
                    # Named zone: search the zoneinfo directories first.
                    for path in TZPATHS:
                        filepath = os.path.join(path, name)

                        if not os.path.isfile(filepath):
                            # Some systems store e.g. "New_York" for "New York".
                            filepath = filepath.replace(' ', '_')

                            if not os.path.isfile(filepath):
                                continue

                        try:
                            tz = tzfile(filepath)
                            break
                        except (IOError, OSError, ValueError):
                            pass
                    else:
                        tz = None

                        # Then the Windows registry, the bundled zoneinfo
                        # tarball, and finally TZ-string parsing.
                        if tzwin is not None:
                            try:
                                tz = tzwin(name)
                            except (WindowsError, UnicodeEncodeError):
                                # UnicodeEncodeError is for Python 2.7 compat
                                tz = None

                        if not tz:
                            from dateutil.zoneinfo import get_zonefile_instance
                            tz = get_zonefile_instance().get(name)

                        if not tz:
                            for c in name:
                                # name is not a tzstr unless it has at least
                                # one offset. For short values of "name", an
                                # explicit for loop seems to be the fastest way
                                # To determine if a string contains a digit
                                if c in "0123456789":
                                    try:
                                        tz = tzstr(name)
                                    except ValueError:
                                        pass

                                    break
                            else:
                                if name in ("GMT", "UTC"):
                                    tz = UTC
                                elif name in time.tzname:
                                    tz = tzlocal()

            return tz

    return GettzFunc()
# Publish the singleton callable as the module-level ``gettz`` and drop the
# private factory so it cannot be called twice.
gettz = __get_gettz()
del __get_gettz
def datetime_exists(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    would fall in a gap.

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" exists in
        ``tz``.

    .. versionadded:: 2.7.0
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # A wall time exists in ``tz`` iff it survives a round trip through UTC.
    naive = dt.replace(tzinfo=None)
    roundtrip = naive.replace(tzinfo=tz).astimezone(UTC).astimezone(tz)

    return naive == roundtrip.replace(tzinfo=None)
def datetime_ambiguous(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given datetime
    is ambiguous (i.e if there are two times differentiated only by their DST
    status).

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
        is provided.)

    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
        ``None`` or not provided, the datetime's own time zone will be used.

    :return:
        Returns a boolean value whether or not the "wall time" is ambiguous in
        ``tz``.

    .. versionadded:: 2.6.0
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # Prefer the zone's own notion of ambiguity when it provides one.
    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
    if is_ambiguous_fn is not None:
        try:
            return tz.is_ambiguous(dt)
        except Exception:
            pass

    # Otherwise the time is ambiguous exactly when flipping the fold value
    # changes either the UTC offset or the DST offset.
    wall = dt.replace(tzinfo=tz)
    fold_0 = (enfold(wall, fold=0).utcoffset(), enfold(wall, fold=0).dst())
    fold_1 = (enfold(wall, fold=1).utcoffset(), enfold(wall, fold=1).dst())

    return fold_0 != fold_1
def resolve_imaginary(dt):
    """
    Given a datetime that may be imaginary, return an existing datetime.

    This function assumes that an imaginary datetime represents what the
    wall time would be in a zone had the offset transition not occurred, so
    it will always fall forward by the transition's change in offset.

    .. doctest::

        >>> from dateutil import tz
        >>> from datetime import datetime
        >>> NYC = tz.gettz('America/New_York')
        >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
        2017-03-12 03:30:00-04:00

        >>> KIR = tz.gettz('Pacific/Kiritimati')
        >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
        1995-01-02 12:30:00+14:00

    As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
    existing datetime, so a round-trip to and from UTC is sufficient to get
    an extant datetime, however, this generally "falls back" to an earlier time
    rather than falling forward to the STD side (though no guarantees are made
    about this behavior).

    :param dt:
        A :class:`datetime.datetime` which may or may not exist.

    :return:
        Returns an existing :class:`datetime.datetime`. If ``dt`` was not
        imaginary, the datetime returned is guaranteed to be the same object
        passed to the function.

    .. versionadded:: 2.7.0
    """
    # Naive or existing datetimes are returned unchanged (same object).
    if dt.tzinfo is None or datetime_exists(dt):
        return dt

    # Fall forward by the transition's change in offset, estimated by
    # sampling the offset one day before and one day after ``dt``.
    one_day = datetime.timedelta(hours=24)
    shift = (dt + one_day).utcoffset() - (dt - one_day).utcoffset()

    return dt + shift
def _datetime_to_timestamp(dt):
    """
    Convert a :class:`datetime.datetime` object to an epoch timestamp in
    seconds since January 1, 1970, ignoring the time zone.
    """
    naive = dt.replace(tzinfo=None)
    return (naive - EPOCH).total_seconds()
if sys.version_info >= (3, 6):
    def _get_supported_offset(second_offset):
        # Python 3.6+ accepts sub-minute UTC offsets natively, so the
        # offset (in seconds) can be used as-is.
        return second_offset
else:
    def _get_supported_offset(second_offset):
        # For python pre-3.6, round to full-minutes if that's not the case.
        # Python's datetime doesn't accept sub-minute timezones. Check
        # http://python.org/sf/1447945 or https://bugs.python.org/issue5288
        # for some information.
        # (The previous version also bound an unused ``old_offset`` local,
        # which has been removed.)
        return 60 * ((second_offset + 30) // 60)
try:
    # Python 3.7 feature
    from contextlib import nullcontext as _nullcontext
except ImportError:
    class _nullcontext(object):
        """
        Class for wrapping contexts so that they are passed through in a
        with statement.
        """
        def __init__(self, context):
            self.context = context

        def __enter__(self):
            return self.context

        def __exit__(*args, **kwargs):
            # ``self`` is absorbed by *args; exiting is deliberately a no-op.
            pass
# vim:ts=4:sw=4:et
venv\Lib\site-packages\dateutil\tz\win.py
# -*- coding: utf-8 -*-
"""
This module provides an interface to the native time zone data on Windows,
including :py:class:`datetime.tzinfo` implementations.
Attempting to import this module on a non-Windows platform will raise an
:py:obj:`ImportError`.
"""
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzrangebase
# Public API of this module.
__all__ = ["tzwin", "tzwinlocal", "tzres"]

ONEWEEK = datetime.timedelta(7)  # one week, for nth-weekday arithmetic

# Registry key names: the zone database under the NT and 9x layouts, and the
# machine's current local time zone settings.
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    # Probe for the NT-style registry key; fall back to the 9x layout when it
    # is absent.  Returns whichever key name actually exists on this system.
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    try:
        winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
    return TZKEYNAME


# Resolved once at import time.
TZKEYNAME = _settzkeyname()
class tzres(object):
    """
    Class for accessing ``tzres.dll``, which contains timezone name related
    resources.

    .. versionadded:: 2.5.0
    """
    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char

    def __init__(self, tzres_loc='tzres.dll'):
        # Load the user32 DLL so we can load strings from tzres
        user32 = ctypes.WinDLL('user32')

        # Specify the LoadStringW function
        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
                                       wintypes.UINT,
                                       wintypes.LPWSTR,
                                       ctypes.c_int)

        self.LoadStringW = user32.LoadStringW
        self._tzres = ctypes.WinDLL(tzres_loc)
        self.tzres_loc = tzres_loc

    def load_name(self, offset):
        """
        Load a timezone name from a DLL offset (integer).

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.load_name(112))
        'Eastern Standard Time'

        :param offset:
            A positive integer value referring to a string from the tzres dll.

        .. note::
            Offsets found in the registry are generally of the form
            ``@tzres.dll,-114``. The offset in this case is 114, not -114.
        """
        resource = self.p_wchar()
        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
        return resource[:nchar]

    def name_from_string(self, tzname_str):
        """
        Parse strings as returned from the Windows registry into the time zone
        name as defined in the registry.

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.name_from_string('@tzres.dll,-251'))
        'Dateline Daylight Time'
        >>> print(tzr.name_from_string('Eastern Standard Time'))
        'Eastern Standard Time'

        :param tzname_str:
            A timezone name string as returned from a Windows registry key.

        :return:
            Returns the localized timezone string from tzres.dll if the string
            is of the form `@tzres.dll,-offset`, else returns the input string.
        """
        if not tzname_str.startswith('@'):
            return tzname_str

        name_splt = tzname_str.split(',-')
        try:
            offset = int(name_splt[1])
        except (IndexError, ValueError):
            # Narrowed from a bare ``except:`` so that unrelated exceptions
            # (e.g. KeyboardInterrupt) are no longer masked as ValueError.
            raise ValueError("Malformed timezone string.")

        return self.load_name(offset)
class tzwinbase(tzrangebase):
    """tzinfo class based on win32's timezones available in the registry."""

    def __init__(self):
        raise NotImplementedError('tzwinbase is an abstract base class')

    def __eq__(self, other):
        # Compare on all relevant dimensions, including name.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm that is intended.
        if not isinstance(other, tzwinbase):
            return NotImplemented

        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._stddayofweek == other._stddayofweek and
                self._dstdayofweek == other._dstdayofweek and
                self._stdweeknumber == other._stdweeknumber and
                self._dstweeknumber == other._dstweeknumber and
                self._stdhour == other._stdhour and
                self._dsthour == other._dsthour and
                self._stdminute == other._stdminute and
                self._dstminute == other._dstminute and
                self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr)

    @staticmethod
    def list():
        """Return a list of all time zones known to the system."""
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
                result = [winreg.EnumKey(tzkey, i)
                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
        return result

    def display(self):
        """
        Return the display name of the time zone.
        """
        return self._display

    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.

        :param year:
            The year whose transitions you would like to query.

        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """
        if not self.hasdst:
            return None

        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)

        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)

        # Ambiguous dates default to the STD side
        dstoff -= self._dst_base_offset

        return dston, dstoff

    def _get_hasdst(self):
        # A DST month of 0 in the registry data means "no DST rule".
        return self._dstmonth != 0

    @property
    def _dst_base_offset(self):
        # The difference between DST and standard offsets (usually 1 hour).
        return self._dst_base_offset_
class tzwin(tzwinbase):
    """
    Time zone object created from the zone info in the Windows registry

    These are similar to :py:class:`dateutil.tz.tzrange` objects in that
    the time zone data is provided in the format of a single offset rule
    for either 0 or 2 time zone transitions per year.

    :param: name
        The name of a Windows time zone key, e.g. "Eastern Standard Time".
        The full list of keys can be retrieved with :func:`tzwin.list`.
    """

    def __init__(self, name):
        self._name = name

        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
            with winreg.OpenKey(handle, tzkeyname) as tzkey:
                keydict = valuestodict(tzkey)

        self._std_abbr = keydict["Std"]
        self._dst_abbr = keydict["Dlt"]

        self._display = keydict["Display"]

        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # TZI blob layout per "=3l16h": Bias, StandardBias, DaylightBias
        # (3 longs, minutes) followed by two SYSTEMTIME structs (16 shorts).
        tup = struct.unpack("=3l16h", keydict["TZI"])
        stdoffset = -tup[0]-tup[1]    # Bias + StandardBias * -1
        dstoffset = stdoffset-tup[2]  # + DaylightBias * -1
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]

        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)

    def __reduce__(self):
        # Pickle by zone name; the registry is re-read on unpickling.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """
    Class representing the local time zone information in the Windows registry

    While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time`
    module) to retrieve time zone information, ``tzwinlocal`` retrieves the
    rules directly from the Windows registry and creates an object like
    :class:`dateutil.tz.tzwin`.

    Because Windows does not have an equivalent of :func:`time.tzset`, on
    Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the
    time zone settings *at the time that the process was started*, meaning
    changes to the machine's time zone settings during the run of a program
    on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`.
    Because ``tzwinlocal`` reads the registry directly, it is unaffected by
    this issue.
    """
    def __init__(self):
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                keydict = valuestodict(tzlocalkey)

            self._std_abbr = keydict["StandardName"]
            self._dst_abbr = keydict["DaylightName"]

            # The display name lives under the zone's own key; it may be
            # absent, in which case fall back to None.
            try:
                tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
                                                           sn=self._std_abbr)
                with winreg.OpenKey(handle, tzkeyname) as tzkey:
                    _keydict = valuestodict(tzkey)
                    self._display = _keydict["Display"]
            except OSError:
                self._display = None

        stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        dstoffset = stdoffset-keydict["DaylightBias"]

        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # For reasons unclear, in this particular key, the day of week has been
        # moved to the END of the SYSTEMTIME structure.
        tup = struct.unpack("=8h", keydict["StandardStart"])

        (self._stdmonth,
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:5]

        self._stddayofweek = tup[7]

        tup = struct.unpack("=8h", keydict["DaylightStart"])

        (self._dstmonth,
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:5]

        self._dstdayofweek = tup[7]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwinlocal()"

    def __str__(self):
        # str will return the standard name, not the daylight name.
        return "tzwinlocal(%s)" % repr(self._std_abbr)

    def __reduce__(self):
        # The local zone is re-read from the registry on unpickling.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    first = datetime.datetime(year, month, 1, hour, minute)

    # Day-of-month of the first occurrence of ``dayofweek``.  This works for
    # both ISO weekdays (1-7) and Microsoft-style (0-6) because 7 % 7 == 0.
    first_match = ((dayofweek - first.isoweekday()) % 7) + 1
    candidate = first.replace(day=first_match) + (whichweek - 1) * ONEWEEK

    # whichweek == 5 ("last") can overshoot into the next month; if so, the
    # last occurrence is exactly one week earlier.
    if candidate.month != month:
        candidate -= ONEWEEK

    return candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary.

    :param key:
        An open :mod:`winreg` key handle.

    :return:
        ``{value_name: value}`` with DWORDs sign-extended to Python ints and
        ``@tzres``-style string references resolved via :class:`tzres`.
    """
    dout = {}
    size = winreg.QueryInfoKey(key)[1]
    tz_res = None  # lazily constructed; only needed for @tzres references

    for i in range(size):
        key_name, value, dtype = winreg.EnumValue(key, i)
        if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
            # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
            # that to a proper signed integer
            if value & (1 << 31):
                value = value - (1 << 32)
        elif dtype == winreg.REG_SZ:
            # If it's a reference to the tzres DLL, load the actual string
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)

            value = value.rstrip('\x00')  # Remove trailing nulls

        dout[key_name] = value

    return dout
venv\Lib\site-packages\dateutil\tz\_common.py
from six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
ZERO = timedelta(0)  # canonical "no offset" value shared by the tz classes

# Public API of this helper module.
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    if not PY2:
        # Python 3: tzname() already returns str; nothing to adapt.
        return namefunc

    @wraps(namefunc)
    def adjust_encoding(*args, **kwargs):
        name = namefunc(*args, **kwargs)
        return name if name is None else name.encode()

    return adjust_encoding
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
    # Python 3.6+ (PEP 495): ``datetime`` has a native ``fold`` attribute,
    # so ``enfold`` reduces to a plain ``replace`` call.  (The previous
    # comment here described this branch as "pre-python 3.6", which was
    # backwards — ``hasattr(datetime, 'fold')`` is true on 3.6 and later.)
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        return dt.replace(fold=fold)

else:
    class _DatetimeWithFold(datetime):
        """
        This is a class designed to provide a PEP 495-compliant interface for
        Python versions before 3.6. It is used only for dates in a fold, so
        the ``fold`` attribute is fixed at ``1``.

        .. versionadded:: 2.6.0
        """
        __slots__ = ()

        def replace(self, *args, **kwargs):
            """
            Return a datetime with the same attributes, except for those
            attributes given new values by whichever keyword arguments are
            specified. Note that tzinfo=None can be specified to create a naive
            datetime from an aware datetime with no conversion of date and time
            data.

            This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
            return a ``datetime.datetime`` even if ``fold`` is unchanged.
            """
            argnames = (
                'year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond', 'tzinfo'
            )

            # Merge positional arguments into kwargs, rejecting duplicates.
            for arg, argname in zip(args, argnames):
                if argname in kwargs:
                    raise TypeError('Duplicate argument: {}'.format(argname))
                kwargs[argname] = arg

            # Fill any unspecified fields from the current instance.
            for argname in argnames:
                if argname not in kwargs:
                    kwargs[argname] = getattr(self, argname)

            # Stay a _DatetimeWithFold only while fold remains 1.
            dt_class = self.__class__ if kwargs.get('fold', 1) else datetime

            return dt_class(**kwargs)

        @property
        def fold(self):
            # Fixed at 1 by design; fold=0 datetimes are plain datetimes.
            return 1

    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        if getattr(dt, 'fold', 0) == fold:
            return dt

        args = dt.timetuple()[:6]
        args += (dt.microsecond, dt.tzinfo)

        if fold:
            return _DatetimeWithFold(*args)
        else:
            return datetime(*args)
def _validate_fromutc_inputs(f):
    """
    The CPython version of ``fromutc`` checks that the input is a ``datetime``
    object and that ``self`` is attached as its ``tzinfo``.
    """
    @wraps(f)
    def fromutc(self, dt):
        # Mirror CPython's argument validation before delegating to ``f``.
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("dt.tzinfo is not self")
            return f(self, dt)

        raise TypeError("fromutc() requires a datetime argument")

    return fromutc
class _tzinfo(tzinfo):
    """
    Base class for all ``dateutil`` ``tzinfo`` objects.
    """

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        dt = dt.replace(tzinfo=self)

        wall_0 = enfold(dt, fold=0)
        wall_1 = enfold(dt, fold=1)

        # Ambiguous: the same wall-clock reading maps to two UTC offsets.
        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)

        return same_dt and not same_offset

    def _fold_status(self, dt_utc, dt_wall):
        """
        Determine the fold status of a "wall" datetime, given a representation
        of the same datetime as a (naive) UTC datetime. This is calculated based
        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
        datetimes, and that this offset is the actual number of hours separating
        ``dt_utc`` and ``dt_wall``.

        :param dt_utc:
            Representation of the datetime as UTC

        :param dt_wall:
            Representation of the datetime as "wall time". This parameter must
            either have a `fold` attribute or have a fold-naive
            :class:`datetime.tzinfo` attached, otherwise the calculation may
            fail.
        """
        if self.is_ambiguous(dt_wall):
            delta_wall = dt_wall - dt_utc
            # fold=1 iff the wall time matches the standard-time (second)
            # occurrence of the ambiguous reading.
            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
        else:
            _fold = 0

        return _fold

    def _fold(self, dt):
        # PEP 495 accessor that also works on pre-3.6 datetimes.
        return getattr(dt, 'fold', 0)

    def _fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """

        # Re-implement the algorithm from Python's datetime.py
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # The original datetime.py code assumes that `dst()` defaults to
        # zero during ambiguous times. PEP 495 inverts this presumption, so
        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        delta = dtoff - dtdst
        dt += delta
        # Set fold=1 so we can default to being in the fold for
        # ambiguous dates.
        dtdst = enfold(dt, fold=1).dst()
        if dtdst is None:
            raise ValueError("fromutc(): dt.dst gave inconsistent "
                             "results; cannot convert")
        return dt + dtdst

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        dt_wall = self._fromutc(dt)

        # Calculate the fold status given the two datetimes.
        _fold = self._fold_status(dt, dt_wall)

        # Set the default fold value for ambiguous dates
        return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
    """
    This is an abstract base class for time zones represented by an annual
    transition into and out of DST. Child classes should implement the following
    methods:

        * ``__init__(self, *args, **kwargs)``
        * ``transitions(self, year)`` - this is expected to return a tuple of
          datetimes representing the DST on and off transitions in standard
          time.

    A fully initialized ``tzrangebase`` subclass should also provide the
    following attributes:

        * ``hasdst``: Boolean whether or not the zone uses DST.
        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
          representing the respective UTC offsets.
        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
          abbreviations in DST and STD, respectively.
        * ``_hasdst``: Whether or not the zone has DST.

    .. versionadded:: 2.6.0
    """

    def __init__(self):
        # Abstract: subclasses must provide their own initializer.
        raise NotImplementedError('tzrangebase is an abstract base class')

    def utcoffset(self, dt):
        """Return the total UTC offset for *dt* (standard or DST offset)."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        """Return the DST adjustment at *dt* (``ZERO`` when not in DST)."""
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_base_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        """Return the short time zone abbreviation in effect at *dt*."""
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def fromutc(self, dt):
        """ Given a datetime in UTC, return local time """
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")

        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        # Get transitions - if there are none, fixed offset
        transitions = self.transitions(dt.year)
        if transitions is None:
            return dt + self.utcoffset(dt)

        # Get the transition times in UTC
        dston, dstoff = transitions

        dston -= self._std_offset
        dstoff -= self._std_offset

        utc_transitions = (dston, dstoff)
        dt_utc = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt_utc, utc_transitions)
        if isdst:
            dt_wall = dt + self._dst_offset
        else:
            dt_wall = dt + self._std_offset

        # A non-DST wall time inside the repeated hour after the DST->STD
        # transition is ambiguous: coming from UTC it is always the second
        # (fold=1) occurrence per PEP 495.
        _fold = int(not isdst and self.is_ambiguous(dt_wall))

        return enfold(dt_wall, fold=_fold)

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.

        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if not self.hasdst:
            return False

        start, end = self.transitions(dt.year)

        dt = dt.replace(tzinfo=None)
        # Ambiguous wall times span one DST offset after DST ends
        # (the repeated hour).
        return (end <= dt < end + self._dst_base_offset)

    def _isdst(self, dt):
        # Return True/False for DST status, or None when dt is None.
        if not self.hasdst:
            return False
        elif dt is None:
            return None

        transitions = self.transitions(dt.year)

        if transitions is None:
            return False

        dt = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt, transitions)

        # Handle ambiguous dates
        if not isdst and self.is_ambiguous(dt):
            # In the ambiguous hour, fold=0 (first occurrence) is DST.
            return not self._fold(dt)
        else:
            return isdst

    def _naive_isdst(self, dt, transitions):
        # Naive comparison against the transition window; the reversed
        # ordering (dstoff before dston) handles southern-hemisphere zones.
        dston, dstoff = transitions

        dt = dt.replace(tzinfo=None)

        if dston < dstoff:
            isdst = dston <= dt < dstoff
        else:
            isdst = not dstoff <= dt < dston

        return isdst

    @property
    def _dst_base_offset(self):
        # The size of the DST shift itself.
        return self._dst_offset - self._std_offset

    # Explicitly unhashable (subclasses define __eq__ elsewhere).
    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
venv\Lib\site-packages\dateutil\tz\_factories.py
from datetime import timedelta
import weakref
from collections import OrderedDict
from six.moves import _thread
class _TzSingleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super(_TzSingleton, cls).__init__(*args, **kwargs)
def __call__(cls):
if cls.__instance is None:
cls.__instance = super(_TzSingleton, cls).__call__()
return cls.__instance
class _TzFactory(type):
def instance(cls, *args, **kwargs):
"""Alternate constructor that returns a fresh instance"""
return type.__call__(cls, *args, **kwargs)
class _TzOffsetFactory(_TzFactory):
    """Metaclass caching tzoffset instances keyed on ``(name, offset)``."""

    def __init__(cls, *args, **kwargs):
        # Weak cache: entries live only while callers hold references.
        cls.__instances = weakref.WeakValueDictionary()
        # Small strong LRU cache that pins the most recent instances so the
        # weak references do not expire immediately.
        cls.__strong_cache = OrderedDict()
        cls.__strong_cache_size = 8

        cls._cache_lock = _thread.allocate_lock()

    def __call__(cls, name, offset):
        if isinstance(offset, timedelta):
            # Normalize timedeltas so equal offsets map to the same key.
            key = (name, offset.total_seconds())
        else:
            key = (name, offset)

        instance = cls.__instances.get(key, None)
        if instance is None:
            # setdefault guards against a concurrent insert between the
            # get() above and this store.
            instance = cls.__instances.setdefault(key,
                                                  cls.instance(name, offset))

        # This lock may not be necessary in Python 3. See GH issue #901
        with cls._cache_lock:
            # Re-insert at the MRU end of the strong cache.
            cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)

            # Remove an item if the strong cache is overpopulated
            if len(cls.__strong_cache) > cls.__strong_cache_size:
                cls.__strong_cache.popitem(last=False)

        return instance
class _TzStrFactory(_TzFactory):
    """Metaclass caching tzstr instances keyed on ``(string, posix_offset)``."""

    def __init__(cls, *args, **kwargs):
        # Weak cache of every live instance, plus a small strong LRU cache
        # that keeps the most recently used ones alive.
        cls.__instances = weakref.WeakValueDictionary()
        cls.__strong_cache = OrderedDict()
        cls.__strong_cache_size = 8
        cls.__cache_lock = _thread.allocate_lock()

    def __call__(cls, s, posix_offset=False):
        key = (s, posix_offset)

        cached = cls.__instances.get(key)
        if cached is None:
            # setdefault protects against a concurrent insert racing the
            # lookup above.
            cached = cls.__instances.setdefault(
                key, cls.instance(s, posix_offset))

        # This lock may not be necessary in Python 3. See GH issue #901
        with cls.__cache_lock:
            # Promote the key to most-recently-used, evicting the oldest
            # entry when the strong cache grows past its bound.
            cls.__strong_cache[key] = cls.__strong_cache.pop(key, cached)
            if len(cls.__strong_cache) > cls.__strong_cache_size:
                cls.__strong_cache.popitem(last=False)

        return cached
venv\Lib\site-packages\dateutil\tz\__init__.py
# -*- coding: utf-8 -*-
from .tz import *
from .tz import __doc__
# Public API: names re-exported from dateutil.tz.tz plus the deprecation
# warning class defined in this module.
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
           "enfold", "datetime_ambiguous", "datetime_exists",
           "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
class DeprecatedTzFormatWarning(Warning):
    """Warning raised when time zones are parsed from deprecated formats.

    Emitted by the tz parsing machinery so callers can detect (or filter)
    use of time zone string formats slated for removal.
    """
import logging
import os
import tempfile
import shutil
import json
from subprocess import check_call, check_output
from tarfile import TarFile
from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
def rebuild(filename, tag=None, format="gz", zonegroups=None, metadata=None):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*

    filename is the timezone tarball from ``ftp.iana.org/tz``.

    :param filename:
        Path to the IANA timezone source tarball.
    :param tag:
        Unused; retained for backward compatibility with existing callers.
    :param format:
        Compression format for the output tarball, passed to
        ``TarFile.open`` as ``"w:<format>"``. (Shadows the ``format``
        builtin; the name is kept for API compatibility.)
    :param zonegroups:
        Iterable of member names to extract from the tarball and feed to
        ``zic``. Defaults to no members. (Previously a mutable default
        ``[]``; ``None`` now stands in for the empty list.)
    :param metadata:
        JSON-serializable object written to the METADATA file inside the
        rebuilt tarball.
    """
    # Avoid the mutable-default-argument pitfall; None means "no groups".
    if zonegroups is None:
        zonegroups = []

    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        with TarFile.open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]

            _run_zic(zonedir, filepaths)

        # write metadata file
        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
            json.dump(metadata, f, indent=4, sort_keys=True)
        target = os.path.join(moduledir, ZONEFILENAME)
        with TarFile.open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        # Always clean up the scratch directory, even if zic fails.
        shutil.rmtree(tmpdir)
def _run_zic(zonedir, filepaths):
    """Calls the ``zic`` compiler in a compatible way to get a "fat" binary.

    Recent versions of ``zic`` default to ``-b slim``, while older versions
    don't even have the ``-b`` option (but default to "fat" binaries). The
    current version of dateutil does not support Version 2+ TZif files, which
    causes problems when used in conjunction with "slim" binaries, so this
    function is used to ensure that we always get a "fat" binary.

    :param zonedir: Output directory handed to ``zic -d``.
    :param filepaths: Source files for ``zic`` to compile.
    :raises OSError: If the ``zic`` executable cannot be found.
    """
    try:
        help_text = check_output(["zic", "--help"])
    except OSError as e:
        # zic is missing entirely: print a troubleshooting hint, then
        # propagate the original error.
        _print_on_nosuchfile(e)
        raise

    # Probe the help text: if "-b " is mentioned, the flag exists and must
    # be set to "fat" explicitly; otherwise old zic already emits fat files.
    if b"-b " in help_text:
        bloat_args = ["-b", "fat"]
    else:
        bloat_args = []

    check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
def _print_on_nosuchfile(e):
"""Print helpful troubleshooting message
e is an exception raised by subprocess.check_call()
"""
if e.errno == 2:
logging.error(
"Could not find zic. Perhaps you need to install "
"libc-bin or some other package that provides it, "
"or it's not in your PATH?")
# -*- coding: utf-8 -*-
import warnings
import json
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from dateutil.tz import tzfile as _tzfile
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]

# Name of the zone-data tarball bundled with the dateutil package.
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
# Name of the JSON metadata member inside that tarball.
METADATA_FN = 'METADATA'
class tzfile(_tzfile):
    """A picklable subclass of :class:`dateutil.tz.tzfile`.

    Unpickling re-resolves the zone via ``gettz(filename)`` instead of
    serializing the parsed transition data.
    """

    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile_stream():
    """Return a ``BytesIO`` over the bundled zoneinfo tarball.

    Returns ``None`` (after emitting a warning) when the tarball cannot
    be read.
    """
    try:
        raw = get_data(__name__, ZONEFILENAME)
    except IOError as err:  # TODO  switch to FileNotFoundError?
        warnings.warn("I/O error({0}): {1}".format(err.errno, err.strerror))
        return None
    return BytesIO(raw)
class ZoneInfoFile(object):
    """In-memory collection of time zones parsed from a zoneinfo tarball."""

    def __init__(self, zonefile_stream=None):
        # zonefile_stream: a binary file-like object over a tar archive of
        # compiled zone files; None yields an empty collection.
        if zonefile_stream is not None:
            with TarFile.open(fileobj=zonefile_stream) as tf:
                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
                              for zf in tf.getmembers()
                              if zf.isfile() and zf.name != METADATA_FN}
                # deal with links: They'll point to their parent object. Less
                # waste of memory
                # (assumes link targets were already parsed above — TODO
                # confirm member ordering in the tarball guarantees this)
                links = {zl.name: self.zones[zl.linkname]
                         for zl in tf.getmembers() if
                         zl.islnk() or zl.issym()}
                self.zones.update(links)
                try:
                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
                    metadata_str = metadata_json.read().decode('UTF-8')
                    self.metadata = json.loads(metadata_str)
                except KeyError:
                    # no metadata in tar file
                    self.metadata = None
        else:
            self.zones = {}
            self.metadata = None

    def get(self, name, default=None):
        """
        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
        for retrieving zones from the zone dictionary.

        :param name:
            The name of the zone to retrieve. (Generally IANA zone names)

        :param default:
            The value to return in the event of a missing key.

        .. versionadded:: 2.6.0
        """
        return self.zones.get(name, default)
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: Remove after deprecation period.
# Holds at most one lazily-created ZoneInfoFile, shared by the deprecated
# gettz() and gettz_db_metadata() module functions below.
_CLASS_ZONE_INSTANCE = []
def get_zonefile_instance(new_instance=False):
    """
    This is a convenience function which provides a :class:`ZoneInfoFile`
    instance using the data provided by the ``dateutil`` package. By default, it
    caches a single instance of the ZoneInfoFile object and returns that.

    :param new_instance:
        If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
        used as the cached instance for the next call. Otherwise, new instances
        are created only as necessary.

    :return:
        Returns a :class:`ZoneInfoFile` object.

    .. versionadded:: 2.6
    """
    cached = None
    if not new_instance:
        # Reuse the instance stashed on the function itself, if any.
        cached = getattr(get_zonefile_instance, '_cached_instance', None)

    if cached is None:
        cached = ZoneInfoFile(getzoneinfofile_stream())
        get_zonefile_instance._cached_instance = cached

    return cached
def gettz(name):
    """
    Retrieve a time zone from the zoneinfo tarball packaged with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object, or ``None``
        when *name* is not present in the bundled database.

    .. warning::
        It is generally inadvisable to use this function; it exists only for
        API compatibility with earlier versions. It is *not* equivalent to
        ``dateutil.tz.gettz()``, which favors the system zoneinfo and so may
        be more up to date than the dateutil-specific data accessed here.

    .. deprecated:: 2.6
        If you need a specific zoneinfo file over the system zoneinfo,
        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` (see
        :func:`get_zonefile_instance`) and call its ``get`` method instead.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily populate the module-global ZoneInfoFile on first use.
    if not _CLASS_ZONE_INSTANCE:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    # NOTE(review): the warning text below reads oddly ("files, ZoneInfoFile
    # object and query") and appears to be missing words; it is a runtime
    # string, so it is left untouched here.
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "ZoneInfoFile object and query the 'metadata' attribute "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Reuse (or lazily create) the module-global ZoneInfoFile instance.
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
"""CFF2 to CFF converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.cffLib import (
TopDictIndex,
buildOrder,
buildDefaults,
topDictOperators,
privateDictOperators,
)
from .width import optimizeWidths
from collections import defaultdict
import logging
# Public API of this converter module.
__all__ = ["convertCFF2ToCFF", "main"]

# Shared cffLib logger so messages integrate with fontTools' logging config.
log = logging.getLogger("fontTools.cffLib")
def _convertCFF2ToCFF(cff, otFont):
    """Converts this object from CFF2 format to CFF format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    The CFF2 font cannot be variable. (TODO Accept those and convert to the
    default instance?)

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)

    :param cff: The decompiled CFF2 ``CFFFontSet`` to downgrade in place.
    :param otFont: The owning ``TTFont``; its ``hmtx`` metrics are used to
        assign charstring widths.
    :raises ValueError: If the CFF2 font is variable (has a VarStore).
    """
    cff.major = 1

    topDictData = TopDictIndex(None)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        item.cff2GetGlyphOrder = None
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]

    if hasattr(topDict, "VarStore"):
        raise ValueError("Variable CFF2 font cannot be converted to CFF format.")

    opOrder = buildOrder(topDictOperators)
    topDict.order = opOrder
    # BUGFIX: iterate over a snapshot of the keys — deleting from a dict
    # while iterating its live keys() view raises RuntimeError in Python 3.
    for key in list(topDict.rawDict.keys()):
        if key not in opOrder:
            del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    fdArray = topDict.FDArray
    charStrings = topDict.CharStrings

    defaults = buildDefaults(privateDictOperators)
    order = buildOrder(privateDictOperators)
    for fd in fdArray:
        fd.setCFF2(False)
        privateDict = fd.Private
        privateDict.order = order
        # Fill in CFF1 defaults for operators CFF2 omitted.
        for key in order:
            if key not in privateDict.rawDict and key in defaults:
                privateDict.rawDict[key] = defaults[key]
        # BUGFIX: snapshot the keys before deleting entries (see above).
        for key in list(privateDict.rawDict.keys()):
            if key not in order:
                del privateDict.rawDict[key]
                if hasattr(privateDict, key):
                    delattr(privateDict, key)

    # CFF1 charstrings must terminate; CFF2 ones do not carry these ops.
    for cs in charStrings.values():
        cs.decompile()
        cs.program.append("endchar")
    for subrSets in [cff.GlobalSubrs] + [
        getattr(fd.Private, "Subrs", []) for fd in fdArray
    ]:
        for cs in subrSets:
            cs.program.append("return")

    # Add (optimal) width to CharStrings that need it.
    widths = defaultdict(list)
    metrics = otFont["hmtx"].metrics
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:  # was "== None"; identity comparison is the idiom
            fdIndex = 0
        widths[fdIndex].append(metrics[glyphName][0])
    for fdIndex, widthList in widths.items():
        bestDefault, bestNominal = optimizeWidths(widthList)
        private = fdArray[fdIndex].Private
        private.defaultWidthX = bestDefault
        private.nominalWidthX = bestNominal
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
            fdIndex = 0
        private = fdArray[fdIndex].Private
        width = metrics[glyphName][0]
        # Only non-default widths are encoded, relative to nominalWidthX.
        if width != private.defaultWidthX:
            cs.program.insert(0, width - private.nominalWidthX)

    # Rename glyphs to CID-keyed names ("cid<N>", with glyph 0 as .notdef).
    mapping = {
        name: ("cid" + str(n) if n else ".notdef")
        for n, name in enumerate(topDict.charset)
    }
    topDict.charset = [
        "cid" + str(n) if n else ".notdef" for n in range(len(topDict.charset))
    ]
    charStrings.charStrings = {
        mapping[name]: v for name, v in charStrings.charStrings.items()
    }

    # I'm not sure why the following is *not* necessary. And it breaks
    # the output if I add it.
    # topDict.ROS = ("Adobe", "Identity", 0)
def convertCFF2ToCFF(font, *, updatePostTable=True):
    """Convert the ``CFF2`` table of *font* to a ``CFF `` table, in place.

    :param font: A ``TTFont`` containing a decompiled ``CFF2`` table.
    :param updatePostTable: When True and *font* has a ``post`` table of
        format 2.0, downgrade it to 3.0, as required for CFF-flavored fonts.
    """
    cff = font["CFF2"].cff
    _convertCFF2ToCFF(cff, font)
    del font["CFF2"]
    table = font["CFF "] = newTable("CFF ")
    table.cff = cff

    if updatePostTable and "post" in font:
        # Only version supported for fonts with CFF table is 0x00030000 not 0x20000
        post = font["post"]
        if post.formatType == 2.0:
            post.formatType = 3.0
def main(args=None):
    """Convert a CFF2 OTF font to a CFF OTF font."""
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse

    # BUGFIX: the prog name, description, and help strings previously said
    # "CFFToCFF2" / "Upgrade a CFF font to CFF2." — copy-pasted from the
    # CFF->CFF2 converter. This tool goes the other way (CFF2 -> CFF) and
    # its default output suffix is "-CFF".
    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFF2ToCFF",
        description="Convert a CFF2 font to CFF.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF2 table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    loggingGroup = parser.add_mutually_exclusive_group(required=False)
    loggingGroup.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    loggingGroup.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    options = parser.parse_args(args)

    from fontTools import configLogger

    configLogger(
        level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
    )

    import os

    infile = options.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    outfile = (
        makeOutputFileName(infile, overWrite=True, suffix="-CFF")
        if not options.output
        else options.output
    )

    font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)

    convertCFF2ToCFF(font)

    log.info(
        "Saving %s",
        outfile,
    )
    font.save(outfile)
"""CFF to CFF2 converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.cffLib import (
TopDictIndex,
FDArrayIndex,
FontDict,
buildOrder,
topDictOperators,
privateDictOperators,
topDictOperators2,
privateDictOperators2,
)
from io import BytesIO
import logging
# Public API of this converter module.
__all__ = ["convertCFFToCFF2", "main"]

# Shared cffLib logger so messages integrate with fontTools' logging config.
log = logging.getLogger("fontTools.cffLib")
class _NominalWidthUsedError(Exception):
def __add__(self, other):
raise self
def __radd__(self, other):
raise self
def _convertCFFToCFF2(cff, otFont):
    """Converts this object from CFF format to CFF2 format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)"""
    # Clean up T2CharStrings
    topDict = cff.topDictIndex[0]
    fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None
    charStrings = topDict.CharStrings
    globalSubrs = cff.GlobalSubrs
    # Local subrs come per-FD when there is an FDArray; otherwise from the
    # single top-level Private dict (if it has any).
    localSubrs = (
        [getattr(fd.Private, "Subrs", []) for fd in fdArray]
        if fdArray
        else (
            [topDict.Private.Subrs]
            if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs")
            else []
        )
    )

    # Force-decompile every charstring so programs are editable lists.
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        cs.decompile()

    # Clean up subroutines first
    for subrs in [globalSubrs] + localSubrs:
        for subr in subrs:
            program = subr.program
            # Truncate at the first "return"/"endchar": CFF2 subroutines
            # have no explicit terminators.
            i = j = len(program)
            try:
                i = program.index("return")
            except ValueError:
                pass
            try:
                j = program.index("endchar")
            except ValueError:
                pass
            program[min(i, j) :] = []

    # Clean up glyph charstrings
    removeUnusedSubrs = False
    nominalWidthXError = _NominalWidthUsedError()
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        program = cs.program

        thisLocalSubrs = (
            localSubrs[fdIndex]
            if fdIndex is not None
            else (
                getattr(topDict.Private, "Subrs", [])
                if hasattr(topDict, "Private")
                else []
            )
        )

        # Intentionally use custom type for nominalWidthX, such that any
        # CharString that has an explicit width encoded will throw back to us.
        extractor = T2WidthExtractor(
            thisLocalSubrs,
            globalSubrs,
            nominalWidthXError,
            0,
        )
        try:
            extractor.execute(cs)
        except _NominalWidthUsedError:
            # Program has explicit width. We want to drop it, but can't
            # just pop the first number since it may be a subroutine call.
            # Instead, when seeing that, we embed the subroutine and recurse.
            # If this ever happened, we later prune unused subroutines.
            while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]:
                removeUnusedSubrs = True
                subrNumber = program.pop(0)
                assert isinstance(subrNumber, int), subrNumber
                op = program.pop(0)
                bias = extractor.localBias if op == "callsubr" else extractor.globalBias
                subrNumber += bias
                subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs
                subrProgram = subrSet[subrNumber].program
                program[:0] = subrProgram
            # Now pop the actual width
            assert len(program) >= 1, program
            program.pop(0)

        # CFF2 charstrings carry no "endchar".
        if program and program[-1] == "endchar":
            program.pop()

    if removeUnusedSubrs:
        cff.remove_unused_subroutines()

    # Upconvert TopDict

    cff.major = 2
    cff2GetGlyphOrder = cff.otFont.getGlyphOrder
    topDictData = TopDictIndex(None, cff2GetGlyphOrder)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]
    if hasattr(topDict, "Private"):
        privateDict = topDict.Private
    else:
        privateDict = None
    opOrder = buildOrder(topDictOperators2)
    topDict.order = opOrder
    topDict.cff2GetGlyphOrder = cff2GetGlyphOrder

    if not hasattr(topDict, "FDArray"):
        # CFF2 requires an FDArray; synthesize one around the existing
        # (possibly absent) top-level Private dict.
        fdArray = topDict.FDArray = FDArrayIndex()
        fdArray.strings = None
        fdArray.GlobalSubrs = topDict.GlobalSubrs
        topDict.GlobalSubrs.fdArray = fdArray
        charStrings = topDict.CharStrings
        if charStrings.charStringsAreIndexed:
            charStrings.charStringsIndex.fdArray = fdArray
        else:
            charStrings.fdArray = fdArray
        fontDict = FontDict()
        fontDict.setCFF2(True)
        fdArray.append(fontDict)
        fontDict.Private = privateDict
        privateOpOrder = buildOrder(privateDictOperators2)
        if privateDict is not None:
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in privateDict.rawDict:
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    else:
        # clean up the PrivateDicts in the fdArray
        fdArray = topDict.FDArray
        privateOpOrder = buildOrder(privateDictOperators2)
        for fontDict in fdArray:
            fontDict.setCFF2(True)
            for key in list(fontDict.rawDict.keys()):
                if key not in fontDict.order:
                    del fontDict.rawDict[key]
                    if hasattr(fontDict, key):
                        delattr(fontDict, key)

            privateDict = fontDict.Private
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in list(privateDict.rawDict.keys()):
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    # Now delete up the deprecated topDict operators from CFF 1.0
    for entry in topDictOperators:
        key = entry[1]
        # We seem to need to keep the charset operator for now,
        # or we fail to compile with some fonts, like AdditionFont.otf.
        # I don't know which kind of CFF font those are. But keeping
        # charset seems to work. It will be removed when we save and
        # read the font again.
        #
        # AdditionFont.otf has ... (the rest of this upstream comment was
        # lost to markup stripping).
        if key == "charset":
            continue
        if key not in opOrder:
            if key in topDict.rawDict:
                del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    # TODO(behdad): What does the following comment even mean? Both CFF and CFF2
    # use the same T2Charstring class. I *think* what it means is that the CharStrings
    # were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc
    # on them. At least that's what I understand. It's probably safe to remove this
    # and just set vstore where needed.
    #
    # See comment above about charset as well.

    # At this point, the Subrs and Charstrings are all still T2Charstring class
    # easiest to fix this by compiling, then decompiling again
    file = BytesIO()
    cff.compile(file, otFont, isCFF2=True)
    file.seek(0)
    cff.decompile(file, otFont, isCFF2=True)
def convertCFFToCFF2(font):
    """Replace *font*'s ``CFF `` table with an equivalent ``CFF2`` table.

    The conversion is performed in place on the font object.
    """
    cffData = font["CFF "].cff
    # Drop the old table entry before converting; the converter only needs
    # the CFFFontSet object and the owning font.
    del font["CFF "]
    _convertCFFToCFF2(cffData, font)
    cff2Table = font["CFF2"] = newTable("CFF2")
    cff2Table.cff = cffData
def main(args=None):
    """Command-line entry point: upgrade a CFF OTF font to a CFF2 OTF font."""
    import sys
    import os
    import argparse

    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFFToCFF2",
        description="Upgrade a CFF font to CFF2.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF2.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    verbosity = parser.add_mutually_exclusive_group(required=False)
    verbosity.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    verbosity.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    parsed = parser.parse_args(args)

    from fontTools import configLogger

    if parsed.verbose:
        level = "DEBUG"
    elif parsed.quiet:
        level = "ERROR"
    else:
        level = "INFO"
    configLogger(level=level)

    infile = parsed.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    if parsed.output:
        outfile = parsed.output
    else:
        outfile = makeOutputFileName(infile, overWrite=True, suffix="-CFF2")

    font = TTFont(infile, recalcTimestamp=parsed.recalc_timestamp, recalcBBoxes=False)
    convertCFFToCFF2(font)
    log.info("Saving %s", outfile)
    font.save(outfile)
# -*- coding: utf-8 -*-
"""T2CharString operator specializer and generalizer.
PostScript glyph drawing operations can be expressed in multiple different
ways. For example, as well as the ``lineto`` operator, there is also a
``hlineto`` operator which draws a horizontal line, removing the need to
specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
vertical line, removing the need to specify a ``dy`` coordinate. As well
as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
into lists of operations, this module allows for conversion between general
and specific forms of the operation.
"""
from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
    """Turn a whitespace-separated string (or a pre-split sequence) of
    tokens into a T2CharString program list, converting numeric tokens
    to ``int`` or ``float``."""
    tokens = string.split() if isinstance(string, str) else string
    program = []
    for token in tokens:
        # Prefer int, fall back to float, otherwise keep as-is (operator).
        for caster in (int, float):
            try:
                token = caster(token)
                break
            except ValueError:
                continue
        program.append(token)
    return program
def programToString(program):
    """Render a T2CharString program list as a space-separated string."""
    return " ".join(map(str, program))
def programToCommands(program, getNumRegions=None):
    """Takes a T2CharString program list and returns list of commands.

    Each command is a two-tuple of commandname,arg-list. The commandname might
    be empty string if no commandname shall be emitted (used for glyph width,
    hintmask/cntrmask argument, as well as stray arguments at the end of the
    program (🤷)).

    'getNumRegions' may be None, or a callable object. It must return the
    number of regions. 'getNumRegions' takes a single argument, vsindex. It
    returns the numRegions for the vsindex.

    The Charstring may or may not start with a width value. If the first
    non-blend operator has an odd number of arguments, then the first argument is
    a width, and is popped off. This is complicated with blend operators, as
    there may be more than one before the first hint or moveto operator, and each
    one reduces several arguments to just one list argument. We have to sum the
    number of arguments that are not part of the blend arguments, and all the
    'numBlends' values. We could instead have said that by definition, if there
    is a blend operator, there is no width value, since CFF2 Charstrings don't
    have width values. I discussed this with Behdad, and we are allowing for an
    initial width value in this case because developers may assemble a CFF2
    charstring from CFF Charstrings, which could have width values.
    """
    seenWidthOp = False
    vsIndex = 0
    lenBlendStack = 0  # number of raw args represented by blend lists so far
    lastBlendIndex = 0  # stack position just past the most recent blend list
    commands = []
    stack = []
    it = iter(program)

    for token in it:
        if not isinstance(token, str):
            # Numbers simply accumulate on the argument stack.
            stack.append(token)
            continue

        if token == "blend":
            assert getNumRegions is not None
            numSourceFonts = 1 + getNumRegions(vsIndex)
            # replace the blend op args on the stack with a single list
            # containing all the blend op args.
            numBlends = stack[-1]
            numBlendArgs = numBlends * numSourceFonts + 1
            # replace first blend op by a list of the blend ops.
            stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
            lenStack = len(stack)
            lenBlendStack += numBlends + lenStack - 1
            lastBlendIndex = lenStack
            # if a blend op exists, this is or will be a CFF2 charstring.
            continue

        elif token == "vsindex":
            vsIndex = stack[-1]
            assert type(vsIndex) is int

        elif (not seenWidthOp) and token in {
            "hstem",
            "hstemhm",
            "vstem",
            "vstemhm",
            "cntrmask",
            "hintmask",
            "hmoveto",
            "vmoveto",
            "rmoveto",
            "endchar",
        }:
            # First hint/moveto/endchar operator: this is the only place an
            # optional leading width value can appear.
            seenWidthOp = True
            parity = token in {"hmoveto", "vmoveto"}
            if lenBlendStack:
                # lenBlendStack has the number of args represented by the last blend
                # arg and all the preceding args. We need to now add the number of
                # args following the last blend arg.
                numArgs = lenBlendStack + len(stack[lastBlendIndex:])
            else:
                numArgs = len(stack)
            # A width is present when the arg-count parity doesn't match
            # the operator's expected parity.
            if numArgs and (numArgs % 2) ^ parity:
                width = stack.pop(0)
                commands.append(("", [width]))

        if token in {"hintmask", "cntrmask"}:
            if stack:
                commands.append(("", stack))
            commands.append((token, []))
            # The mask byte(s) follow the operator in the program stream.
            commands.append(("", [next(it)]))
        else:
            commands.append((token, stack))
        stack = []
    if stack:
        commands.append(("", stack))
    return commands
def _flattenBlendArgs(args):
token_list = []
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append("blend")
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
    """Takes a commands list as returned by programToCommands() and converts
    it back to a T2CharString program list."""
    program = []
    for op, args in commands:
        # Blend lists inside the args must be flattened back into tokens.
        has_blend = any(isinstance(arg, list) for arg in args)
        program.extend(_flattenBlendArgs(args) if has_blend else args)
        if op:
            program.append(op)
    return program
def _everyN(el, n):
"""Group the list el into groups of size n"""
l = len(el)
if l % n != 0:
raise ValueError(el)
for i in range(0, l, n):
yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2:
raise ValueError(args)
yield ("rmoveto", args)
@staticmethod
def hmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [0, args[0]])
@staticmethod
def rlineto(args):
    """Split a generalized ``rlineto`` arg run into one op per (dx, dy) pair."""
    if not args:
        raise ValueError(args)
    for pair in _everyN(args, 2):
        yield ("rlineto", pair)
@staticmethod
def hlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [next(it), 0])
yield ("rlineto", [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [0, next(it)])
yield ("rlineto", [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
    """Split a generalized ``rrcurveto`` arg run into one op per six args."""
    if not args:
        raise ValueError(args)
    for sextet in _everyN(args, 6):
        yield ("rrcurveto", sextet)
@staticmethod
def hhcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
l = len(args)
if l < 8 or l % 6 != 2:
raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ("rrcurveto", args)
yield ("rlineto", last_args)
@staticmethod
def rlinecurve(args):
l = len(args)
if l < 8 or l % 2 != 0:
raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ("rlineto", args)
yield ("rrcurveto", last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
# recursive blend op calls, some of these args may also
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [
i
for e in blendList
for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
]
else:
args = blendList
# We now know that blendList contains a blend op argument list, even if
# some of the args are lists that each contain a blend op argument list.
# Convert from:
# [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
# to:
# [ [x0] + [delta tuple for x0],
# ...,
# [xn] + [delta tuple for xn] ]
numBlends = args[-1]
# Can't use args.pop() when the args are being used in a nested list
# comprehension. See calling context
args = args[:-1]
l = len(args)
numRegions = l // numBlends - 1
if not (numBlends * (numRegions + 1) == l):
raise ValueError(blendList)
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [
deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
]
blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
return blend_args
def generalizeCommands(commands, ignoreErrors=False):
    """Convert a commands list to its fully general form.

    Decomposes every specialized operator (hlineto, hhcurveto, ...) into
    equivalent rmoveto/rlineto/rrcurveto commands and expands blend ops in
    argument lists.  When ignoreErrors is True, a command with an invalid
    number of arguments is stored as data — ("", args) followed by
    ("", [op]) — instead of raising ValueError.
    """
    result = []
    mapping = _GeneralizerDecombinerCommandsMap
    for op, args in commands:
        # First, generalize any blend args in the arg list.
        if any(isinstance(arg, list) for arg in args):
            try:
                args = [
                    n
                    for arg in args
                    for n in (
                        _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
                    )
                ]
            except ValueError:
                if ignoreErrors:
                    # Store op as data, such that consumers of commands do not have to
                    # deal with incorrect number of arguments.
                    result.append(("", args))
                    result.append(("", [op]))
                    # Bug fix: without this `continue`, the malformed command
                    # fell through to the decombiner below and was stored a
                    # second time.
                    continue
                else:
                    raise

        func = getattr(mapping, op, None)
        if func is None:
            # Unknown/unmapped op (e.g. hints, masks): pass through as-is.
            result.append((op, args))
            continue
        try:
            for command in func(args):
                result.append(command)
        except ValueError:
            if ignoreErrors:
                # Store op as data, such that consumers of commands do not have to
                # deal with incorrect number of arguments.
                result.append(("", args))
                result.append(("", [op]))
            else:
                raise
    return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
    """Convert a T2CharString program to its fully generalized equivalent."""
    commands = programToCommands(program, getNumRegions)
    return commandsToProgram(generalizeCommands(commands, **kwargs))
def _categorizeVector(v):
"""
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
it returns a single zero still.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return "0", v[:1]
else:
return "v", v[1:]
else:
if not v[1]:
return "h", v[:1]
else:
return "r", v
def _mergeCategories(a, b):
if a == "0":
return b
if b == "0":
return a
if a == b:
return a
return None
def _negateCategory(a):
if a == "h":
return "v"
if a == "v":
return "h"
assert a in "0r"
return a
def _convertToBlendCmds(args):
    """Pack runs of blend-list args into combined blend argument lists.

    Scalar args pass through unchanged; consecutive list-valued (blended)
    args are merged — as many as fit under the CFF2 stack limit — into a
    single blend argument list laid out as defaults, then deltas, then the
    blend count.
    """
    # return a list of blend commands, and
    # the remaining non-blended args, if any.
    num_args = len(args)
    stack_use = 0
    new_args = []
    i = 0
    while i < num_args:
        arg = args[i]
        i += 1
        if not isinstance(arg, list):
            new_args.append(arg)
            stack_use += 1
        else:
            prev_stack_use = stack_use
            # The arg is a tuple of blend values.
            # These are each (master 0,delta 1..delta n, 1)
            # Combine as many successive tuples as we can,
            # up to the max stack limit.
            num_sources = len(arg) - 1
            blendlist = [arg]
            stack_use += 1 + num_sources  # 1 for the num_blends arg
            # if we are here, max stack is the CFF2 max stack.
            # I use the CFF2 max stack limit here rather than
            # the 'maxstack' chosen by the client, as the default
            # maxstack may have been used unintentionally. For all
            # the other operators, this just produces a little less
            # optimization, but here it puts a hard (and low) limit
            # on the number of source fonts that can be used.
            #
            # Make sure the stack depth does not exceed (maxstack - 1), so
            # that subroutinizer can insert subroutine calls at any point.
            while (
                (i < num_args)
                and isinstance(args[i], list)
                and stack_use + num_sources < maxStackLimit
            ):
                blendlist.append(args[i])
                i += 1
                stack_use += num_sources
            # blendList now contains as many single blend tuples as can be
            # combined without exceeding the CFF2 stack limit.
            num_blends = len(blendlist)
            # append the 'num_blends' default font values
            blend_args = []
            for arg in blendlist:
                blend_args.append(arg[0])
            for arg in blendlist:
                # Each single blend tuple ends with a 1 marker (see
                # _convertBlendOpToArgs); strip it when merging deltas.
                assert arg[-1] == 1
                blend_args.extend(arg[1:-1])
            blend_args.append(num_blends)
            new_args.append(blend_args)
            # After the blend op executes, only num_blends values remain
            # on the stack.
            stack_use = prev_stack_use + num_blends
    return new_args
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
if len(a) != len(b) or a[-1] != b[-1]:
raise ValueError()
return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
else:
a, b = b, a
if isinstance(a, list):
assert a[-1] == 1
return [_addArgs(a[0], b)] + a[1:]
return a + b
def _argsStackUse(args):
stackLen = 0
maxLen = 0
for arg in args:
if type(arg) is list:
# Blended arg
maxLen = max(maxLen, stackLen + _argsStackUse(arg))
stackLen += arg[-1]
else:
stackLen += 1
return max(stackLen, maxLen)
def specializeCommands(
    commands,
    ignoreErrors=False,
    generalizeFirst=True,
    preserveTopology=False,
    maxstack=48,
):
    """Convert a commands list into an optimized, specialized one.

    Takes a commands list as returned by programToCommands() and rewrites
    it using the specialized T2 operators (hlineto, hhcurveto, ...) so the
    re-encoded program is as small as possible.

    ignoreErrors: passed through to generalizeCommands().
    generalizeFirst: run generalizeCommands() first; disable only when the
        input is already fully generalized.
    preserveTopology: skip optimizations that change point numbers.
    maxstack: operand stack limit; merges keep stack use below maxstack - 1
        so the subroutinizer can insert subroutine calls at any point.
    """
    # We perform several rounds of optimizations.  They are carefully ordered and are:
    #
    # 0. Generalize commands.
    #    This ensures that they are in our expected simple form, with each line/curve only
    #    having arguments for one segment, and using the generic form (rlineto/rrcurveto).
    #    If caller is sure the input is in this form, they can turn off generalization to
    #    save time.
    #
    # 1. Combine successive rmoveto operations.
    #
    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #    We specialize into some, made-up, variants as well, which simplifies following
    #    passes.
    #
    # 3. Merge or delete redundant operations, to the extent requested.
    #    OpenType spec declares point numbers in CFF undefined.  As such, we happily
    #    change topology.  If client relies on point numbers (in GPOS anchors, or for
    #    hinting purposes(what?)) they can turn this off.
    #
    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    #
    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    #
    # 6. Resolve any remaining made-up operators into real operators.
    #
    #    I have convinced myself that this produces optimal bytecode (except for, possibly
    #    one byte each time maxstack size prohibits combining.)  YMMV, but you'd be wrong. :-)
    #    A dynamic-programming approach can do the same but would be significantly slower.
    #
    # 7. For any args which are blend lists, convert them to a blend command.

    # 0. Generalize commands.
    if generalizeFirst:
        commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
    else:
        commands = list(commands)  # Make copy since we modify in-place later.

    # 1. Combine successive rmoveto operations.
    for i in range(len(commands) - 1, 0, -1):
        if "rmoveto" == commands[i][0] == commands[i - 1][0]:
            v1, v2 = commands[i - 1][1], commands[i][1]
            commands[i - 1] = (
                "rmoveto",
                [_addArgs(v1[0], v2[0]), _addArgs(v1[1], v2[1])],
            )
            del commands[i]

    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #
    # We, in fact, specialize into more, made-up, variants that special-case when both
    # X and Y components are zero.  This simplifies the following optimization passes.
    # This case is rare, but OCD does not let me skip it.
    #
    # After this round, we will have four variants that use the following mnemonics:
    #
    #  - 'r' for relative,   ie. non-zero X and non-zero Y,
    #  - 'h' for horizontal, ie. non-zero X and zero Y,
    #  - 'v' for vertical,   ie. zero X and non-zero Y,
    #  - '0' for zeros,      ie. zero X and zero Y.
    #
    # The '0' pseudo-operators are not part of the spec, but help simplify the following
    # optimization rounds.  We resolve them at the end.  So, after this, we will have four
    # moveto and four lineto variants:
    #
    #  - 0moveto, 0lineto
    #  - hmoveto, hlineto
    #  - vmoveto, vlineto
    #  - rmoveto, rlineto
    #
    # and sixteen curveto variants.  For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dy0, and dy3 are zero but not dx3.
    # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
    #
    # There are nine different variants of curves without the '0'.  Those nine map exactly
    # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
    # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
    # arguments and one without.  Eg. an hhcurveto with an extra argument (odd number of
    # arguments) is in fact an rhcurveto.  The operators in the spec are designed such that
    # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
    #
    # Of the curve types with '0', the 00curveto is equivalent to a lineto variant.  The rest
    # of the curve types with a 0 need to be encoded as a h or v variant.  Ie. a '0' can be
    # thought of a "don't care" and can be used as either an 'h' or a 'v'.  As such, we always
    # encode a number 0 as argument when we use a '0' variant.  Later on, we can just substitute
    # the '0' with either 'h' or 'v' and it works.
    #
    # When we get to curve splines however, things become more complicated...  XXX finish this.
    # There's one more complexity with splines.  If one side of the spline is not horizontal or
    # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
    # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
    # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
    # This limits our merge opportunities later.
    #
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"rmoveto", "rlineto"}:
            c, args = _categorizeVector(args)
            commands[i] = c + op[1:], args
            continue

        if op == "rrcurveto":
            c1, args1 = _categorizeVector(args[:2])
            c2, args2 = _categorizeVector(args[-2:])
            commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
            continue

    # 3. Merge or delete redundant operations, to the extent requested.
    #
    # TODO
    # A 0moveto that comes before all other path operations can be removed.
    # though I find conflicting evidence for this.
    #
    # TODO
    # "If hstem and vstem hints are both declared at the beginning of a
    # CharString, and this sequence is followed directly by the hintmask or
    # cntrmask operators, then the vstem hint operator (or, if applicable,
    # the vstemhm operator) need not be included."
    #
    # "The sequence and form of a CFF2 CharString program may be represented as:
    # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
    #
    # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
    #
    # For Type2 CharStrings the sequence is:
    # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"

    # Some other redundancies change topology (point numbers).
    if not preserveTopology:
        for i in range(len(commands) - 1, -1, -1):
            op, args = commands[i]

            # A 00curveto is demoted to a (specialized) lineto.
            if op == "00curveto":
                assert len(args) == 4
                c, args = _categorizeVector(args[1:3])
                op = c + "lineto"
                commands[i] = op, args
                # and then...

            # A 0lineto can be deleted.
            if op == "0lineto":
                del commands[i]
                continue

            # Merge adjacent hlineto's and vlineto's.
            # In CFF2 charstrings from variable fonts, each
            # arg item may be a list of blendable values, one from
            # each source font.
            if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
                _, other_args = commands[i - 1]
                assert len(args) == 1 and len(other_args) == 1
                try:
                    new_args = [_addArgs(args[0], other_args[0])]
                except ValueError:
                    continue
                commands[i - 1] = (op, new_args)
                del commands[i]
                continue

    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    for i in range(1, len(commands) - 1):
        op, args = commands[i]
        prv, nxt = commands[i - 1][0], commands[i + 1][0]

        if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
            assert len(args) == 1
            args = [0, args[0]] if op[0] == "v" else [args[0], 0]
            commands[i] = ("rlineto", args)
            continue

        if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
            assert (op[0] == "r") ^ (op[1] == "r")
            if op[0] == "v":
                pos = 0
            elif op[0] != "r":
                pos = 1
            elif op[1] == "v":
                pos = 4
            else:
                pos = 5
            # Insert, while maintaining the type of args (can be tuple or list).
            args = args[:pos] + type(args)((0,)) + args[pos:]
            commands[i] = ("rrcurveto", args)
            continue

    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    stackUse = _argsStackUse(commands[-1][1]) if commands else 0
    for i in range(len(commands) - 1, 0, -1):
        op1, args1 = commands[i - 1]
        op2, args2 = commands[i]
        new_op = None

        # Merge logic...
        if {op1, op2} <= {"rlineto", "rrcurveto"}:
            if op1 == op2:
                new_op = op1
            else:
                l = len(args2)
                if op2 == "rrcurveto" and l == 6:
                    new_op = "rlinecurve"
                elif l == 2:
                    new_op = "rcurveline"

        elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
            new_op = op2

        elif {op1, op2} == {"vlineto", "hlineto"}:
            new_op = op1

        elif "curveto" == op1[2:] == op2[2:]:
            d0, d1 = op1[:2]
            d2, d3 = op2[:2]

            # Splines starting or ending in 'r' are only encodable in
            # limited ways; bail out of the impossible combinations.
            if d1 == "r" or d2 == "r" or d0 == d3 == "r":
                continue

            d = _mergeCategories(d1, d2)
            if d is None:
                continue
            if d0 == "r":
                d = _mergeCategories(d, d3)
                if d is None:
                    continue
                new_op = "r" + d + "curveto"
            elif d3 == "r":
                d0 = _mergeCategories(d0, _negateCategory(d))
                if d0 is None:
                    continue
                new_op = d0 + "r" + "curveto"
            else:
                d0 = _mergeCategories(d0, d3)
                if d0 is None:
                    continue
                new_op = d0 + d + "curveto"

        # Make sure the stack depth does not exceed (maxstack - 1), so
        # that subroutinizer can insert subroutine calls at any point.
        args1StackUse = _argsStackUse(args1)
        combinedStackUse = max(args1StackUse, len(args1) + stackUse)
        if new_op and combinedStackUse < maxstack:
            commands[i - 1] = (new_op, args1 + args2)
            del commands[i]
            stackUse = combinedStackUse
        else:
            stackUse = args1StackUse

    # 6. Resolve any remaining made-up operators into real operators.
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"0moveto", "0lineto"}:
            # '0' variants carry a 0 argument, so either axis works; pick 'h'.
            commands[i] = "h" + op[1:], args
            continue

        if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
            l = len(args)

            op0, op1 = op[:2]
            if (op0 == "r") ^ (op1 == "r"):
                assert l % 2 == 1
            if op0 == "0":
                op0 = "h"
            if op1 == "0":
                op1 = "h"
            if op0 == "r":
                op0 = op1
            if op1 == "r":
                op1 = _negateCategory(op0)
            assert {op0, op1} <= {"h", "v"}, (op0, op1)

            if l % 2:
                if op0 != op1:  # vhcurveto / hvcurveto
                    if (op0 == "h") ^ (l % 8 == 1):
                        # Swap last two args order
                        args = args[:-2] + args[-1:] + args[-2:-1]
                else:  # hhcurveto / vvcurveto
                    if op0 == "h":  # hhcurveto
                        # Swap first two args order
                        args = args[1:2] + args[:1] + args[2:]

            commands[i] = op0 + op1 + "curveto", args
            continue

    # 7. For any series of args which are blend lists, convert the series to a single blend arg.
    for i in range(len(commands)):
        op, args = commands[i]
        if any(isinstance(arg, list) for arg in args):
            commands[i] = op, _convertToBlendCmds(args)

    return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
    """Convert a T2CharString program to its most-specialized equivalent."""
    commands = programToCommands(program, getNumRegions)
    return commandsToProgram(specializeCommands(commands, **kwargs))
if __name__ == "__main__":
    import sys

    # With no arguments, run the module's doctests.
    if len(sys.argv) == 1:
        import doctest

        sys.exit(doctest.testmod().failed)

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.specializer",
        description="CFF CharString generalizer/specializer",
    )
    parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
    parser.add_argument(
        "--num-regions",
        metavar="NumRegions",
        nargs="*",
        default=None,
        # Bug fix: "opertaions" -> "operations" in user-facing help text.
        help="Number of variable-font regions for blend operations.",
    )
    parser.add_argument(
        "--font",
        metavar="FONTFILE",
        default=None,
        help="CFF2 font to specialize.",
    )
    parser.add_argument(
        "-o",
        "--output-file",
        type=str,
        help="Output font file name.",
    )

    options = parser.parse_args(sys.argv[1:])

    if options.program:
        # Map vsIndex to a region count, taken from --num-regions.
        getNumRegions = (
            None
            if options.num_regions is None
            else lambda vsIndex: int(
                options.num_regions[0 if vsIndex is None else vsIndex]
            )
        )

        program = stringToProgram(options.program)
        print("Program:")
        print(programToString(program))
        commands = programToCommands(program, getNumRegions)
        print("Commands:")
        print(commands)
        program2 = commandsToProgram(commands)
        print("Program from commands:")
        print(programToString(program2))
        # Round-trip sanity check: commands must re-encode to the input.
        assert program == program2
        print("Generalized program:")
        print(programToString(generalizeProgram(program, getNumRegions)))
        print("Specialized program:")
        print(programToString(specializeProgram(program, getNumRegions)))

    if options.font:
        from fontTools.ttLib import TTFont

        font = TTFont(options.font)
        cff2 = font["CFF2"].cff.topDictIndex[0]
        charstrings = cff2.CharStrings
        for glyphName in charstrings.keys():
            charstring = charstrings[glyphName]
            charstring.decompile()
            getNumRegions = charstring.private.getNumRegions
            charstring.program = specializeProgram(
                charstring.program, getNumRegions, maxstack=maxStackLimit
            )

        if options.output_file is None:
            from fontTools.misc.cliTools import makeOutputFileName

            outfile = makeOutputFileName(
                options.font, overWrite=True, suffix=".specialized"
            )
        else:
            outfile = options.output_file

        if outfile:
            print("Saving", outfile)
            font.save(outfile)
from fontTools.misc.psCharStrings import (
SimpleT2Decompiler,
T2WidthExtractor,
calcSubrBias,
)
def _uniq_sort(l):
return sorted(set(l))
class StopHintCountEvent(Exception):
    """Raised to abort charstring execution once hint counting can stop."""

    pass
class _DesubroutinizingT2Decompiler(SimpleT2Decompiler):
    """Decompiler that computes, per charstring, its subr-free program.

    Executing a charstring leaves a `_desubroutinized` attribute on it:
    the original program with each callsubr/callgsubr (and its operand)
    replaced by the called subroutine's already-desubroutinized body.
    """

    # Ops after which no further hint-count information can appear;
    # once one of these is seen, hint counting may stop.
    stop_hintcount_ops = (
        "op_hintmask",
        "op_cntrmask",
        "op_rmoveto",
        "op_hmoveto",
        "op_vmoveto",
    )

    def __init__(self, localSubrs, globalSubrs, private=None):
        SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)

    def execute(self, charString):
        self.need_hintcount = True  # until proven otherwise
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, self.stop_hint_count)

        if hasattr(charString, "_desubroutinized"):
            # If a charstring has already been desubroutinized, we will still
            # need to execute it if we need to count hints in order to
            # compute the byte length for mask arguments, and haven't finished
            # counting hints pairs.
            if self.need_hintcount and self.callingStack:
                try:
                    SimpleT2Decompiler.execute(self, charString)
                except StopHintCountEvent:
                    del self.callingStack[-1]
            return

        charString._patches = []
        SimpleT2Decompiler.execute(self, charString)
        desubroutinized = charString.program[:]
        # Apply recorded patches back-to-front so earlier indices stay valid.
        for idx, expansion in reversed(charString._patches):
            assert idx >= 2
            assert desubroutinized[idx - 1] in [
                "callsubr",
                "callgsubr",
            ], desubroutinized[idx - 1]
            assert type(desubroutinized[idx - 2]) == int
            # Drop the trailing "return" of the inlined subroutine body.
            if expansion[-1] == "return":
                expansion = expansion[:-1]
            # Replace the subr number and the call operator with the body.
            desubroutinized[idx - 2 : idx] = expansion
        if not self.private.in_cff2:
            if "endchar" in desubroutinized:
                # Cut off after first endchar
                desubroutinized = desubroutinized[
                    : desubroutinized.index("endchar") + 1
                ]

        charString._desubroutinized = desubroutinized
        del charString._patches

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1] + self.localBias]
        SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
        SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def stop_hint_count(self, *args):
        # Hint counting is done: unhook the stop handlers.
        self.need_hintcount = False
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, None)
        cs = self.callingStack[-1]
        if hasattr(cs, "_desubroutinized"):
            # Current charstring was only being re-executed for counting;
            # abort it.
            raise StopHintCountEvent()

    def op_hintmask(self, index):
        SimpleT2Decompiler.op_hintmask(self, index)
        if self.need_hintcount:
            self.stop_hint_count()

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        if not hasattr(cs, "_desubroutinized"):
            # Record where to splice the subr body into the caller program.
            cs._patches.append((index, subr._desubroutinized))
def desubroutinize(cff):
    """Inline all subroutine calls and delete the subroutines, in-place.

    Every charstring of every font in `cff` is rewritten with its
    callsubr/callgsubr calls expanded, after which all local Subrs indexes
    and the global subr index are emptied.
    """

    def _drop_subrs(private_dict):
        # Remove the local Subrs index from a Private dict: both the
        # decompiled attribute and the raw dictionary entry.
        if hasattr(private_dict, "Subrs"):
            del private_dict.Subrs
        if "Subrs" in private_dict.rawDict:
            del private_dict.rawDict["Subrs"]

    for fontName in cff.fontNames:
        font = cff[fontName]
        cs = font.CharStrings
        for c in cs.values():
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)
            c.program = c._desubroutinized
            del c._desubroutinized
        # Delete all the local subrs
        if hasattr(font, "FDArray"):
            for fd in font.FDArray:
                _drop_subrs(fd.Private)
        else:
            _drop_subrs(font.Private)
    # as well as the global subrs
    cff.GlobalSubrs.clear()
class _MarkingT2Decompiler(SimpleT2Decompiler):
    """Decompiler that records which local/global subroutines get called.

    Used subroutine numbers accumulate in a `_used` set attribute placed
    on the subr index objects themselves.
    """

    def __init__(self, localSubrs, globalSubrs, private):
        SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
        for subr_index in (localSubrs, globalSubrs):
            if subr_index and not hasattr(subr_index, "_used"):
                subr_index._used = set()

    def op_callsubr(self, index):
        subr_number = self.operandStack[-1] + self.localBias
        self.localSubrs._used.add(subr_number)
        SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        subr_number = self.operandStack[-1] + self.globalBias
        self.globalSubrs._used.add(subr_number)
        SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(T2WidthExtractor):
    """Decompiler that analyzes where hints live in each charstring.

    Executing a charstring attaches a `Hints` record to it (as `_hints`)
    and adds every charstring that declares hints or hintmasks to the
    shared `css` set passed to the constructor, so the caller can later
    strip the hints via _cs_drop_hints().
    """

    class Hints(object):
        # Per-charstring hint bookkeeping filled in during execution.
        def __init__(self):
            # Whether calling this charstring produces any hint stems
            # Note that if a charstring starts with hintmask, it will
            # have has_hint set to True, because it *might* produce an
            # implicit vstem if called under certain conditions.
            self.has_hint = False
            # Index to start at to drop all hints
            self.last_hint = 0
            # Index up to which we know more hints are possible.
            # Only relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints
            #    continuing after this, or there might be
            #    other things.  Not clear yet.
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive
            self.has_hintmask = False
            # List of indices of calls to empty subroutines to remove.
            self.deletions = []

        pass

    def __init__(
        self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
    ):
        # css: shared set collecting charstrings that contain hints.
        self._css = css
        T2WidthExtractor.__init__(
            self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
        )
        self.private = private

    def execute(self, charString):
        # A charstring may be executed more than once (via subr calls);
        # keep the prior analysis to cross-check determinism below.
        old_hints = charString._hints if hasattr(charString, "_hints") else None
        charString._hints = self.Hints()

        T2WidthExtractor.execute(self, charString)

        hints = charString._hints

        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)

        if hints.status != 2:
            # Check from last_check, make sure we didn't have any operators.
            for i in range(hints.last_checked, len(charString.program) - 1):
                if isinstance(charString.program[i], str):
                    hints.status = 2
                    break
                else:
                    hints.status = 1  # There's *something* here
            hints.last_checked = len(charString.program)

        if old_hints:
            # Re-analysis must agree with the first pass.
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1] + self.localBias]
        T2WidthExtractor.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
        T2WidthExtractor.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def op_hstem(self, index):
        T2WidthExtractor.op_hstem(self, index)
        self.processHint(index)

    def op_vstem(self, index):
        T2WidthExtractor.op_vstem(self, index)
        self.processHint(index)

    def op_hstemhm(self, index):
        T2WidthExtractor.op_hstemhm(self, index)
        self.processHint(index)

    def op_vstemhm(self, index):
        T2WidthExtractor.op_vstemhm(self, index)
        self.processHint(index)

    def op_hintmask(self, index):
        rv = T2WidthExtractor.op_hintmask(self, index)
        self.processHintmask(index)
        return rv

    def op_cntrmask(self, index):
        rv = T2WidthExtractor.op_cntrmask(self, index)
        self.processHintmask(index)
        return rv

    def processHintmask(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2:
            # Check from last_check, see if we may be an implicit vstem
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            else:
                # We are an implicit vstem
                hints.has_hint = True
                hints.last_hint = index + 1
                hints.status = 0
        hints.last_checked = index + 1

    def processHint(self, index):
        # Record an explicit stem hint at `index`.
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        # Fold the (already computed) hint status of a called subroutine
        # into the calling charstring's status.
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints

        # Check from last_check, make sure we didn't have
        # any operators.
        if hints.status != 2:
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            hints.last_checked = index

        if hints.status != 2:
            if subr_hints.has_hint:
                hints.has_hint = True

                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2  # Leave the subr call in

        elif subr_hints.status == 0:
            # Subr contains only hints: its call becomes a no-op, delete it.
            hints.deletions.append(index)

        hints.status = max(hints.status, subr_hints.status)
def _cs_subset_subroutines(charstring, subrs, gsubrs):
p = charstring.program
for i in range(1, len(p)):
if p[i] == "callsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
elif p[i] == "callgsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = (
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
)
def _cs_drop_hints(charstring):
    """Strip hints from a charstring, using the analysis in `_hints`.

    Deletes calls to hint-only subroutines, chops off the hint-declaring
    program prefix (re-inserting the width value if one was present), and
    removes all hintmask/cntrmask operators with their mask arguments.
    """
    hints = charstring._hints

    if hints.deletions:
        p = charstring.program
        # Delete back-to-front so the recorded indices stay valid.
        for idx in reversed(hints.deletions):
            del p[idx - 2 : idx]

    if hints.has_hint:
        assert not hints.deletions or hints.last_hint <= hints.deletions[0]
        charstring.program = charstring.program[hints.last_hint :]
        if not charstring.program:
            # TODO CFF2 no need for endchar.
            charstring.program.append("endchar")
        if hasattr(charstring, "width"):
            # Insert width back if needed
            if charstring.width != charstring.private.defaultWidthX:
                # For CFF2 charstrings, this should never happen
                assert (
                    charstring.private.defaultWidthX is not None
                ), "CFF2 CharStrings must not have an initial width value"
                charstring.program.insert(
                    0, charstring.width - charstring.private.nominalWidthX
                )

    if hints.has_hintmask:
        i = 0
        p = charstring.program
        while i < len(p):
            if p[i] in ["hintmask", "cntrmask"]:
                assert i + 1 <= len(p)
                # Remove the operator and its mask-bytes argument.
                del p[i : i + 2]
                continue
            i += 1

    assert len(charstring.program)

    del charstring._hints
def remove_hints(cff, *, removeUnusedSubrs: bool = True):
    """Drop all hinting from the charstrings of every font in `cff`.

    Removes stem-hint declarations (and the program prefix that declares
    them), hintmask/cntrmask operators, and the font-wide hinting values
    in the Private dict(s).  When removeUnusedSubrs is True, subroutines
    left unused by hint removal are deleted as well.
    """
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # This can be tricky, but doesn't have to.  What we do is:
        #
        # - Run all used glyph charstrings and recurse into subroutines,
        # - For each charstring (including subroutines), if it has any
        #   of the hint stem operators, we mark it as such.
        #   Upon returning, for each charstring we note all the
        #   subroutine calls it makes that (recursively) contain a stem,
        # - Dropping hinting then consists of the following two ops:
        #   * Drop the piece of the program in each charstring before the
        #     last call to a stem op or a stem-calling subroutine,
        #   * Drop all hintmask operations.
        # - It's trickier...  A hintmask right after hints and a few numbers
        #   will act as an implicit vstemhm.  As such, we track whether
        #   we have seen any non-hint operators so far and do the right
        #   thing, recursively...  Good luck understanding that :(
        css = set()
        for c in cs.values():
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DehintingT2Decompiler(
                css,
                subrs,
                c.globalSubrs,
                c.private.nominalWidthX,
                c.private.defaultWidthX,
                c.private,
            )
            decompiler.execute(c)
            c.width = decompiler.width
        for charstring in css:
            _cs_drop_hints(charstring)
        del css

        # Drop font-wide hinting values
        all_privs = []
        if hasattr(font, "FDArray"):
            all_privs.extend(fd.Private for fd in font.FDArray)
        else:
            all_privs.append(font.Private)
        for priv in all_privs:
            for k in [
                "BlueValues",
                "OtherBlues",
                "FamilyBlues",
                "FamilyOtherBlues",
                "BlueScale",
                "BlueShift",
                "BlueFuzz",
                "StemSnapH",
                "StemSnapV",
                "StdHW",
                "StdVW",
                "ForceBold",
                "LanguageGroup",
                "ExpansionFactor",
            ]:
                if hasattr(priv, k):
                    setattr(priv, k, None)
    if removeUnusedSubrs:
        remove_unused_subroutines(cff)
def _pd_delete_empty_subrs(private_dict):
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
if "Subrs" in private_dict.rawDict:
del private_dict.rawDict["Subrs"]
del private_dict.Subrs
def remove_unused_subroutines(cff):
    """Delete subroutines that are never called and renumber the rest.

    Marks used local/global subrs by executing every charstring with a
    _MarkingT2Decompiler, rebuilds each subr index with only the used
    entries, and rewrites all callsubr/callgsubr operands for the new
    biased numbering.
    """
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # Renumber subroutines to remove unused ones

        # Mark all used subroutines
        for c in cs.values():
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)

        all_subrs = [font.GlobalSubrs]
        if hasattr(font, "FDArray"):
            all_subrs.extend(
                fd.Private.Subrs
                for fd in font.FDArray
                if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
            )
        elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)

        # NOTE(review): `subrs` here is left over from the marking loop
        # above and is immediately rebound by the loop below, so this set
        # appears to be dead code — verify intent before removing.
        subrs = set(subrs)  # Remove duplicates

        # Prepare
        for subrs in all_subrs:
            if not hasattr(subrs, "_used"):
                subrs._used = set()
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = calcSubrBias(subrs)
            subrs._new_bias = calcSubrBias(subrs._used)

        # Renumber glyph charstrings
        for c in cs.values():
            subrs = getattr(c.private, "Subrs", None)
            _cs_subset_subroutines(c, subrs, font.GlobalSubrs)

        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
                    local_subrs = font.Private.Subrs
                elif (
                    hasattr(font, "FDArray")
                    and len(font.FDArray) == 1
                    and hasattr(font.FDArray[0].Private, "Subrs")
                ):
                    # Technically we shouldn't do this. But I've run into fonts that do it.
                    local_subrs = font.FDArray[0].Private.Subrs
                else:
                    local_subrs = None
            else:
                local_subrs = subrs

            # Keep only the used entries, and drop any stale lazy-loading
            # state so the index is re-serialized from `items`.
            subrs.items = [subrs.items[i] for i in subrs._used]
            if hasattr(subrs, "file"):
                del subrs.file
            if hasattr(subrs, "offsets"):
                del subrs.offsets

            for subr in subrs.items:
                _cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs)

        # Delete local SubrsIndex if empty
        if hasattr(font, "FDArray"):
            for fd in font.FDArray:
                _pd_delete_empty_subrs(fd.Private)
        else:
            _pd_delete_empty_subrs(font.Private)

        # Cleanup
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias
# ===== venv\Lib\site-packages\fontTools\cffLib\width.py =====
# -*- coding: utf-8 -*-
"""T2CharString glyph width optimizer.
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""
from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
__all__ = ["optimizeWidths", "main"]
class missingdict(dict):
    """A dict that computes values for absent keys on the fly.

    Unlike ``collections.defaultdict``, the computed value is *not* stored
    in the dict; every miss calls ``missing_func`` again.
    """
    def __init__(self, missing_func):
        # Callable invoked with the missing key on every failed lookup.
        self.missing_func = missing_func
    def __missing__(self, v):
        # Intentionally no caching: repeated misses recompute.
        return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
    """Return running accumulations of *f* folded with *op*.

    *f* maps integer positions to values and must tolerate lookups at every
    position between its min and max key (e.g. a defaultdict/missingdict).
    The result is a :class:`missingdict`: inside ``[min, max]`` it holds the
    accumulated value up to (or, if *decreasing*, down to) each position;
    outside that interval it yields *start* on the not-yet-accumulated side
    and the grand total on the other.
    """
    positions = sorted(f.keys())
    lo, hi = positions[0], positions[-1]
    grand_total = reduce(op, f.values(), start)
    if decreasing:
        # Accumulate from the top key downward.
        span = range(hi, lo - 1, -1)
        fallback = lambda x: start if x > hi else grand_total
    else:
        # Accumulate from the bottom key upward.
        span = range(lo, hi + 1)
        fallback = lambda x: start if x < lo else grand_total
    acc = missingdict(fallback)
    running = start
    for x in span:
        running = op(running, f[x])
        acc[x] = running
    return acc
def byteCost(widths, default, nominal):
    """Return the total charstring bytes needed to encode *widths*.

    *widths* is a list of widths or a mapping of width -> glyph count.
    Glyphs whose width equals *default* cost nothing; otherwise the cost is
    that of a Type 2 number operand for ``width - nominal``: 1 byte for a
    delta of at most 107, 2 bytes up to 1131, else 5 bytes.
    """
    if not hasattr(widths, "items"):
        tally = defaultdict(int)
        for w in widths:
            tally[w] += 1
        widths = tally
    total = 0
    for w, freq in widths.items():
        if w == default:
            # Width omitted entirely; defaultWidthX applies.
            continue
        delta = abs(w - nominal)
        if delta <= 107:
            total += freq          # one-byte operand
        elif delta <= 1131:
            total += freq * 2      # two-byte operand
        else:
            total += freq * 5      # five-byte fixed operand
    return total
def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallest of fonts.

    Tries every (default, nominal) pair over the width range and returns the
    pair minimizing :func:`byteCost`.
    """
    d = defaultdict(int)
    for w in widths:
        d[w] += 1
    # Maximum number of bytes using default can possibly save.
    maxDefaultAdvantage = 5 * max(d.values())
    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))
    # Upper bound: every glyph at the worst 5-byte cost, plus one.
    bestCost = len(widths) * 5 + 1
    bestDefault = bestNominal = None
    for nominal in domain:
        # Prune nominals that cannot beat the best found so far even with
        # the most favorable default choice.
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal
    return bestDefault, bestNominal
def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.

    This algorithm is linear in UPEM+numGlyphs.
    """
    if not hasattr(widths, "items"):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d
    else:
        # Work on a defaultdict copy: cumSum() and the cost lambdas below
        # index `widths` at *every* value in [minw, maxw], which would raise
        # KeyError on a plain dict with gaps between its keys.  Copying also
        # avoids mutating the caller's dictionary.
        widths = defaultdict(int, widths)
    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))
    # Cumulative sum/max forward/backward.
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)
    # Cost per nominal choice, without default consideration.
    # The breakpoints 108 and 1132 mirror the 1/2/5-byte operand thresholds.
    nomnCostU = missingdict(
        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
    )
    nomnCostD = missingdict(
        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
    )
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
    # Cost-saving per nominal choice, by best default choice.
    dfltCostU = missingdict(
        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
    )
    dfltCostD = missingdict(
        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
    )
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])
    # Work back the best default.
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        # The winning default lies at or below nominal: walk each candidate
        # down to the start of its plateau of equal cumulative maxima.
        starts = [nominal, nominal - 108, nominal - 1132]
        for start in starts:
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        # Symmetric case: the winning default lies at or above nominal.
        starts = [nominal, nominal + 108, nominal + 1132]
        for start in starts:
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))
    return default, nominal
def main(args=None):
    """Calculate optimum defaultWidthX/nominalWidthX values"""
    import argparse
    parser = argparse.ArgumentParser(
        "fonttools cffLib.width",
        description=main.__doc__,
    )
    parser.add_argument(
        "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
    )
    parser.add_argument(
        "-b",
        "--brute-force",
        dest="brute",
        action="store_true",
        help="Use brute-force approach (VERY slow)",
    )
    args = parser.parse_args(args)
    for fontfile in args.inputs:
        font = TTFont(fontfile)
        hmtx = font["hmtx"]
        # Advance widths only; the left side bearings (m[1]) are irrelevant here.
        widths = [m[0] for m in hmtx.metrics.values()]
        if args.brute:
            default, nominal = optimizeWidthsBruteforce(widths)
        else:
            default, nominal = optimizeWidths(widths)
        print(
            "glyphs=%d default=%d nominal=%d byteCost=%d"
            % (len(widths), default, nominal, byteCost(widths, default, nominal))
        )
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        # With no arguments, run the module doctests instead of main().
        import doctest
        sys.exit(doctest.testmod().failed)
    main()
"""cffLib: read/write Adobe CFF fonts
OpenType fonts with PostScript outlines embed a completely independent
font file in Adobe's *Compact Font Format*. So dealing with OpenType fonts
requires also dealing with CFF. This module allows you to read and write
fonts written in the CFF format.
In 2016, OpenType 1.8 introduced the `CFF2 <https://learn.microsoft.com/en-us/typography/opentype/spec/cff2>`_
format which, along with other changes, extended the CFF format to deal with
the demands of variable fonts. This module parses both original CFF and CFF2.
"""
from fontTools.misc import sstruct
from fontTools.misc import psCharStrings
from fontTools.misc.arrayTools import unionRect, intRect
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
tobytes,
tostr,
safeEval,
)
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.ttLib.tables.otBase import OTTableReader
from fontTools.ttLib.tables import otTables as ot
from io import BytesIO
import struct
import logging
import re
# mute cffLib debug messages when running ttx in verbose mode
DEBUG = logging.DEBUG - 1
log = logging.getLogger(__name__)
cffHeaderFormat = """
major: B
minor: B
hdrSize: B
"""
maxStackLimit = 513
# maxstack operator has been deprecated. max stack is now always 513.
class CFFFontSet(object):
    """A CFF font "file" can contain more than one font, although this is
    extremely rare (and not allowed within OpenType fonts).
    This class is the entry point for parsing a CFF table. To actually
    manipulate the data inside the CFF font, you will want to access the
    ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
    object can either be treated as a dictionary (with appropriate
    ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
    objects, or as a list.
    .. code:: python
    from fontTools import ttLib
    tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
    tt["CFF "].cff
    #
    tt["CFF "].cff[0] # Here's your actual font data
    #
    """
    def decompile(self, file, otFont, isCFF2=None):
        """Parse a binary CFF file into an internal representation. ``file``
        should be a file handle object. ``otFont`` is the top-level
        :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
        If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
        library makes an assertion that the CFF header is of the appropriate
        version.
        """
        self.otFont = otFont
        # The first 3 header bytes are common to CFF and CFF2: major, minor, hdrSize.
        sstruct.unpack(cffHeaderFormat, file.read(3), self)
        if isCFF2 is not None:
            # called from ttLib: assert 'major' as read from file matches the
            # expected version
            expected_major = 2 if isCFF2 else 1
            if self.major != expected_major:
                raise ValueError(
                    "Invalid CFF 'major' version: expected %d, found %d"
                    % (expected_major, self.major)
                )
        else:
            # use 'major' version from file to determine if isCFF2
            assert self.major in (1, 2), "Unknown CFF format"
            isCFF2 = self.major == 2
        if not isCFF2:
            # CFF 1: header is followed by offSize, Name INDEX, TopDict INDEX,
            # String INDEX and the global subroutine INDEX.
            self.offSize = struct.unpack("B", file.read(1))[0]
            file.seek(self.hdrSize)
            self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
            self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
            self.strings = IndexedStrings(file)
        else: # isCFF2
            # CFF2: no Name/String INDEX; a 16-bit TopDict size replaces offSize.
            self.topDictSize = struct.unpack(">H", file.read(2))[0]
            file.seek(self.hdrSize)
            self.fontNames = ["CFF2Font"]
            cff2GetGlyphOrder = otFont.getGlyphOrder
            # in CFF2, offsetSize is the size of the TopDict data.
            self.topDictIndex = TopDictIndex(
                file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2
            )
            self.strings = None
        self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
        self.topDictIndex.strings = self.strings
        self.topDictIndex.GlobalSubrs = self.GlobalSubrs
    def __len__(self):
        # Number of fonts in the set (usually 1).
        return len(self.fontNames)
    def keys(self):
        # Font names, dict-style access.
        return list(self.fontNames)
    def values(self):
        # The TopDictIndex doubles as the list of TopDict values.
        return self.topDictIndex
    def __getitem__(self, nameOrIndex):
        """Return TopDict instance identified by name (str) or index (int
        or any object that implements `__index__`).
        """
        if hasattr(nameOrIndex, "__index__"):
            index = nameOrIndex.__index__()
        elif isinstance(nameOrIndex, str):
            name = nameOrIndex
            try:
                index = self.fontNames.index(name)
            except ValueError:
                raise KeyError(nameOrIndex)
        else:
            raise TypeError(nameOrIndex)
        return self.topDictIndex[index]
    def compile(self, file, otFont, isCFF2=None):
        """Write the object back into binary representation onto the given file.
        ``file`` should be a file handle object. ``otFont`` is the top-level
        :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
        If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
        library makes an assertion that the CFF header is of the appropriate
        version.
        """
        self.otFont = otFont
        if isCFF2 is not None:
            # called from ttLib: assert 'major' value matches expected version
            expected_major = 2 if isCFF2 else 1
            if self.major != expected_major:
                raise ValueError(
                    "Invalid CFF 'major' version: expected %d, found %d"
                    % (expected_major, self.major)
                )
        else:
            # use current 'major' value to determine output format
            assert self.major in (1, 2), "Unknown CFF format"
            isCFF2 = self.major == 2
        if otFont.recalcBBoxes and not isCFF2:
            for topDict in self.topDictIndex:
                topDict.recalcFontBBox()
        if not isCFF2:
            strings = IndexedStrings()
        else:
            # CFF2 has no String INDEX.
            strings = None
        writer = CFFWriter(isCFF2)
        topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
        if isCFF2:
            self.hdrSize = 5
            writer.add(sstruct.pack(cffHeaderFormat, self))
            # Note: topDictSize will most likely change in CFFWriter.toFile().
            self.topDictSize = topCompiler.getDataLength()
            writer.add(struct.pack(">H", self.topDictSize))
        else:
            self.hdrSize = 4
            self.offSize = 4 # will most likely change in CFFWriter.toFile().
            writer.add(sstruct.pack(cffHeaderFormat, self))
            writer.add(struct.pack("B", self.offSize))
        if not isCFF2:
            fontNames = Index()
            for name in self.fontNames:
                fontNames.append(name)
            writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
        writer.add(topCompiler)
        if not isCFF2:
            writer.add(strings.getCompiler())
        writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
        for topDict in self.topDictIndex:
            if not hasattr(topDict, "charset") or topDict.charset is None:
                # Default the charset to the font's glyph order.
                charset = otFont.getGlyphOrder()
                topDict.charset = charset
        children = topCompiler.getChildren(strings)
        for child in children:
            writer.add(child)
        writer.toFile(file)
    def toXML(self, xmlWriter):
        """Write the object into XML representation onto the given
        :class:`fontTools.misc.xmlWriter.XMLWriter`.
        .. code:: python
        writer = xmlWriter.XMLWriter(sys.stdout)
        tt["CFF "].cff.toXML(writer)
        """
        xmlWriter.simpletag("major", value=self.major)
        xmlWriter.newline()
        xmlWriter.simpletag("minor", value=self.minor)
        xmlWriter.newline()
        for fontName in self.fontNames:
            xmlWriter.begintag("CFFFont", name=tostr(fontName))
            xmlWriter.newline()
            font = self[fontName]
            font.toXML(xmlWriter)
            xmlWriter.endtag("CFFFont")
            xmlWriter.newline()
        xmlWriter.newline()
        xmlWriter.begintag("GlobalSubrs")
        xmlWriter.newline()
        self.GlobalSubrs.toXML(xmlWriter)
        xmlWriter.endtag("GlobalSubrs")
        xmlWriter.newline()
    def fromXML(self, name, attrs, content, otFont=None):
        """Reads data from the XML element into the ``CFFFontSet`` object."""
        self.otFont = otFont
        # set defaults. These will be replaced if there are entries for them
        # in the XML file.
        if not hasattr(self, "major"):
            self.major = 1
        if not hasattr(self, "minor"):
            self.minor = 0
        if name == "CFFFont":
            if self.major == 1:
                if not hasattr(self, "offSize"):
                    # this will be recalculated when the cff is compiled.
                    self.offSize = 4
                if not hasattr(self, "hdrSize"):
                    self.hdrSize = 4
                if not hasattr(self, "GlobalSubrs"):
                    self.GlobalSubrs = GlobalSubrsIndex()
                if not hasattr(self, "fontNames"):
                    self.fontNames = []
                    self.topDictIndex = TopDictIndex()
                fontName = attrs["name"]
                self.fontNames.append(fontName)
                topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
                topDict.charset = None # gets filled in later
            elif self.major == 2:
                if not hasattr(self, "hdrSize"):
                    self.hdrSize = 5
                if not hasattr(self, "GlobalSubrs"):
                    self.GlobalSubrs = GlobalSubrsIndex()
                if not hasattr(self, "fontNames"):
                    self.fontNames = ["CFF2Font"]
                cff2GetGlyphOrder = self.otFont.getGlyphOrder
                topDict = TopDict(
                    GlobalSubrs=self.GlobalSubrs, cff2GetGlyphOrder=cff2GetGlyphOrder
                )
                self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
            self.topDictIndex.append(topDict)
            for element in content:
                # content is a mix of strings (whitespace) and (name, attrs,
                # content) tuples; only the tuples carry data.
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                topDict.fromXML(name, attrs, content)
            # Propagate the TopDict's VarStore down into the FDArray Privates
            # when it was not set there yet (CFF2 variable fonts).
            if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
                fdArray = topDict.FDArray
                for fontDict in fdArray:
                    if hasattr(fontDict, "Private"):
                        fontDict.Private.vstore = topDict.VarStore
        elif name == "GlobalSubrs":
            subrCharStringClass = psCharStrings.T2CharString
            if not hasattr(self, "GlobalSubrs"):
                self.GlobalSubrs = GlobalSubrsIndex()
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                subr = subrCharStringClass()
                subr.fromXML(name, attrs, content)
                self.GlobalSubrs.append(subr)
        elif name == "major":
            self.major = int(attrs["value"])
        elif name == "minor":
            self.minor = int(attrs["value"])
    def convertCFFToCFF2(self, otFont):
        # Delegates to the CFFToCFF2 module (imported lazily to avoid cycles).
        from .CFFToCFF2 import _convertCFFToCFF2
        _convertCFFToCFF2(self, otFont)
    def convertCFF2ToCFF(self, otFont):
        # Delegates to the CFF2ToCFF module (imported lazily to avoid cycles).
        from .CFF2ToCFF import _convertCFF2ToCFF
        _convertCFF2ToCFF(self, otFont)
    def desubroutinize(self):
        # Inline all subroutine calls into the charstrings.
        from .transforms import desubroutinize
        desubroutinize(self)
    def remove_hints(self):
        # Strip hinting operators from the charstrings.
        from .transforms import remove_hints
        remove_hints(self)
    def remove_unused_subroutines(self):
        # Drop subroutines no charstring references, renumbering the rest.
        from .transforms import remove_unused_subroutines
        remove_unused_subroutines(self)
class CFFWriter(object):
    """Helper class for serializing CFF data to binary. Used by
    :meth:`CFFFontSet.compile`."""
    def __init__(self, isCFF2):
        # Ordered list of raw bytes objects and compiler objects to emit.
        self.data = []
        self.isCFF2 = isCFF2
    def add(self, table):
        self.data.append(table)
    def toFile(self, file):
        # Offsets stored inside the data depend on item sizes, and item sizes
        # (offSize of INDEXes) depend on the offsets.  Iterate to a fixed
        # point: recompute all positions until two passes agree.
        lastPosList = None
        count = 1
        while True:
            log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
            count = count + 1
            pos = 0
            posList = [pos]
            for item in self.data:
                if hasattr(item, "getDataLength"):
                    endPos = pos + item.getDataLength()
                    if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
                        # CFF2 stores the TopDict size in the header.
                        self.topDictSize = item.getDataLength()
                else:
                    endPos = pos + len(item)
                if hasattr(item, "setPos"):
                    # Let compilers record their (new) absolute position.
                    item.setPos(pos, endPos)
                pos = endPos
                posList.append(pos)
            if posList == lastPosList:
                break
            lastPosList = posList
        log.log(DEBUG, "CFFWriter.toFile() writing to file.")
        begin = file.tell()
        # data[1] is the placeholder for topDictSize (CFF2) or offSize (CFF).
        if self.isCFF2:
            self.data[1] = struct.pack(">H", self.topDictSize)
        else:
            self.offSize = calcOffSize(lastPosList[-1])
            self.data[1] = struct.pack("B", self.offSize)
        posList = [0]
        for item in self.data:
            if hasattr(item, "toFile"):
                item.toFile(file)
            else:
                file.write(item)
            posList.append(file.tell() - begin)
        # Sanity check: actual write positions must match the converged plan.
        assert posList == lastPosList
def calcOffSize(largestOffset):
    """Return the smallest CFF offSize (1-4 bytes) that can hold *largestOffset*."""
    for size, limit in ((1, 0x100), (2, 0x10000), (3, 0x1000000)):
        if largestOffset < limit:
            return size
    return 4
class IndexCompiler(object):
    """Base class for writing CFF `INDEX data `_
    to binary."""
    def __init__(self, items, strings, parent, isCFF2=None):
        # Inherit the CFF2 flag from the parent compiler if not given.
        if isCFF2 is None and hasattr(parent, "isCFF2"):
            isCFF2 = parent.isCFF2
            assert isCFF2 is not None
        self.isCFF2 = isCFF2
        self.items = self.getItems(items, strings)
        self.parent = parent
    def getItems(self, items, strings):
        # Hook for subclasses to transform items before serialization.
        return items
    def getOffsets(self):
        # An empty INDEX contains only the count field.
        if self.items:
            pos = 1
            offsets = [pos]
            for item in self.items:
                if hasattr(item, "getDataLength"):
                    pos = pos + item.getDataLength()
                else:
                    pos = pos + len(item)
                offsets.append(pos)
        else:
            offsets = []
        return offsets
    def getDataLength(self):
        # CFF2 uses a 32-bit count; CFF a 16-bit one.
        if self.isCFF2:
            countSize = 4
        else:
            countSize = 2
        if self.items:
            lastOffset = self.getOffsets()[-1]
            offSize = calcOffSize(lastOffset)
            dataLength = (
                countSize  # count
                + 1  # offSize
                + (len(self.items) + 1) * offSize  # the offsets
                + lastOffset  # size of object data
                - 1  # offsets are 1-based
            )
        else:
            # count. For empty INDEX tables, this is the only entry.
            dataLength = countSize
        return dataLength
    def toFile(self, file):
        offsets = self.getOffsets()
        if self.isCFF2:
            writeCard32(file, len(self.items))
        else:
            writeCard16(file, len(self.items))
        # An empty INDEX contains only the count field.
        if self.items:
            offSize = calcOffSize(offsets[-1])
            writeCard8(file, offSize)
            # Negative slice index: keep only the offSize low-order bytes
            # of each packed 4-byte big-endian offset.
            offSize = -offSize
            pack = struct.pack
            for offset in offsets:
                binOffset = pack(">l", offset)[offSize:]
                assert len(binOffset) == -offSize
                file.write(binOffset)
            for item in self.items:
                if hasattr(item, "toFile"):
                    item.toFile(file)
                else:
                    data = tobytes(item, encoding="latin1")
                    file.write(data)
class IndexedStringsCompiler(IndexCompiler):
    """Compiles the CFF String INDEX; items come from an IndexedStrings object."""
    def getItems(self, items, strings):
        # `items` is an IndexedStrings instance; serialize its string list.
        return items.strings
class TopDictIndexCompiler(IndexCompiler):
    """Helper class for writing the TopDict to binary."""
    def getItems(self, items, strings):
        # Wrap each TopDict in its own compiler.
        out = []
        for item in items:
            out.append(item.getCompiler(strings, self))
        return out
    def getChildren(self, strings):
        # Collect the dependent structures (charstrings, privates, ...) of
        # every TopDict so the writer can emit them after the INDEX.
        children = []
        for topDict in self.items:
            children.extend(topDict.getChildren(strings))
        return children
    def getOffsets(self):
        # In CFF2 the TopDict is raw data, not an INDEX: a single pseudo item.
        if self.isCFF2:
            offsets = [0, self.items[0].getDataLength()]
            return offsets
        else:
            return super(TopDictIndexCompiler, self).getOffsets()
    def getDataLength(self):
        # CFF2: no INDEX framing, just the TopDict data itself.
        if self.isCFF2:
            dataLength = self.items[0].getDataLength()
            return dataLength
        else:
            return super(TopDictIndexCompiler, self).getDataLength()
    def toFile(self, file):
        # CFF2: write the single TopDict bare; CFF: write a real INDEX.
        if self.isCFF2:
            self.items[0].toFile(file)
        else:
            super(TopDictIndexCompiler, self).toFile(file)
class FDArrayIndexCompiler(IndexCompiler):
    """Helper class for writing the
    `Font DICT INDEX `_
    to binary."""
    def getItems(self, items, strings):
        # Wrap each FontDict in its own compiler.
        out = []
        for item in items:
            out.append(item.getCompiler(strings, self))
        return out
    def getChildren(self, strings):
        # Dependent structures (e.g. Private dicts) of every FontDict.
        children = []
        for fontDict in self.items:
            children.extend(fontDict.getChildren(strings))
        return children
    def toFile(self, file):
        offsets = self.getOffsets()
        if self.isCFF2:
            writeCard32(file, len(self.items))
        else:
            writeCard16(file, len(self.items))
        offSize = calcOffSize(offsets[-1])
        writeCard8(file, offSize)
        # Keep only the offSize low-order bytes of each 4-byte offset.
        offSize = -offSize
        pack = struct.pack
        for offset in offsets:
            binOffset = pack(">l", offset)[offSize:]
            assert len(binOffset) == -offSize
            file.write(binOffset)
        for item in self.items:
            if hasattr(item, "toFile"):
                item.toFile(file)
            else:
                file.write(item)
    def setPos(self, pos, endPos):
        # Record this INDEX's absolute position in the owning TopDict.
        self.parent.rawDict["FDArray"] = pos
class GlobalSubrsCompiler(IndexCompiler):
    """Helper class for writing the `global subroutine INDEX `_
    to binary."""
    def getItems(self, items, strings):
        # Compile each charstring (side effect: sets cs.bytecode) and emit
        # the resulting bytecode strings.
        out = []
        for cs in items:
            cs.compile(self.isCFF2)
            out.append(cs.bytecode)
        return out
class SubrsCompiler(GlobalSubrsCompiler):
    """Helper class for writing the `local subroutine INDEX `_
    to binary."""
    def setPos(self, pos, endPos):
        # Local Subrs offsets are relative to the owning Private dict.
        offset = pos - self.parent.pos
        self.parent.rawDict["Subrs"] = offset
class CharStringsCompiler(GlobalSubrsCompiler):
    """Helper class for writing the `CharStrings INDEX `_
    to binary."""
    # NOTE(review): getItems is identical to GlobalSubrsCompiler.getItems;
    # the override is redundant but kept for parity with upstream.
    def getItems(self, items, strings):
        out = []
        for cs in items:
            cs.compile(self.isCFF2)
            out.append(cs.bytecode)
        return out
    def setPos(self, pos, endPos):
        # CharStrings offset in the TopDict is absolute.
        self.parent.rawDict["CharStrings"] = pos
class Index(object):
    """This class represents what the CFF spec calls an INDEX (an array of
    variable-sized objects). `Index` items can be addressed and set using
    Python list indexing."""
    compilerClass = IndexCompiler
    def __init__(self, file=None, isCFF2=None):
        self.items = []
        self.offsets = offsets = []
        name = self.__class__.__name__
        if file is None:
            # Building from scratch (e.g. from XML); nothing to read.
            return
        self._isCFF2 = isCFF2
        log.log(DEBUG, "loading %s at %s", name, file.tell())
        self.file = file
        if isCFF2:
            count = readCard32(file)
        else:
            count = readCard16(file)
        if count == 0:
            return
        # Items are loaded lazily in __getitem__; None marks "not read yet".
        self.items = [None] * count
        offSize = readCard8(file)
        log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
        assert offSize <= 4, "offSize too large: %s" % offSize
        # Left-pad each offset to 4 bytes so it unpacks as a uint32.
        pad = b"\0" * (4 - offSize)
        for index in range(count + 1):
            chunk = file.read(offSize)
            chunk = pad + chunk
            (offset,) = struct.unpack(">L", chunk)
            offsets.append(int(offset))
        # Offsets are 1-based relative to the byte before the object data.
        self.offsetBase = file.tell() - 1
        file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot
        log.log(DEBUG, " end of %s at %s", name, file.tell())
    def __len__(self):
        return len(self.items)
    def __getitem__(self, index):
        # Lazy load: parse the item from the file on first access, then cache.
        item = self.items[index]
        if item is not None:
            return item
        offset = self.offsets[index] + self.offsetBase
        size = self.offsets[index + 1] - self.offsets[index]
        file = self.file
        file.seek(offset)
        data = file.read(size)
        assert len(data) == size
        item = self.produceItem(index, data, file, offset)
        self.items[index] = item
        return item
    def __setitem__(self, index, item):
        self.items[index] = item
    def produceItem(self, index, data, file, offset):
        # Subclasses turn the raw bytes into a richer object; base keeps bytes.
        return data
    def append(self, item):
        """Add an item to an INDEX."""
        self.items.append(item)
    def getCompiler(self, strings, parent, isCFF2=None):
        # Instantiate the class-specific compiler for serialization.
        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)
    def clear(self):
        """Empty the INDEX."""
        del self.items[:]
class GlobalSubrsIndex(Index):
    """This index contains all the global subroutines in the font. A global
    subroutine is a set of ``CharString`` data which is accessible to any
    glyph in the font, and are used to store repeated instructions - for
    example, components may be encoded as global subroutines, but so could
    hinting instructions.
    Remember that when interpreting a ``callgsubr`` instruction (or indeed
    a ``callsubr`` instruction) that you will need to add the "subroutine
    number bias" to number given:
    .. code:: python
    tt = ttLib.TTFont("Almendra-Bold.otf")
    u = tt["CFF "].cff[0].CharStrings["udieresis"]
    u.decompile()
    u.toXML(XMLWriter(sys.stdout))
    #
    # -64 callgsubr <-- Subroutine which implements the dieresis mark
    #
    tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG
    #
    tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT
    #
    ("The bias applied depends on the number of subrs (gsubrs). If the number of
    subrs (gsubrs) is less than 1240, the bias is 107. Otherwise if it is less
    than 33900, it is 1131; otherwise it is 32768.",
    `Subroutine Operators `)
    """
    compilerClass = GlobalSubrsCompiler
    subrClass = psCharStrings.T2CharString
    charStringClass = psCharStrings.T2CharString
    def __init__(
        self,
        file=None,
        globalSubrs=None,
        private=None,
        fdSelect=None,
        fdArray=None,
        isCFF2=None,
    ):
        super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
        self.globalSubrs = globalSubrs
        self.private = private
        if fdSelect:
            self.fdSelect = fdSelect
        if fdArray:
            self.fdArray = fdArray
    def produceItem(self, index, data, file, offset):
        # Pick the Private dict a charstring should decompile against: an
        # explicit one, else the FDArray entry chosen by FDSelect (CID fonts),
        # else none.
        if self.private is not None:
            private = self.private
        elif hasattr(self, "fdArray") and self.fdArray is not None:
            if hasattr(self, "fdSelect") and self.fdSelect is not None:
                fdIndex = self.fdSelect[index]
            else:
                fdIndex = 0
            private = self.fdArray[fdIndex].Private
        else:
            private = None
        return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)
    def toXML(self, xmlWriter):
        """Write the subroutines index into XML representation onto the given
        :class:`fontTools.misc.xmlWriter.XMLWriter`.
        .. code:: python
        writer = xmlWriter.XMLWriter(sys.stdout)
        tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
        """
        xmlWriter.comment(
            "The 'index' attribute is only for humans; " "it is ignored when parsed."
        )
        xmlWriter.newline()
        for i in range(len(self)):
            subr = self[i]
            if subr.needsDecompilation():
                # Not decompiled yet: dump raw bytecode.
                xmlWriter.begintag("CharString", index=i, raw=1)
            else:
                xmlWriter.begintag("CharString", index=i)
            xmlWriter.newline()
            subr.toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
    def fromXML(self, name, attrs, content):
        # Only <CharString> children are meaningful here.
        if name != "CharString":
            return
        subr = self.subrClass()
        subr.fromXML(name, attrs, content)
        self.append(subr)
    def getItemAndSelector(self, index):
        # Return (charstring, FDSelect value or None) for the given index.
        sel = None
        if hasattr(self, "fdSelect"):
            sel = self.fdSelect[index]
        return self[index], sel
class SubrsIndex(GlobalSubrsIndex):
    """This index contains a glyph's local subroutines. A local subroutine is a
    private set of ``CharString`` data which is accessible only to the glyph to
    which the index is attached."""
    # Same parsing behavior as GlobalSubrsIndex; only serialization differs
    # (local Subrs offsets are relative to the Private dict).
    compilerClass = SubrsCompiler
class TopDictIndex(Index):
    """This index represents the array of ``TopDict`` structures in the font
    (again, usually only one entry is present). Hence the following calls are
    equivalent:
    .. code:: python
    tt["CFF "].cff[0]
    #
    tt["CFF "].cff.topDictIndex[0]
    #
    """
    compilerClass = TopDictIndexCompiler
    def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None):
        self.cff2GetGlyphOrder = cff2GetGlyphOrder
        if file is not None and isCFF2:
            # CFF2: the TopDict is raw data of known size (topSize), not an
            # INDEX; fake a single-item index over it.
            self._isCFF2 = isCFF2
            self.items = []
            name = self.__class__.__name__
            log.log(DEBUG, "loading %s at %s", name, file.tell())
            self.file = file
            count = 1
            self.items = [None] * count
            self.offsets = [0, topSize]
            self.offsetBase = file.tell()
            # pretend we've read the whole lot
            file.seek(self.offsetBase + topSize)
            log.log(DEBUG, " end of %s at %s", name, file.tell())
        else:
            super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)
    def produceItem(self, index, data, file, offset):
        # `strings` and `GlobalSubrs` are attached externally by CFFFontSet
        # after construction.
        top = TopDict(
            self.strings,
            file,
            offset,
            self.GlobalSubrs,
            self.cff2GetGlyphOrder,
            isCFF2=self._isCFF2,
        )
        top.decompile(data)
        return top
    def toXML(self, xmlWriter):
        for i in range(len(self)):
            xmlWriter.begintag("FontDict", index=i)
            xmlWriter.newline()
            self[i].toXML(xmlWriter)
            xmlWriter.endtag("FontDict")
            xmlWriter.newline()
class FDArrayIndex(Index):
    """INDEX of FontDicts for CID-keyed / CFF2 fonts (the FDArray)."""
    compilerClass = FDArrayIndexCompiler
    def toXML(self, xmlWriter):
        for i in range(len(self)):
            xmlWriter.begintag("FontDict", index=i)
            xmlWriter.newline()
            self[i].toXML(xmlWriter)
            xmlWriter.endtag("FontDict")
            xmlWriter.newline()
    def produceItem(self, index, data, file, offset):
        # `strings`, `GlobalSubrs` and `vstore` are attached externally
        # before items are accessed.
        fontDict = FontDict(
            self.strings,
            file,
            offset,
            self.GlobalSubrs,
            isCFF2=self._isCFF2,
            vstore=self.vstore,
        )
        fontDict.decompile(data)
        return fontDict
    def fromXML(self, name, attrs, content):
        # Only <FontDict> children are meaningful here.
        if name != "FontDict":
            return
        fontDict = FontDict()
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            fontDict.fromXML(name, attrs, content)
        self.append(fontDict)
class VarStoreData(object):
    """Wrapper around the CFF2 VarStore: holds either raw bytes read from the
    file or a decompiled ``otTables.VarStore`` object."""
    def __init__(self, file=None, otVarStore=None):
        self.file = file
        self.data = None
        self.otVarStore = otVarStore
        self.font = TTFont() # dummy font for the decompile function.
    def decompile(self):
        if self.file:
            # read data in from file. Assume position is correct.
            length = readCard16(self.file)
            # https://github.com/fonttools/fonttools/issues/3673
            # A length field of 0xFFFF means "rest of the stream".
            if length == 65535:
                self.data = self.file.read()
            else:
                self.data = self.file.read(length)
            globalState = {}
            reader = OTTableReader(self.data, globalState)
            self.otVarStore = ot.VarStore()
            self.otVarStore.decompile(reader, self.font)
            # Raw bytes no longer needed once decompiled.
            self.data = None
        return self
    def compile(self):
        writer = OTTableWriter()
        self.otVarStore.compile(writer, self.font)
        # Note that this omits the initial Card16 length from the CFF2
        # VarStore data block
        self.data = writer.getAllData()
    def writeXML(self, xmlWriter, name):
        self.otVarStore.toXML(xmlWriter, self.font)
    def xmlRead(self, name, attrs, content, parent):
        self.otVarStore = ot.VarStore()
        for element in content:
            if isinstance(element, tuple):
                name, attrs, content = element
                self.otVarStore.fromXML(name, attrs, content, self.font)
            else:
                # Whitespace between elements; ignore.
                pass
        return None
    def __len__(self):
        return len(self.data)
    def getNumRegions(self, vsIndex):
        # vsIndex of None means the first (default) VarData.
        if vsIndex is None:
            vsIndex = 0
        varData = self.otVarStore.VarData[vsIndex]
        numRegions = varData.VarRegionCount
        return numRegions
class FDSelect(object):
    """Maps glyph IDs to FontDict indices for CID-keyed / CFF2 fonts.

    Supports format 0 (one byte per glyph) and the range-based formats 3
    (16-bit) and 4 (32-bit, CFF2 only)."""
    def __init__(self, file=None, numGlyphs=None, format=None):
        if file:
            # read data in from file
            self.format = readCard8(file)
            if self.format == 0:
                # Format 0: a flat array of one FD index per glyph.
                from array import array
                self.gidArray = array("B", file.read(numGlyphs)).tolist()
            elif self.format == 3:
                # Format 3: ranges of (first glyph, fd); each range extends up
                # to the next range's first glyph; a sentinel ends the last.
                gidArray = [None] * numGlyphs
                nRanges = readCard16(file)
                fd = None
                prev = None
                for i in range(nRanges):
                    first = readCard16(file)
                    if prev is not None:
                        for glyphID in range(prev, first):
                            gidArray[glyphID] = fd
                    prev = first
                    fd = readCard8(file)
                if prev is not None:
                    # Sentinel: closes the final range.
                    first = readCard16(file)
                    for glyphID in range(prev, first):
                        gidArray[glyphID] = fd
                self.gidArray = gidArray
            elif self.format == 4:
                # Format 4 (CFF2): like format 3 but with 32-bit glyph IDs
                # and 16-bit FD indices.
                gidArray = [None] * numGlyphs
                nRanges = readCard32(file)
                fd = None
                prev = None
                for i in range(nRanges):
                    first = readCard32(file)
                    if prev is not None:
                        for glyphID in range(prev, first):
                            gidArray[glyphID] = fd
                    prev = first
                    fd = readCard16(file)
                if prev is not None:
                    first = readCard32(file)
                    for glyphID in range(prev, first):
                        gidArray[glyphID] = fd
                self.gidArray = gidArray
            else:
                assert False, "unsupported FDSelect format: %s" % format
        else:
            # reading from XML. Make empty gidArray, and leave format as passed in.
            # format is None will result in the smallest representation being used.
            self.format = format
            self.gidArray = []
    def __len__(self):
        return len(self.gidArray)
    def __getitem__(self, index):
        return self.gidArray[index]
    def __setitem__(self, index, fdSelectValue):
        self.gidArray[index] = fdSelectValue
    def append(self, fdSelectValue):
        self.gidArray.append(fdSelectValue)
class CharStrings(object):
"""The ``CharStrings`` in the font represent the instructions for drawing
each glyph. This object presents a dictionary interface to the font's
CharStrings, indexed by glyph name:
.. code:: python
tt["CFF "].cff[0].CharStrings["a"]
#
See :class:`fontTools.misc.psCharStrings.T1CharString` and
:class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
compile and interpret the glyph drawing instructions in the returned objects.
"""
def __init__(
self,
file,
charset,
globalSubrs,
private,
fdSelect,
fdArray,
isCFF2=None,
varStore=None,
):
self.globalSubrs = globalSubrs
self.varStore = varStore
if file is not None:
self.charStringsIndex = SubrsIndex(
file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2
)
self.charStrings = charStrings = {}
for i in range(len(charset)):
charStrings[charset[i]] = i
# read from OTF file: charStrings.values() are indices into
# charStringsIndex.
self.charStringsAreIndexed = 1
else:
self.charStrings = {}
# read from ttx file: charStrings.values() are actual charstrings
self.charStringsAreIndexed = 0
self.private = private
if fdSelect is not None:
self.fdSelect = fdSelect
if fdArray is not None:
self.fdArray = fdArray
def keys(self):
return list(self.charStrings.keys())
def values(self):
if self.charStringsAreIndexed:
return self.charStringsIndex
else:
return list(self.charStrings.values())
def has_key(self, name):
return name in self.charStrings
__contains__ = has_key
def __len__(self):
return len(self.charStrings)
def __getitem__(self, name):
charString = self.charStrings[name]
if self.charStringsAreIndexed:
charString = self.charStringsIndex[charString]
return charString
def __setitem__(self, name, charString):
if self.charStringsAreIndexed:
index = self.charStrings[name]
self.charStringsIndex[index] = charString
else:
self.charStrings[name] = charString
def getItemAndSelector(self, name):
if self.charStringsAreIndexed:
index = self.charStrings[name]
return self.charStringsIndex.getItemAndSelector(index)
else:
if hasattr(self, "fdArray"):
if hasattr(self, "fdSelect"):
sel = self.charStrings[name].fdSelectIndex
else:
sel = 0
else:
sel = None
return self.charStrings[name], sel
def toXML(self, xmlWriter):
names = sorted(self.keys())
for name in names:
charStr, fdSelectIndex = self.getItemAndSelector(name)
if charStr.needsDecompilation():
raw = [("raw", 1)]
else:
raw = []
if fdSelectIndex is None:
xmlWriter.begintag("CharString", [("name", name)] + raw)
else:
xmlWriter.begintag(
"CharString",
[("name", name), ("fdSelectIndex", fdSelectIndex)] + raw,
)
xmlWriter.newline()
charStr.toXML(xmlWriter)
xmlWriter.endtag("CharString")
xmlWriter.newline()
    def fromXML(self, name, attrs, content):
        """Rebuild charstrings from TTX XML content.

        *content* is a list mixing whitespace strings with
        ``(name, attrs, content)`` element tuples; only ``CharString``
        elements are consumed.  NOTE: the loop deliberately rebinds
        name/attrs/content to each child element's values.
        """
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            if name != "CharString":
                continue
            fdID = -1
            if hasattr(self, "fdArray"):
                # CID-keyed font: the Private dict comes from the FDArray
                # entry chosen by the charstring's fdSelectIndex (0 when absent).
                try:
                    fdID = safeEval(attrs["fdSelectIndex"])
                except KeyError:
                    fdID = 0
                private = self.fdArray[fdID].Private
            else:
                private = self.private
            glyphName = attrs["name"]
            charStringClass = psCharStrings.T2CharString
            charString = charStringClass(private=private, globalSubrs=self.globalSubrs)
            charString.fromXML(name, attrs, content)
            if fdID >= 0:
                # Remember the FD index so FDSelect can be rebuilt at compile time.
                charString.fdSelectIndex = fdID
            self[glyphName] = charString
def readCard8(file):
    """Read an unsigned 8-bit integer from *file*."""
    return byteord(file.read(1))
def readCard16(file):
    """Read a big-endian unsigned 16-bit integer from *file*."""
    return struct.unpack(">H", file.read(2))[0]
def readCard32(file):
    """Read a big-endian unsigned 32-bit integer from *file*."""
    return struct.unpack(">L", file.read(4))[0]
def writeCard8(file, value):
    """Write *value* to *file* as an unsigned 8-bit integer."""
    file.write(bytechr(value))
def writeCard16(file, value):
    """Write *value* to *file* as a big-endian unsigned 16-bit integer."""
    packed = struct.pack(">H", value)
    file.write(packed)
def writeCard32(file, value):
    """Write *value* to *file* as a big-endian unsigned 32-bit integer."""
    packed = struct.pack(">L", value)
    file.write(packed)
def packCard8(value):
    """Pack *value* as a single unsigned byte."""
    return bytechr(value)
def packCard16(value):
    """Pack *value* as a big-endian unsigned 16-bit integer."""
    return struct.pack(">H", value)
def packCard32(value):
    """Pack *value* as a big-endian unsigned 32-bit integer."""
    return struct.pack(">L", value)
def buildOperatorDict(table):
    """Map opcode -> (name, argument type) from an operator table."""
    return {entry[0]: (entry[1], entry[2]) for entry in table}
def buildOpcodeDict(table):
    """Map operator name -> (packed opcode bytes, argument type).

    Two-byte (escaped) opcodes are given as tuples in the table.
    """
    d = {}
    for entry in table:
        op, name, arg = entry[0], entry[1], entry[2]
        if isinstance(op, tuple):
            packed = bytechr(op[0]) + bytechr(op[1])
        else:
            packed = bytechr(op)
        d[name] = (packed, arg)
    return d
def buildOrder(table):
    """Return the operator names in table order."""
    return [entry[1] for entry in table]
def buildDefaults(table):
    """Map name -> default value, for operators that define a default."""
    return {entry[1]: entry[3] for entry in table if entry[3] is not None}
def buildConverters(table):
    """Map operator name -> converter instance (may be None)."""
    return {entry[1]: entry[4] for entry in table}
class SimpleConverter(object):
    """Base converter: passes values through unchanged.

    ``read`` saves and restores the parent's file position so subclasses
    that seek around in the font file do not disturb the caller's cursor.
    """

    def read(self, parent, value):
        if not hasattr(parent, "file"):
            return self._read(parent, value)
        stream = parent.file
        saved = stream.tell()
        try:
            return self._read(parent, value)
        finally:
            stream.seek(saved)

    def _read(self, parent, value):
        # Identity by default; subclasses decode here.
        return value

    def write(self, parent, value):
        # Identity by default; subclasses encode here.
        return value

    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.simpletag(name, value=value)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return attrs["value"]
class ASCIIConverter(SimpleConverter):
    """Converter for ASCII string operands (stored as bytes in the font)."""
    def _read(self, parent, value):
        return tostr(value, encoding="ascii")
    def write(self, parent, value):
        return tobytes(value, encoding="ascii")
    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.simpletag(name, value=tostr(value, encoding="ascii"))
        xmlWriter.newline()
    def xmlRead(self, name, attrs, content, parent):
        return tobytes(attrs["value"], encoding=("ascii"))
class Latin1Converter(SimpleConverter):
    """Converter for Latin-1 string operands (Notice, Copyright, names)."""
    def _read(self, parent, value):
        return tostr(value, encoding="latin1")
    def write(self, parent, value):
        return tobytes(value, encoding="latin1")
    def xmlWrite(self, xmlWriter, name, value):
        value = tostr(value, encoding="latin1")
        if name in ["Notice", "Copyright"]:
            # Fold hard-wrapped continuation lines into single spaces so the
            # XML attribute stays on one line.
            value = re.sub(r"[\r\n]\s+", " ", value)
        xmlWriter.simpletag(name, value=value)
        xmlWriter.newline()
    def xmlRead(self, name, attrs, content, parent):
        return tobytes(attrs["value"], encoding=("latin1"))
def parseNum(s):
    """Parse a numeric string: int when possible, otherwise float.

    Raises ValueError if *s* is neither.  (The original used a bare
    ``except:``, which also swallowed SystemExit/KeyboardInterrupt and
    masked type errors.)
    """
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
def parseBlendList(s):
    """Parse ``<blend>`` child elements into a list of numeric lists.

    *s* is TTX element content: a mix of whitespace strings and
    ``(name, attrs, content)`` tuples.  Each element's ``value`` attribute
    is a whitespace-separated list of numbers.  A single blend list is
    returned unwrapped.

    Security fix: the original converted tokens with ``eval()``, executing
    arbitrary expressions from the XML; tokens are now parsed strictly as
    int or float (ValueError on anything else).
    """
    def _num(tok):
        # Numeric-only replacement for eval(); matches what xmlWrite emits.
        try:
            return int(tok)
        except ValueError:
            return float(tok)
    valueList = []
    for element in s:
        if isinstance(element, str):
            continue
        name, attrs, content = element
        valueList.append([_num(tok) for tok in attrs["value"].split()])
    if len(valueList) == 1:
        valueList = valueList[0]
    return valueList
class NumberConverter(SimpleConverter):
    """Converter for scalar dict operands; a list value is a CFF2 blend."""

    def xmlWrite(self, xmlWriter, name, value):
        if not isinstance(value, list):
            xmlWriter.simpletag(name, value=value)
            xmlWriter.newline()
            return
        # Blended value: emit a nested <blend> element.
        xmlWriter.begintag(name)
        xmlWriter.newline()
        xmlWriter.indent()
        xmlWriter.simpletag(kBlendDictOpName, value=" ".join(str(v) for v in value))
        xmlWriter.newline()
        xmlWriter.dedent()
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        if attrs.get("value", None) is None:
            return parseBlendList(content)
        return parseNum(attrs["value"])
class ArrayConverter(SimpleConverter):
    """Converter for array/delta operands; lists-of-lists are CFF2 blends."""

    def xmlWrite(self, xmlWriter, name, value):
        if value and isinstance(value[0], list):
            # Blended arrays: one <blend> element per blend list.
            xmlWriter.begintag(name)
            xmlWriter.newline()
            xmlWriter.indent()
            for sub in value:
                xmlWriter.simpletag(kBlendDictOpName, value=" ".join(str(v) for v in sub))
                xmlWriter.newline()
            xmlWriter.dedent()
            xmlWriter.endtag(name)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(name, value=" ".join(str(v) for v in value))
            xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        valueString = attrs.get("value", None)
        if valueString is None:
            return parseBlendList(content)
        return [parseNum(tok) for tok in valueString.split()]
class TableConverter(SimpleConverter):
    """Converter whose value is a nested table object with to/fromXML;
    subclasses supply the concrete class via ``getClass``."""

    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.begintag(name)
        xmlWriter.newline()
        value.toXML(xmlWriter)
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        obj = self.getClass()()
        for element in content:
            if not isinstance(element, str):
                # element is a (name, attrs, content) tuple.
                obj.fromXML(*element)
        return obj
class PrivateDictConverter(TableConverter):
    """Reads/writes the Private dict; its operand is a (size, offset) pair."""
    def getClass(self):
        return PrivateDict
    def _read(self, parent, value):
        # value is the (size, offset) operand pair from the Top/Font dict.
        size, offset = value
        file = parent.file
        isCFF2 = parent._isCFF2
        try:
            vstore = parent.vstore
        except AttributeError:
            vstore = None
        priv = PrivateDict(parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
        file.seek(offset)
        data = file.read(size)
        assert len(data) == size
        priv.decompile(data)
        return priv
    def write(self, parent, value):
        # Placeholder; the real (size, offset) is filled in later by
        # PrivateDictCompiler.setPos once layout is known.
        return (0, 0)  # dummy value
class SubrsConverter(TableConverter):
    """Reads the local Subrs INDEX; its operand is a self-relative offset."""
    def getClass(self):
        return SubrsIndex
    def _read(self, parent, value):
        file = parent.file
        isCFF2 = parent._isCFF2
        # The Subrs offset is relative to the start of the Private dict.
        file.seek(parent.offset + value)  # Offset(self)
        return SubrsIndex(file, isCFF2=isCFF2)
    def write(self, parent, value):
        # Placeholder; patched by the compiler once the offset is known.
        return 0  # dummy value
class CharStringsConverter(TableConverter):
    """Reads/writes the CharStrings INDEX; its operand is an absolute offset."""
    def _read(self, parent, value):
        file = parent.file
        isCFF2 = parent._isCFF2
        charset = parent.charset
        varStore = getattr(parent, "VarStore", None)
        globalSubrs = parent.GlobalSubrs
        if hasattr(parent, "FDArray"):
            # CID-keyed font: per-glyph Private dicts come from the FDArray.
            fdArray = parent.FDArray
            if hasattr(parent, "FDSelect"):
                fdSelect = parent.FDSelect
            else:
                fdSelect = None
            private = None
        else:
            # Name-keyed font: single Private dict on the Top dict.
            fdSelect, fdArray = None, None
            private = parent.Private
        file.seek(value)  # Offset(0)
        charStrings = CharStrings(
            file,
            charset,
            globalSubrs,
            private,
            fdSelect,
            fdArray,
            isCFF2=isCFF2,
            varStore=varStore,
        )
        return charStrings
    def write(self, parent, value):
        # Placeholder; the compiler fills in the real offset via setPos.
        return 0  # dummy value
    def xmlRead(self, name, attrs, content, parent):
        if hasattr(parent, "FDArray"):
            # if it is a CID-keyed font, then the private Dict is extracted from the
            # parent.FDArray
            fdArray = parent.FDArray
            if hasattr(parent, "FDSelect"):
                fdSelect = parent.FDSelect
            else:
                fdSelect = None
            private = None
        else:
            # if it is a name-keyed font, then the private dict is in the top dict,
            # and
            # there is no fdArray.
            private, fdSelect, fdArray = parent.Private, None, None
        charStrings = CharStrings(
            None,
            None,
            parent.GlobalSubrs,
            private,
            fdSelect,
            fdArray,
            varStore=getattr(parent, "VarStore", None),
        )
        charStrings.fromXML(name, attrs, content)
        return charStrings
class CharsetConverter(SimpleConverter):
    """Reads the charset: the per-GID glyph name (or CID) list.

    Operand values 0-2 select predefined charsets; larger values are file
    offsets to custom charset data.
    """
    def _read(self, parent, value):
        isCID = hasattr(parent, "ROS")
        if value > 2:
            # Custom charset stored at offset `value`.
            numGlyphs = parent.numGlyphs
            file = parent.file
            file.seek(value)
            log.log(DEBUG, "loading charset at %s", value)
            format = readCard8(file)
            if format == 0:
                charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
            elif format == 1 or format == 2:
                charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
            else:
                raise NotImplementedError
            assert len(charset) == numGlyphs
            log.log(DEBUG, " charset end at %s", file.tell())
            # make sure glyph names are unique
            allNames = {}
            newCharset = []
            for glyphName in charset:
                if glyphName in allNames:
                    # make up a new glyphName that's unique
                    n = allNames[glyphName]
                    names = set(allNames) | set(charset)
                    while (glyphName + "." + str(n)) in names:
                        n += 1
                    # Remember where to resume numbering for this base name.
                    allNames[glyphName] = n + 1
                    glyphName = glyphName + "." + str(n)
                allNames[glyphName] = 1
                newCharset.append(glyphName)
            charset = newCharset
        else:  # offset == 0 -> no charset data.
            if isCID or "CharStrings" not in parent.rawDict:
                # We get here only when processing fontDicts from the FDArray of
                # CFF-CID fonts. Only the real topDict references the charset.
                assert value == 0
                charset = None
            elif value == 0:
                charset = cffISOAdobeStrings
            elif value == 1:
                charset = cffIExpertStrings
            elif value == 2:
                charset = cffExpertSubsetStrings
            if charset and (len(charset) != parent.numGlyphs):
                # Trim a predefined charset down to the actual glyph count.
                charset = charset[: parent.numGlyphs]
        return charset
    def write(self, parent, value):
        # Placeholder; the real offset/code is set by CharsetCompiler.setPos.
        return 0  # dummy value
    def xmlWrite(self, xmlWriter, name, value):
        # XXX only write charset when not in OT/TTX context, where we
        # dump charset as a separate "GlyphOrder" table.
        # # xmlWriter.simpletag("charset")
        xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
        xmlWriter.newline()
    def xmlRead(self, name, attrs, content, parent):
        # Nothing to do: the charset is rebuilt from the GlyphOrder element.
        pass
class CharsetCompiler(object):
    """Compiles a charset to binary, keeping the smaller of format 0 and
    the range format (1/2)."""

    def __init__(self, strings, charset, parent):
        assert charset[0] == ".notdef"
        isCID = hasattr(parent.dictObj, "ROS")
        flat = packCharset0(charset, isCID, strings)
        ranged = packCharset(charset, isCID, strings)
        self.data = ranged if len(ranged) < len(flat) else flat
        self.parent = parent

    def setPos(self, pos, endPos):
        # Record the final charset offset in the owning dict's raw data.
        self.parent.rawDict["charset"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
def getStdCharSet(charset):
    """Return the predefined charset code (0/1/2) whose entries are a
    prefix-superset of *charset*, or None when no predefined set matches."""
    predefinedCharSets = [
        (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
        (cffExpertStringCount, cffIExpertStrings, 1),
        (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
    ]
    lcs = len(charset)
    for cnt, pcs, csv in predefinedCharSets:
        if lcs > cnt:
            continue
        if all(charset[i] == pcs[i] for i in range(lcs)):
            return csv
    return None
def getCIDfromName(name, strings):
    """Return the CID encoded in a "cidXXXXX" glyph name (*strings* unused;
    kept for signature parity with getSIDfromName)."""
    return int(name[len("cid"):])
def getSIDfromName(name, strings):
    """Return (allocating if needed) the SID for *name* in the string index."""
    return strings.getSID(name)
def packCharset0(charset, isCID, strings):
    """Pack a charset in format 0: a flat SID/CID list, .notdef omitted."""
    getNameID = getCIDfromName if isCID else getSIDfromName
    chunks = [packCard8(0)]
    chunks.extend(packCard16(getNameID(name, strings)) for name in charset[1:])
    return bytesjoin(chunks)
def packCharset(charset, isCID, strings):
    """Pack a charset in range format: (first SID/CID, nLeft) runs.

    Format 2 (16-bit nLeft) is selected automatically when any run covers
    more than 256 glyphs; .notdef is implicit and omitted.
    """
    fmt = 1
    ranges = []
    first = None
    end = 0
    if isCID:
        getNameID = getCIDfromName
    else:
        getNameID = getSIDfromName
    for name in charset[1:]:
        SID = getNameID(name, strings)
        if first is None:
            first = SID
        elif end + 1 != SID:
            # The consecutive run is broken; flush it.
            nLeft = end - first
            if nLeft > 255:
                fmt = 2
            ranges.append((first, nLeft))
            first = SID
        end = SID
    if end:
        # Flush the final run.
        nLeft = end - first
        if nLeft > 255:
            fmt = 2
        ranges.append((first, nLeft))
    data = [packCard8(fmt)]
    if fmt == 1:
        nLeftFunc = packCard8
    else:
        nLeftFunc = packCard16
    for first, nLeft in ranges:
        data.append(packCard16(first) + nLeftFunc(nLeft))
    return bytesjoin(data)
def parseCharset0(numGlyphs, file, strings, isCID):
    """Parse a format-0 charset: one SID (or CID) per glyph after .notdef."""
    charset = [".notdef"]
    for _ in range(numGlyphs - 1):
        gid = readCard16(file)
        if isCID:
            charset.append("cid" + str(gid).zfill(5))
        else:
            charset.append(strings[gid])
    return charset
def parseCharset(numGlyphs, file, strings, isCID, fmt):
    """Parse a format-1/2 charset: (first, nLeft) runs of SIDs/CIDs.

    Format 1 stores nLeft as a byte, format 2 as a 16-bit integer.
    """
    charset = [".notdef"]
    count = 1
    if fmt == 1:
        nLeftFunc = readCard8
    else:
        nLeftFunc = readCard16
    while count < numGlyphs:
        first = readCard16(file)
        nLeft = nLeftFunc(file)
        if isCID:
            for CID in range(first, first + nLeft + 1):
                charset.append("cid" + str(CID).zfill(5))
        else:
            for SID in range(first, first + nLeft + 1):
                charset.append(strings[SID])
        count = count + nLeft + 1
    return charset
class EncodingCompiler(object):
    """Compiles a custom Encoding, keeping the smaller of formats 0 and 1."""

    def __init__(self, strings, encoding, parent):
        assert not isinstance(encoding, str)
        packed0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
        packed1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
        self.data = packed0 if len(packed0) < len(packed1) else packed1
        self.parent = parent

    def setPos(self, pos, endPos):
        # Record the final Encoding offset in the owning dict's raw data.
        self.parent.rawDict["Encoding"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
class EncodingConverter(SimpleConverter):
    """Reads/writes the Encoding operand: 0/1 name the standard encodings,
    larger values are file offsets to custom encoding data."""
    def _read(self, parent, value):
        if value == 0:
            return "StandardEncoding"
        elif value == 1:
            return "ExpertEncoding"
        # custom encoding at offset `value`
        assert value > 1
        file = parent.file
        file.seek(value)
        log.log(DEBUG, "loading Encoding at %s", value)
        fmt = readCard8(file)
        # The high bit of the format byte flags supplemental mappings.
        haveSupplement = bool(fmt & 0x80)
        fmt = fmt & 0x7F
        if fmt == 0:
            encoding = parseEncoding0(parent.charset, file)
        elif fmt == 1:
            encoding = parseEncoding1(parent.charset, file)
        else:
            raise ValueError(f"Unknown Encoding format: {fmt}")
        if haveSupplement:
            parseEncodingSupplement(file, encoding, parent.strings)
        return encoding
    def write(self, parent, value):
        if value == "StandardEncoding":
            return 0
        elif value == "ExpertEncoding":
            return 1
        # Custom encoding: placeholder offset, patched by EncodingCompiler.
        return 0  # dummy value
    def xmlWrite(self, xmlWriter, name, value):
        if value in ("StandardEncoding", "ExpertEncoding"):
            xmlWriter.simpletag(name, name=value)
            xmlWriter.newline()
            return
        xmlWriter.begintag(name)
        xmlWriter.newline()
        for code in range(len(value)):
            glyphName = value[code]
            if glyphName != ".notdef":
                xmlWriter.simpletag("map", code=hex(code), name=glyphName)
                xmlWriter.newline()
        xmlWriter.endtag(name)
        xmlWriter.newline()
    def xmlRead(self, name, attrs, content, parent):
        if "name" in attrs:
            # One of the two named standard encodings.
            return attrs["name"]
        encoding = [".notdef"] * 256
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            code = safeEval(attrs["code"])
            glyphName = attrs["name"]
            encoding[code] = glyphName
        return encoding
def readSID(file):
    """Read a String ID (big-endian uint16), raising EOFError on truncation."""
    raw = file.read(2)
    if len(raw) < 2:
        raise EOFError("Unexpected end of file while reading SID")
    (sid,) = struct.unpack(">H", raw)
    return sid
def parseEncodingSupplement(file, encoding, strings):
    """Parse the Encoding supplement — nSups (code, SID) pairs — and apply
    the extra mappings to *encoding* in place."""
    nSups = readCard8(file)
    for _ in range(nSups):
        code = readCard8(file)
        sid = readSID(file)
        encoding[code] = strings[sid]
def parseEncoding0(charset, file):
    """Parse a format-0 Encoding: one code byte per glyph, 0 = unencoded."""
    nCodes = readCard8(file)
    encoding = [".notdef"] * 256
    for glyphID in range(1, nCodes + 1):
        code = readCard8(file)
        if code:
            encoding[code] = charset[glyphID]
    return encoding
def parseEncoding1(charset, file):
    """Parse a format-1 Encoding built from (first code, nLeft) ranges."""
    nRanges = readCard8(file)
    encoding = [".notdef"] * 256
    glyphID = 1
    for _ in range(nRanges):
        code = readCard8(file)
        nLeft = readCard8(file)
        for offset in range(nLeft + 1):
            encoding[code + offset] = charset[glyphID]
            glyphID += 1
    return encoding
def packEncoding0(charset, encoding, strings):
    """Pack Encoding format 0: one code per glyph in charset order,
    with trailing unencoded glyphs dropped and gaps written as 0."""
    codeForName = {}
    for code, name in enumerate(encoding):
        if name != ".notdef":
            codeForName[name] = code
    codes = [codeForName.get(name) for name in charset[1:]]
    while codes and codes[-1] is None:
        codes.pop()
    pieces = [packCard8(0), packCard8(len(codes))]
    pieces.extend(packCard8(0 if code is None else code) for code in codes)
    return bytesjoin(pieces)
def packEncoding1(charset, encoding, strings):
    """Pack Encoding format 1: (first code, nLeft) ranges in charset order.

    Unencoded glyphs are tracked as code -1 while building ranges; trailing
    unencoded ranges are dropped and any remaining are written with code 0.
    """
    fmt = 1
    m = {}
    for code in range(len(encoding)):
        name = encoding[code]
        if name != ".notdef":
            m[name] = code
    ranges = []
    first = None
    end = 0
    for name in charset[1:]:
        code = m.get(name, -1)
        if first is None:
            first = code
        elif end + 1 != code:
            # The consecutive run is broken; flush it.
            nLeft = end - first
            ranges.append((first, nLeft))
            first = code
        end = code
    # Flush the final run.
    nLeft = end - first
    ranges.append((first, nLeft))
    # remove unencoded glyphs at the end.
    while ranges and ranges[-1][0] == -1:
        ranges.pop()
    data = [packCard8(fmt), packCard8(len(ranges))]
    for first, nLeft in ranges:
        if first == -1:  # unencoded
            first = 0
        data.append(packCard8(first) + packCard8(nLeft))
    return bytesjoin(data)
class FDArrayConverter(TableConverter):
    """Reads the FDArray INDEX of Font dicts (CID-keyed and CFF2 fonts)."""
    def _read(self, parent, value):
        try:
            vstore = parent.VarStore
        except AttributeError:
            vstore = None
        file = parent.file
        isCFF2 = parent._isCFF2
        file.seek(value)
        fdArray = FDArrayIndex(file, isCFF2=isCFF2)
        # Propagate shared state the FontDicts need while decompiling.
        fdArray.vstore = vstore
        fdArray.strings = parent.strings
        fdArray.GlobalSubrs = parent.GlobalSubrs
        return fdArray
    def write(self, parent, value):
        # Placeholder; the real offset is patched in at compile time.
        return 0  # dummy value
    def xmlRead(self, name, attrs, content, parent):
        fdArray = FDArrayIndex()
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            fdArray.fromXML(name, attrs, content)
        return fdArray
class FDSelectConverter(SimpleConverter):
    """Reads the FDSelect structure mapping each GID to a Font dict index."""
    def _read(self, parent, value):
        file = parent.file
        file.seek(value)
        fdSelect = FDSelect(file, parent.numGlyphs)
        return fdSelect
    def write(self, parent, value):
        # Placeholder; patched later by FDSelectCompiler.setPos.
        return 0  # dummy value
    # The FDSelect glyph data is written out to XML in the charstring keys,
    # so we write out only the format selector
    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.simpletag(name, [("format", value.format)])
        xmlWriter.newline()
    def xmlRead(self, name, attrs, content, parent):
        fmt = safeEval(attrs["format"])
        file = None
        numGlyphs = None
        # The per-glyph mapping is rebuilt from the fdSelectIndex attributes
        # on the CharString elements, so only the format is read here.
        fdSelect = FDSelect(file, numGlyphs, fmt)
        return fdSelect
class VarStoreConverter(SimpleConverter):
    """Reads the CFF2 VariationStore; its operand is an absolute offset."""
    def _read(self, parent, value):
        file = parent.file
        file.seek(value)
        varStore = VarStoreData(file)
        varStore.decompile()
        return varStore
    def write(self, parent, value):
        # Placeholder; patched later by VarStoreCompiler.setPos.
        return 0  # dummy value
    def xmlWrite(self, xmlWriter, name, value):
        value.writeXML(xmlWriter, name)
    def xmlRead(self, name, attrs, content, parent):
        varStore = VarStoreData()
        varStore.xmlRead(name, attrs, content, parent)
        return varStore
def packFDSelect0(fdSelectArray):
    """Pack FDSelect format 0: one FD index byte per glyph."""
    pieces = [packCard8(0)]
    pieces.extend(packCard8(fd) for fd in fdSelectArray)
    return bytesjoin(pieces)
def packFDSelect3(fdSelectArray):
    """Pack FDSelect format 3: (first GID, fd index) ranges plus a sentinel.

    Bug fix: the original computed the sentinel as ``i + 1`` using the last
    loop index, which raised NameError for an empty array; the sentinel is
    simply the total glyph count.
    """
    fmt = 3
    fdRanges = []
    lastFDIndex = -1
    for gid, fdIndex in enumerate(fdSelectArray):
        if fdIndex != lastFDIndex:
            # A new range starts wherever the FD index changes.
            fdRanges.append((gid, fdIndex))
            lastFDIndex = fdIndex
    sentinelGID = len(fdSelectArray)
    data = [packCard8(fmt)]
    data.append(packCard16(len(fdRanges)))
    for first, fdIndex in fdRanges:
        data.append(packCard16(first))
        data.append(packCard8(fdIndex))
    data.append(packCard16(sentinelGID))
    return bytesjoin(data)
def packFDSelect4(fdSelectArray):
    """Pack FDSelect format 4 (CFF2): 32-bit (first GID, fd index) ranges
    plus a 32-bit sentinel GID.

    Bug fix: the original computed the sentinel as ``i + 1`` using the last
    loop index, which raised NameError for an empty array; the sentinel is
    simply the total glyph count.
    """
    fmt = 4
    fdRanges = []
    lastFDIndex = -1
    for gid, fdIndex in enumerate(fdSelectArray):
        if fdIndex != lastFDIndex:
            # A new range starts wherever the FD index changes.
            fdRanges.append((gid, fdIndex))
            lastFDIndex = fdIndex
    sentinelGID = len(fdSelectArray)
    data = [packCard8(fmt)]
    data.append(packCard32(len(fdRanges)))
    for first, fdIndex in fdRanges:
        data.append(packCard32(first))
        data.append(packCard16(fdIndex))
    data.append(packCard32(sentinelGID))
    return bytesjoin(data)
class FDSelectCompiler(object):
    """Compiles FDSelect; for an unset format it picks the smaller of
    formats 0 and 3 and records the choice back on the FDSelect object."""
    def __init__(self, fdSelect, parent):
        fmt = fdSelect.format
        fdSelectArray = fdSelect.gidArray
        if fmt == 0:
            self.data = packFDSelect0(fdSelectArray)
        elif fmt == 3:
            self.data = packFDSelect3(fdSelectArray)
        elif fmt == 4:
            self.data = packFDSelect4(fdSelectArray)
        else:
            # choose smaller of the two formats
            data0 = packFDSelect0(fdSelectArray)
            data3 = packFDSelect3(fdSelectArray)
            if len(data0) < len(data3):
                self.data = data0
                fdSelect.format = 0
            else:
                self.data = data3
                fdSelect.format = 3
        self.parent = parent
    def setPos(self, pos, endPos):
        # Record the final FDSelect offset in the owning Top dict.
        self.parent.rawDict["FDSelect"] = pos
    def getDataLength(self):
        return len(self.data)
    def toFile(self, file):
        file.write(self.data)
class VarStoreCompiler(object):
    """Compiles the CFF2 VariationStore: a Card16 length prefix followed by
    the store data.

    Bug fix: the original clamped the length prefix with ``min(0xFFFF, ...)``
    while still writing all of the data, silently producing a corrupt table
    for stores larger than 64 KiB; now the true length is written and an
    oversized store fails loudly.
    """
    def __init__(self, varStoreData, parent):
        self.parent = parent
        if not varStoreData.data:
            varStoreData.compile()
        dataLength = len(varStoreData.data)
        if dataLength > 0xFFFF:
            raise ValueError(
                "VarStore data too long to compile: %d bytes" % dataLength
            )
        data = [packCard16(dataLength), varStoreData.data]
        self.data = bytesjoin(data)
    def setPos(self, pos, endPos):
        # Record the final VarStore offset in the owning Top dict.
        self.parent.rawDict["VarStore"] = pos
    def getDataLength(self):
        return len(self.data)
    def toFile(self, file):
        file.write(self.data)
class ROSConverter(SimpleConverter):
    """Converter for the ROS (Registry, Ordering, Supplement) operator of
    CID-keyed fonts."""

    def xmlWrite(self, xmlWriter, name, value):
        registry, order, supplement = value
        attrs = [
            ("Registry", tostr(registry)),
            ("Order", tostr(order)),
            ("Supplement", supplement),
        ]
        xmlWriter.simpletag(name, attrs)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return (attrs["Registry"], attrs["Order"], safeEval(attrs["Supplement"]))
# CFF Top DICT operator table: (opcode, name, argument type, default,
# converter).  Rows with converter None get one assigned by addConverters().
topDictOperators = [
    # opcode name argument type default converter
    (25, "maxstack", "number", None, None),
    ((12, 30), "ROS", ("SID", "SID", "number"), None, ROSConverter()),
    ((12, 20), "SyntheticBase", "number", None, None),
    (0, "version", "SID", None, None),
    (1, "Notice", "SID", None, Latin1Converter()),
    ((12, 0), "Copyright", "SID", None, Latin1Converter()),
    (2, "FullName", "SID", None, Latin1Converter()),
    ((12, 38), "FontName", "SID", None, Latin1Converter()),
    (3, "FamilyName", "SID", None, Latin1Converter()),
    (4, "Weight", "SID", None, None),
    ((12, 1), "isFixedPitch", "number", 0, None),
    ((12, 2), "ItalicAngle", "number", 0, None),
    ((12, 3), "UnderlinePosition", "number", -100, None),
    ((12, 4), "UnderlineThickness", "number", 50, None),
    ((12, 5), "PaintType", "number", 0, None),
    ((12, 6), "CharstringType", "number", 2, None),
    ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
    (13, "UniqueID", "number", None, None),
    (5, "FontBBox", "array", [0, 0, 0, 0], None),
    ((12, 8), "StrokeWidth", "number", 0, None),
    (14, "XUID", "array", None, None),
    ((12, 21), "PostScript", "SID", None, None),
    ((12, 22), "BaseFontName", "SID", None, None),
    ((12, 23), "BaseFontBlend", "delta", None, None),
    ((12, 31), "CIDFontVersion", "number", 0, None),
    ((12, 32), "CIDFontRevision", "number", 0, None),
    ((12, 33), "CIDFontType", "number", 0, None),
    ((12, 34), "CIDCount", "number", 8720, None),
    (15, "charset", "number", None, CharsetConverter()),
    ((12, 35), "UIDBase", "number", None, None),
    (16, "Encoding", "number", 0, EncodingConverter()),
    (18, "Private", ("number", "number"), None, PrivateDictConverter()),
    ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
    ((12, 36), "FDArray", "number", None, FDArrayConverter()),
    (17, "CharStrings", "number", None, CharStringsConverter()),
    (24, "VarStore", "number", None, VarStoreConverter()),
]
# Reduced Top DICT operator table for CFF2.
topDictOperators2 = [
    # opcode name argument type default converter
    (25, "maxstack", "number", None, None),
    ((12, 7), "FontMatrix", "array", [0.001, 0, 0, 0.001, 0, 0], None),
    ((12, 37), "FDSelect", "number", None, FDSelectConverter()),
    ((12, 36), "FDArray", "number", None, FDArrayConverter()),
    (17, "CharStrings", "number", None, CharStringsConverter()),
    (24, "VarStore", "number", None, VarStoreConverter()),
]
# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order,
# in order for the font to compile back from xml.
# Name and opcode of the CFF2 blend operator, shared by the tables below.
kBlendDictOpName = "blend"
blendOp = 23
# CFF Private DICT operator table (same column layout as topDictOperators).
privateDictOperators = [
    # opcode name argument type default converter
    (22, "vsindex", "number", None, None),
    (
        blendOp,
        kBlendDictOpName,
        "blendList",
        None,
        None,
    ),  # This is for reading to/from XML: it is not written to CFF.
    (6, "BlueValues", "delta", None, None),
    (7, "OtherBlues", "delta", None, None),
    (8, "FamilyBlues", "delta", None, None),
    (9, "FamilyOtherBlues", "delta", None, None),
    ((12, 9), "BlueScale", "number", 0.039625, None),
    ((12, 10), "BlueShift", "number", 7, None),
    ((12, 11), "BlueFuzz", "number", 1, None),
    (10, "StdHW", "number", None, None),
    (11, "StdVW", "number", None, None),
    ((12, 12), "StemSnapH", "delta", None, None),
    ((12, 13), "StemSnapV", "delta", None, None),
    ((12, 14), "ForceBold", "number", 0, None),
    ((12, 15), "ForceBoldThreshold", "number", None, None),  # deprecated
    ((12, 16), "lenIV", "number", None, None),  # deprecated
    ((12, 17), "LanguageGroup", "number", 0, None),
    ((12, 18), "ExpansionFactor", "number", 0.06, None),
    ((12, 19), "initialRandomSeed", "number", 0, None),
    (20, "defaultWidthX", "number", 0, None),
    (21, "nominalWidthX", "number", 0, None),
    (19, "Subrs", "number", None, SubrsConverter()),
]
# Reduced Private DICT operator table for CFF2 (no deprecated/width keys).
privateDictOperators2 = [
    # opcode name argument type default converter
    (22, "vsindex", "number", None, None),
    (
        blendOp,
        kBlendDictOpName,
        "blendList",
        None,
        None,
    ),  # This is for reading to/from XML: it is not written to CFF.
    (6, "BlueValues", "delta", None, None),
    (7, "OtherBlues", "delta", None, None),
    (8, "FamilyBlues", "delta", None, None),
    (9, "FamilyOtherBlues", "delta", None, None),
    ((12, 9), "BlueScale", "number", 0.039625, None),
    ((12, 10), "BlueShift", "number", 7, None),
    ((12, 11), "BlueFuzz", "number", 1, None),
    (10, "StdHW", "number", None, None),
    (11, "StdVW", "number", None, None),
    ((12, 12), "StemSnapH", "delta", None, None),
    ((12, 13), "StemSnapV", "delta", None, None),
    ((12, 17), "LanguageGroup", "number", 0, None),
    ((12, 18), "ExpansionFactor", "number", 0.06, None),
    (19, "Subrs", "number", None, SubrsConverter()),
]
def addConverters(table):
    """Fill in default converters for operator-table rows that lack one.

    Mutates *table* in place: delta/array -> ArrayConverter, number ->
    NumberConverter, SID -> ASCIIConverter; blendList rows keep None.

    Bug fix: an unknown argument type now raises ValueError instead of
    ``assert False``, which is silently stripped under ``python -O``.
    """
    for i in range(len(table)):
        op, name, arg, default, conv = table[i]
        if conv is not None:
            continue
        if arg in ("delta", "array"):
            conv = ArrayConverter()
        elif arg == "number":
            conv = NumberConverter()
        elif arg == "SID":
            conv = ASCIIConverter()
        elif arg == "blendList":
            conv = None
        else:
            raise ValueError("unknown operator argument type: %r" % (arg,))
        table[i] = op, name, arg, default, conv
# Install the default converters on the CFF (non-CFF2) operator tables.
addConverters(privateDictOperators)
addConverters(topDictOperators)
class TopDictDecompiler(psCharStrings.DictDecompiler):
    """Decompiles Top dict binary data using the CFF Top DICT operator table."""
    operators = buildOperatorDict(topDictOperators)
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
    """Decompiles Private dict binary data using the Private DICT operator table."""
    operators = buildOperatorDict(privateDictOperators)
class DictCompiler(object):
    """Base compiler for CFF/CFF2 dicts.

    Converts the dict object's attribute values to raw operand values via
    the per-name converters, then serializes them as an operand/operator
    byte stream in the table's canonical order.
    """
    # Max operand stack depth for blend packing; overridden by subclasses
    # that support CFF2 blends (see PrivateDictCompiler).
    maxBlendStack = 0
    def __init__(self, dictObj, strings, parent, isCFF2=None):
        if strings:
            assert isinstance(strings, IndexedStrings)
        if isCFF2 is None and hasattr(parent, "isCFF2"):
            isCFF2 = parent.isCFF2
        assert isCFF2 is not None
        self.isCFF2 = isCFF2
        self.dictObj = dictObj
        self.strings = strings
        self.parent = parent
        rawDict = {}
        for name in dictObj.order:
            value = getattr(dictObj, name, None)
            if value is None:
                continue
            conv = dictObj.converters[name]
            value = conv.write(dictObj, value)
            if value == dictObj.defaults.get(name):
                # Values equal to the spec default need not be written.
                continue
            rawDict[name] = value
        self.rawDict = rawDict
    def setPos(self, pos, endPos):
        # Default: nothing to record; subclasses store their final offset.
        pass
    def getDataLength(self):
        return len(self.compile("getDataLength"))
    def compile(self, reason):
        """Serialize the raw dict as operand/operator bytes, in table order."""
        log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
        rawDict = self.rawDict
        data = []
        for name in self.dictObj.order:
            value = rawDict.get(name)
            if value is None:
                continue
            op, argType = self.opcodes[name]
            if isinstance(argType, tuple):
                # Multi-operand operator (e.g. ROS, Private).
                l = len(argType)
                assert len(value) == l, "value doesn't match arg type"
                for i in range(l):
                    arg = argType[i]
                    v = value[i]
                    arghandler = getattr(self, "arg_" + arg)
                    data.append(arghandler(v))
            else:
                arghandler = getattr(self, "arg_" + argType)
                data.append(arghandler(value))
            data.append(op)
        data = bytesjoin(data)
        return data
    def toFile(self, file):
        data = self.compile("toFile")
        file.write(data)
    def arg_number(self, num):
        if isinstance(num, list):
            # A list means a blended value: operands, count 1, then blendOp.
            data = [encodeNumber(val) for val in num]
            data.append(encodeNumber(1))
            data.append(bytechr(blendOp))
            datum = bytesjoin(data)
        else:
            datum = encodeNumber(num)
        return datum
    def arg_SID(self, s):
        # SIDs are written as plain CFF integers.
        return psCharStrings.encodeIntCFF(self.strings.getSID(s))
    def arg_array(self, value):
        data = []
        for num in value:
            data.append(self.arg_number(num))
        return bytesjoin(data)
    def arg_delta(self, value):
        # Delta arrays are stored as differences from the previous value.
        if not value:
            return b""
        val0 = value[0]
        if isinstance(val0, list):
            data = self.arg_delta_blend(value)
        else:
            out = []
            last = 0
            for v in value:
                out.append(v - last)
                last = v
            data = []
            for num in out:
                data.append(encodeNumber(num))
        return bytesjoin(data)
    def arg_delta_blend(self, value):
        """A delta list with blend lists has to be *all* blend lists.
        The value is a list is arranged as follows::
        [
        [V0, d0..dn]
        [V1, d0..dn]
        ...
        [Vm, d0..dn]
        ]
        ``V`` is the absolute coordinate value from the default font, and ``d0-dn``
        are the delta values from the *n* regions. Each ``V`` is an absolute
        coordinate from the default font.
        We want to return a list::
        [
        [v0, v1..vm]
        [d0..dn]
        ...
        [d0..dn]
        numBlends
        blendOp
        ]
        where each ``v`` is relative to the previous default font value.
        """
        numMasters = len(value[0])
        numBlends = len(value)
        numStack = (numBlends * numMasters) + 1
        if numStack > self.maxBlendStack:
            # Figure out the max number of value we can blend
            # and divide this list up into chunks of that size.
            numBlendValues = int((self.maxBlendStack - 1) / numMasters)
            out = []
            while True:
                numVal = min(len(value), numBlendValues)
                if numVal == 0:
                    break
                valList = value[0:numVal]
                out1 = self.arg_delta_blend(valList)
                out.extend(out1)
                value = value[numVal:]
        else:
            firstList = [0] * numBlends
            deltaList = [None] * numBlends
            i = 0
            prevVal = 0
            while i < numBlends:
                # For PrivateDict BlueValues, the default font
                # values are absolute, not relative.
                # Must convert these back to relative coordinates
                # before writing to CFF2.
                defaultValue = value[i][0]
                firstList[i] = defaultValue - prevVal
                prevVal = defaultValue
                deltaList[i] = value[i][1:]
                i += 1
            relValueList = firstList
            for blendList in deltaList:
                relValueList.extend(blendList)
            out = [encodeNumber(val) for val in relValueList]
            out.append(encodeNumber(numBlends))
            out.append(bytechr(blendOp))
        return out
def encodeNumber(num):
    """Encode a number as CFF dict operand bytes (real or integer form)."""
    if isinstance(num, float):
        encoder = psCharStrings.encodeFloat
    else:
        encoder = psCharStrings.encodeIntCFF
    return encoder(num)
class TopDictCompiler(DictCompiler):
    """Compiles the Top dict and gathers the compilers of its dependent
    structures (charset, Encoding, VarStore, FDSelect, CharStrings, FDArray,
    Private)."""
    opcodes = buildOpcodeDict(topDictOperators)
    def getChildren(self, strings):
        isCFF2 = self.isCFF2
        children = []
        if self.dictObj.cff2GetGlyphOrder is None:
            # CFF 1.0: charset and Encoding are stored in the font data.
            if hasattr(self.dictObj, "charset") and self.dictObj.charset:
                if hasattr(self.dictObj, "ROS"):  # aka isCID
                    charsetCode = None
                else:
                    charsetCode = getStdCharSet(self.dictObj.charset)
                if charsetCode is None:
                    children.append(
                        CharsetCompiler(strings, self.dictObj.charset, self)
                    )
                else:
                    # A predefined charset: just store its code, no data.
                    self.rawDict["charset"] = charsetCode
            if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
                encoding = self.dictObj.Encoding
                if not isinstance(encoding, str):
                    # Named standard encodings need no data table.
                    children.append(EncodingCompiler(strings, encoding, self))
        else:
            # CFF2: no charset/Encoding, but possibly a VariationStore.
            if hasattr(self.dictObj, "VarStore"):
                varStoreData = self.dictObj.VarStore
                varStoreComp = VarStoreCompiler(varStoreData, self)
                children.append(varStoreComp)
        if hasattr(self.dictObj, "FDSelect"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that
            # either the font was read from XML, and the FDSelect indices are all
            # in the charstring data, or the FDSelect array is already fully defined.
            fdSelect = self.dictObj.FDSelect
            # probably read in from XML; assume fdIndex in CharString data
            if len(fdSelect) == 0:
                charStrings = self.dictObj.CharStrings
                for name in self.dictObj.charset:
                    fdSelect.append(charStrings[name].fdSelectIndex)
            fdSelectComp = FDSelectCompiler(fdSelect, self)
            children.append(fdSelectComp)
        if hasattr(self.dictObj, "CharStrings"):
            items = []
            charStrings = self.dictObj.CharStrings
            for name in self.dictObj.charset:
                items.append(charStrings[name])
            charStringsComp = CharStringsCompiler(items, strings, self, isCFF2=isCFF2)
            children.append(charStringsComp)
        if hasattr(self.dictObj, "FDArray"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that the
            # FDArray info is correct and complete.
            fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
            children.append(fdArrayIndexComp)
            children.extend(fdArrayIndexComp.getChildren(strings))
        if hasattr(self.dictObj, "Private"):
            privComp = self.dictObj.Private.getCompiler(strings, self)
            children.append(privComp)
            children.extend(privComp.getChildren(strings))
        return children
class FontDictCompiler(DictCompiler):
    opcodes = buildOpcodeDict(topDictOperators)

    def __init__(self, dictObj, strings, parent, isCFF2=None):
        """Compile an FDArray FontDict, warning about entries that get dropped.

        Any key understood by the TopDict converters but absent from the
        FontDict output order will not be compiled; we detect such keys here
        and emit a single warning listing their names.
        """
        super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
        dictObj = self.dictObj
        unusable = []
        # Every converter name outside the output order is a candidate for
        # being silently dropped at compile time.
        for name in sorted(set(dictObj.converters) - set(dictObj.order)):
            if name in dictObj.rawDict:
                # The font came straight from binary: report *all* "useless"
                # key/value pairs present, default-valued or not.
                unusable.append(name)
                continue
            # The font was probably read from a TTX file: only report keys
            # whose value differs from the default, since default-valued keys
            # would not be written to binary anyway.
            default = dictObj.defaults.get(name)
            if default is None:
                continue
            default = dictObj.converters[name].read(dictObj, default)
            if getattr(dictObj, name, None) != default:
                unusable.append(name)
        if unusable:
            log.warning(
                "Some CFF FDArray/FontDict keys were ignored upon compile: "
                + " ".join(sorted(unusable))
            )

    def getChildren(self, strings):
        """Return compilers for the nested Private dict, if any."""
        if not hasattr(self.dictObj, "Private"):
            return []
        privComp = self.dictObj.Private.getCompiler(strings, self)
        return [privComp] + privComp.getChildren(strings)
class PrivateDictCompiler(DictCompiler):
    maxBlendStack = maxStackLimit
    opcodes = buildOpcodeDict(privateDictOperators)

    def setPos(self, pos, endPos):
        """Record this Private dict's (size, offset) pair on the parent dict."""
        self.parent.rawDict["Private"] = (endPos - pos, pos)
        self.pos = pos

    def getChildren(self, strings):
        """Return the compiler for the local Subrs index, if present."""
        if hasattr(self.dictObj, "Subrs"):
            return [self.dictObj.Subrs.getCompiler(strings, self)]
        return []
class BaseDict(object):
    """Base class for CFF dictionaries (TopDict, FontDict, PrivateDict).

    Raw operator key/value pairs live in ``rawDict``; attribute access is
    resolved lazily through the per-key ``converters`` table declared on
    subclasses, then cached on the instance.
    """

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
        # isCFF2 must be supplied exactly when a file is supplied (both or
        # neither): it is only meaningful when decompiling from binary.
        assert (isCFF2 is None) == (file is None)
        self.rawDict = {}
        self.skipNames = []
        self.strings = strings
        if file is None:
            # Built from scratch (e.g. imported from XML); nothing to read.
            return
        self._isCFF2 = isCFF2
        self.file = file
        if offset is not None:
            log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
            self.offset = offset

    def decompile(self, data):
        """Parse the binary DICT *data* into ``rawDict`` via ``decompilerClass``."""
        log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
        dec = self.decompilerClass(self.strings, self)
        dec.decompile(data)
        self.rawDict = dec.getDict()
        self.postDecompile()

    def postDecompile(self):
        # Hook for subclasses; e.g. TopDict reads the glyph count here.
        pass

    def getCompiler(self, strings, parent, isCFF2=None):
        """Return the matching compiler instance for this dict."""
        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)

    def __getattr__(self, name):
        # Called only for attributes not found the normal way: look the value
        # up in rawDict (falling back to defaults), run it through the
        # converter once, then cache it with setattr so later access is plain.
        if name[:2] == name[-2:] == "__":
            # to make deepcopy() and pickle.load() work, we need to signal with
            # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
            # aren't implemented. For more details, see:
            # https://github.com/fonttools/fonttools/pull/1488
            raise AttributeError(name)
        value = self.rawDict.get(name, None)
        if value is None:
            value = self.defaults.get(name)
        if value is None:
            raise AttributeError(name)
        conv = self.converters[name]
        value = conv.read(self, value)
        setattr(self, name, value)
        return value

    def toXML(self, xmlWriter):
        """Write this dict's keys (in ``order``, minus ``skipNames``) as XML."""
        for name in self.order:
            if name in self.skipNames:
                continue
            value = getattr(self, name, None)
            # XXX For "charset" we never skip calling xmlWrite even if the
            # value is None, so we always write the following XML comment:
            #
            # <!-- charset is dumped separately as the 'GlyphOrder' element -->
            #
            # Charset is None when 'CFF ' table is imported from XML into an
            # empty TTFont(). By writing this comment all the time, we obtain
            # the same XML output whether roundtripping XML-to-XML or
            # dumping binary-to-XML
            if value is None and name != "charset":
                continue
            conv = self.converters[name]
            conv.xmlWrite(xmlWriter, name, value)
        # Keys we carry in rawDict but never emit are surfaced as a comment.
        ignoredNames = set(self.rawDict) - set(self.order)
        if ignoredNames:
            xmlWriter.comment(
                "some keys were ignored: %s" % " ".join(sorted(ignoredNames))
            )
            xmlWriter.newline()

    def fromXML(self, name, attrs, content):
        """Set attribute *name* from its XML representation."""
        conv = self.converters[name]
        value = conv.xmlRead(name, attrs, content, self)
        setattr(self, name, value)
class TopDict(BaseDict):
    """The ``TopDict`` represents the top-level dictionary holding font
    information. CFF2 tables contain a restricted set of top-level entries
    as described `here
    <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_,
    but CFF tables may contain a wider range of information. This information
    can be accessed through attributes or through the dictionary returned
    through the ``rawDict`` property:

    .. code:: python

        font = tt["CFF "].cff[0]
        font.FamilyName
        # 'Linux Libertine O'
        font.rawDict["FamilyName"]
        # 'Linux Libertine O'

    More information is available in the CFF file's private dictionary, accessed
    via the ``Private`` property:

    .. code:: python

        tt["CFF "].cff[0].Private.BlueValues
        # [-15, 0, 515, 515, 666, 666]
    """

    defaults = buildDefaults(topDictOperators)
    converters = buildConverters(topDictOperators)
    compilerClass = TopDictCompiler
    order = buildOrder(topDictOperators)
    decompilerClass = TopDictDecompiler

    def __init__(
        self,
        strings=None,
        file=None,
        offset=None,
        GlobalSubrs=None,
        cff2GetGlyphOrder=None,
        isCFF2=None,
    ):
        super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.cff2GetGlyphOrder = cff2GetGlyphOrder
        self.GlobalSubrs = GlobalSubrs
        if isCFF2:
            self.defaults = buildDefaults(topDictOperators2)
            # CFF2 has no charset operator: the glyph order comes from the
            # enclosing TTFont via the supplied callable.
            self.charset = cff2GetGlyphOrder()
            self.order = buildOrder(topDictOperators2)
        else:
            self.defaults = buildDefaults(topDictOperators)
            self.order = buildOrder(topDictOperators)

    def getGlyphOrder(self):
        """Returns a list of glyph names in the CFF font."""
        return self.charset

    def postDecompile(self):
        # Peek at the CharStrings INDEX header to learn the glyph count
        # before the charstrings themselves are decompiled.
        offset = self.rawDict.get("CharStrings")
        if offset is None:
            return
        # get the number of glyphs beforehand.
        self.file.seek(offset)
        if self._isCFF2:
            self.numGlyphs = readCard32(self.file)
        else:
            self.numGlyphs = readCard16(self.file)

    def toXML(self, xmlWriter):
        """Dump to XML, hiding keys that make no sense for this font flavour."""
        if hasattr(self, "CharStrings"):
            self.decompileAllCharStrings()
        if hasattr(self, "ROS"):
            # CID-keyed font: the Encoding operator does not apply.
            self.skipNames = ["Encoding"]
        if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
            # these values have default values, but I only want them to show up
            # in CID fonts.
            self.skipNames = [
                "CIDFontVersion",
                "CIDFontRevision",
                "CIDFontType",
                "CIDCount",
            ]
        BaseDict.toXML(self, xmlWriter)

    def decompileAllCharStrings(self):
        # Make sure that all the Private Dicts have been instantiated.
        for i, charString in enumerate(self.CharStrings.values()):
            try:
                charString.decompile()
            except:  # deliberately broad: the error is logged, then re-raised
                log.error("Error in charstring %s", i)
                raise

    def recalcFontBBox(self):
        """Recompute FontBBox as the union of all charstring bounds."""
        fontBBox = None
        for charString in self.CharStrings.values():
            bounds = charString.calcBounds(self.CharStrings)
            if bounds is not None:
                if fontBBox is not None:
                    fontBBox = unionRect(fontBBox, bounds)
                else:
                    fontBBox = bounds
        if fontBBox is None:
            # No outlines at all: fall back to (a copy of) the spec default.
            self.FontBBox = self.defaults["FontBBox"][:]
        else:
            self.FontBBox = list(intRect(fontBBox))
class FontDict(BaseDict):
    #
    # Older fonttools versions passed many TopDict fields that are irrelevant
    # in an FDArray FontDict, and wrote explicit values for all TopDict
    # defaults, so 'ttx' files in the wild carry all of those keys; binary
    # fonts with such useless FontDict entries exist too.  Per spec any
    # key/value pair is legal in a FontDict, but current major CFF
    # interpreters only honour the short list below, so we silently skip the
    # rest.  One exception is Weight: no interpreter uses it, but some
    # foundries asked for it to be kept in FDArray FontDicts as design
    # metadata for anyone inspecting the font.
    #
    # We therefore *accept* every TopDict key when reading (binary or XML) by
    # reusing the TopDict converters/decompiler, but we restrict `order` so
    # that only the useful names are ever exported to XML or compiled into
    # the binary font.
    #
    # `compilerClass` is overridden so the compiler can warn about "useless"
    # key/value pairs, whether they came from the original binary font or
    # from TTX input.
    #
    # See:
    # - https://github.com/fonttools/fonttools/issues/740
    # - https://github.com/fonttools/fonttools/issues/601
    # - https://github.com/adobe-type-tools/afdko/issues/137
    #
    defaults = {}
    converters = buildConverters(topDictOperators)
    compilerClass = FontDictCompiler
    orderCFF = ["FontName", "FontMatrix", "Weight", "Private"]
    orderCFF2 = ["Private"]
    decompilerClass = TopDictDecompiler

    def __init__(
        self,
        strings=None,
        file=None,
        offset=None,
        GlobalSubrs=None,
        isCFF2=None,
        vstore=None,
    ):
        super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.vstore = vstore
        self.setCFF2(isCFF2)

    def setCFF2(self, isCFF2):
        """Select the CFF or CFF2 output key order.

        ``isCFF2`` may be None, which is treated the same as False.
        """
        self._isCFF2 = bool(isCFF2)
        self.order = self.orderCFF2 if self._isCFF2 else self.orderCFF
class PrivateDict(BaseDict):
    defaults = buildDefaults(privateDictOperators)
    converters = buildConverters(privateDictOperators)
    order = buildOrder(privateDictOperators)
    decompilerClass = PrivateDictDecompiler
    compilerClass = PrivateDictCompiler

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None, vstore=None):
        """Private DICT; operator set depends on the CFF flavour."""
        super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.vstore = vstore
        # CFF2 uses its own, restricted Private DICT operator set.
        operators = privateDictOperators2 if isCFF2 else privateDictOperators
        self.defaults = buildDefaults(operators)
        self.order = buildOrder(operators)
        self._isCFF2 = bool(isCFF2)
        if isCFF2:
            # CFF2 charstrings carry no width operators.  Dummy values here
            # spare callers from checking the CFF2 state in a lot of places.
            self.nominalWidthX = self.defaultWidthX = None

    @property
    def in_cff2(self):
        return self._isCFF2

    def getNumRegions(self, vi=None):  # called from misc/psCharStrings.py
        """Return the VarStore region count for variation index *vi*.

        If getNumRegions is being called, we can assume a VarStore exists;
        *vi* falls back to ``self.vsindex``, or 0 when neither is given.
        """
        if vi is None:
            vi = getattr(self, "vsindex", 0)
        return self.vstore.getNumRegions(vi)
class IndexedStrings(object):
    """SID -> string mapping."""

    def __init__(self, file=None):
        # Start empty, or decompile a CFF INDEX of latin-1 strings.
        if file is None:
            self.strings = []
        else:
            self.strings = [
                tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)
            ]

    def getCompiler(self):
        return IndexedStringsCompiler(self, None, self, isCFF2=False)

    def __len__(self):
        return len(self.strings)

    def __getitem__(self, SID):
        # SIDs below the standard-string count index the fixed standard set;
        # everything above indexes this font's own string INDEX.
        if SID < cffStandardStringCount:
            return cffStandardStrings[SID]
        return self.strings[SID - cffStandardStringCount]

    def getSID(self, s):
        """Return the SID for *s*, interning the string if it is new."""
        if not hasattr(self, "stringMapping"):
            self.buildStringMapping()
        s = tostr(s, encoding="latin1")
        sid = cffStandardStringMapping.get(s)
        if sid is not None:
            return sid
        sid = self.stringMapping.get(s)
        if sid is None:
            sid = len(self.strings) + cffStandardStringCount
            self.strings.append(s)
            self.stringMapping[s] = sid
        return sid

    def getStrings(self):
        return self.strings

    def buildStringMapping(self):
        # Custom strings start right after the 391 standard SIDs.
        self.stringMapping = {
            s: index + cffStandardStringCount
            for index, s in enumerate(self.strings)
        }
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
cffStandardStrings = """
    .notdef space exclam quotedbl numbersign dollar percent ampersand
    quoteright parenleft parenright asterisk plus comma hyphen period slash
    zero one two three four five six seven eight nine colon semicolon less
    equal greater question at
    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
    bracketleft backslash bracketright asciicircum underscore quoteleft
    a b c d e f g h i j k l m n o p q r s t u v w x y z
    braceleft bar braceright asciitilde exclamdown cent sterling fraction yen
    florin section currency quotesingle quotedblleft guillemotleft
    guilsinglleft guilsinglright fi fl endash dagger daggerdbl periodcentered
    paragraph bullet quotesinglbase quotedblbase quotedblright guillemotright
    ellipsis perthousand questiondown grave acute circumflex tilde macron
    breve dotaccent dieresis ring cedilla hungarumlaut ogonek caron emdash
    AE ordfeminine Lslash Oslash OE ordmasculine ae dotlessi lslash oslash oe
    germandbls onesuperior logicalnot mu trademark Eth onehalf plusminus
    Thorn onequarter divide brokenbar degree thorn threequarters twosuperior
    registered minus eth multiply threesuperior copyright
    Aacute Acircumflex Adieresis Agrave Aring Atilde Ccedilla Eacute
    Ecircumflex Edieresis Egrave Iacute Icircumflex Idieresis Igrave Ntilde
    Oacute Ocircumflex Odieresis Ograve Otilde Scaron Uacute Ucircumflex
    Udieresis Ugrave Yacute Ydieresis Zcaron
    aacute acircumflex adieresis agrave aring atilde ccedilla eacute
    ecircumflex edieresis egrave iacute icircumflex idieresis igrave ntilde
    oacute ocircumflex odieresis ograve otilde scaron uacute ucircumflex
    udieresis ugrave yacute ydieresis zcaron
    exclamsmall Hungarumlautsmall dollaroldstyle dollarsuperior
    ampersandsmall Acutesmall parenleftsuperior parenrightsuperior
    twodotenleader onedotenleader zerooldstyle oneoldstyle twooldstyle
    threeoldstyle fouroldstyle fiveoldstyle sixoldstyle sevenoldstyle
    eightoldstyle nineoldstyle commasuperior threequartersemdash
    periodsuperior questionsmall asuperior bsuperior centsuperior dsuperior
    esuperior isuperior lsuperior msuperior nsuperior osuperior rsuperior
    ssuperior tsuperior ff ffi ffl parenleftinferior parenrightinferior
    Circumflexsmall hyphensuperior Gravesmall
    Asmall Bsmall Csmall Dsmall Esmall Fsmall Gsmall Hsmall Ismall Jsmall
    Ksmall Lsmall Msmall Nsmall Osmall Psmall Qsmall Rsmall Ssmall Tsmall
    Usmall Vsmall Wsmall Xsmall Ysmall Zsmall
    colonmonetary onefitted rupiah Tildesmall exclamdownsmall centoldstyle
    Lslashsmall Scaronsmall Zcaronsmall Dieresissmall Brevesmall Caronsmall
    Dotaccentsmall Macronsmall figuredash hypheninferior Ogoneksmall
    Ringsmall Cedillasmall questiondownsmall oneeighth threeeighths
    fiveeighths seveneighths onethird twothirds zerosuperior foursuperior
    fivesuperior sixsuperior sevensuperior eightsuperior ninesuperior
    zeroinferior oneinferior twoinferior threeinferior fourinferior
    fiveinferior sixinferior seveninferior eightinferior nineinferior
    centinferior dollarinferior periodinferior commainferior
    Agravesmall Aacutesmall Acircumflexsmall Atildesmall Adieresissmall
    Aringsmall AEsmall Ccedillasmall Egravesmall Eacutesmall
    Ecircumflexsmall Edieresissmall Igravesmall Iacutesmall
    Icircumflexsmall Idieresissmall Ethsmall Ntildesmall Ogravesmall
    Oacutesmall Ocircumflexsmall Otildesmall Odieresissmall OEsmall
    Oslashsmall Ugravesmall Uacutesmall Ucircumflexsmall Udieresissmall
    Yacutesmall Thornsmall Ydieresissmall
    001.000 001.001 001.002 001.003
    Black Bold Book Light Medium Regular Roman Semibold
""".split()

cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount

# build reverse mapping: string -> SID
cffStandardStringMapping = {s: sid for sid, s in enumerate(cffStandardStrings)}
# The ISOAdobe charset: exactly the first 229 standard strings (SIDs 0-228).
cffISOAdobeStrings = """
    .notdef space exclam quotedbl numbersign dollar percent ampersand
    quoteright parenleft parenright asterisk plus comma hyphen period slash
    zero one two three four five six seven eight nine colon semicolon less
    equal greater question at
    A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
    bracketleft backslash bracketright asciicircum underscore quoteleft
    a b c d e f g h i j k l m n o p q r s t u v w x y z
    braceleft bar braceright asciitilde exclamdown cent sterling fraction yen
    florin section currency quotesingle quotedblleft guillemotleft
    guilsinglleft guilsinglright fi fl endash dagger daggerdbl periodcentered
    paragraph bullet quotesinglbase quotedblbase quotedblright guillemotright
    ellipsis perthousand questiondown grave acute circumflex tilde macron
    breve dotaccent dieresis ring cedilla hungarumlaut ogonek caron emdash
    AE ordfeminine Lslash Oslash OE ordmasculine ae dotlessi lslash oslash oe
    germandbls onesuperior logicalnot mu trademark Eth onehalf plusminus
    Thorn onequarter divide brokenbar degree thorn threequarters twosuperior
    registered minus eth multiply threesuperior copyright
    Aacute Acircumflex Adieresis Agrave Aring Atilde Ccedilla Eacute
    Ecircumflex Edieresis Egrave Iacute Icircumflex Idieresis Igrave Ntilde
    Oacute Ocircumflex Odieresis Ograve Otilde Scaron Uacute Ucircumflex
    Udieresis Ugrave Yacute Ydieresis Zcaron
    aacute acircumflex adieresis agrave aring atilde ccedilla eacute
    ecircumflex edieresis egrave iacute icircumflex idieresis igrave ntilde
    oacute ocircumflex odieresis ograve otilde scaron uacute ucircumflex
    udieresis ugrave yacute ydieresis zcaron
""".split()

cffISOAdobeStringCount = 229
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
# The Expert charset strings (166 entries).
cffIExpertStrings = """
    .notdef space exclamsmall Hungarumlautsmall dollaroldstyle dollarsuperior
    ampersandsmall Acutesmall parenleftsuperior parenrightsuperior
    twodotenleader onedotenleader comma hyphen period fraction
    zerooldstyle oneoldstyle twooldstyle threeoldstyle fouroldstyle
    fiveoldstyle sixoldstyle sevenoldstyle eightoldstyle nineoldstyle
    colon semicolon commasuperior threequartersemdash periodsuperior
    questionsmall asuperior bsuperior centsuperior dsuperior esuperior
    isuperior lsuperior msuperior nsuperior osuperior rsuperior ssuperior
    tsuperior ff fi fl ffi ffl parenleftinferior parenrightinferior
    Circumflexsmall hyphensuperior Gravesmall
    Asmall Bsmall Csmall Dsmall Esmall Fsmall Gsmall Hsmall Ismall Jsmall
    Ksmall Lsmall Msmall Nsmall Osmall Psmall Qsmall Rsmall Ssmall Tsmall
    Usmall Vsmall Wsmall Xsmall Ysmall Zsmall
    colonmonetary onefitted rupiah Tildesmall exclamdownsmall centoldstyle
    Lslashsmall Scaronsmall Zcaronsmall Dieresissmall Brevesmall Caronsmall
    Dotaccentsmall Macronsmall figuredash hypheninferior Ogoneksmall
    Ringsmall Cedillasmall onequarter onehalf threequarters
    questiondownsmall oneeighth threeeighths fiveeighths seveneighths
    onethird twothirds zerosuperior onesuperior twosuperior threesuperior
    foursuperior fivesuperior sixsuperior sevensuperior eightsuperior
    ninesuperior zeroinferior oneinferior twoinferior threeinferior
    fourinferior fiveinferior sixinferior seveninferior eightinferior
    nineinferior centinferior dollarinferior periodinferior commainferior
    Agravesmall Aacutesmall Acircumflexsmall Atildesmall Adieresissmall
    Aringsmall AEsmall Ccedillasmall Egravesmall Eacutesmall
    Ecircumflexsmall Edieresissmall Igravesmall Iacutesmall
    Icircumflexsmall Idieresissmall Ethsmall Ntildesmall Ogravesmall
    Oacutesmall Ocircumflexsmall Otildesmall Odieresissmall OEsmall
    Oslashsmall Ugravesmall Uacutesmall Ucircumflexsmall Udieresissmall
    Yacutesmall Thornsmall Ydieresissmall
""".split()

cffExpertStringCount = 166
assert len(cffIExpertStrings) == cffExpertStringCount
# The ExpertSubset charset strings (87 entries).
cffExpertSubsetStrings = """
    .notdef space dollaroldstyle dollarsuperior parenleftsuperior
    parenrightsuperior twodotenleader onedotenleader comma hyphen period
    fraction zerooldstyle oneoldstyle twooldstyle threeoldstyle
    fouroldstyle fiveoldstyle sixoldstyle sevenoldstyle eightoldstyle
    nineoldstyle colon semicolon commasuperior threequartersemdash
    periodsuperior asuperior bsuperior centsuperior dsuperior esuperior
    isuperior lsuperior msuperior nsuperior osuperior rsuperior ssuperior
    tsuperior ff fi fl ffi ffl parenleftinferior parenrightinferior
    hyphensuperior colonmonetary onefitted rupiah centoldstyle figuredash
    hypheninferior onequarter onehalf threequarters oneeighth threeeighths
    fiveeighths seveneighths onethird twothirds zerosuperior onesuperior
    twosuperior threesuperior foursuperior fivesuperior sixsuperior
    sevensuperior eightsuperior ninesuperior zeroinferior oneinferior
    twoinferior threeinferior fourinferior fiveinferior sixinferior
    seveninferior eightinferior nineinferior centinferior dollarinferior
    periodinferior commainferior
""".split()

cffExpertSubsetStringCount = 87
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
"""
colorLib.builder: Build COLR/CPAL tables from scratch
"""
import collections
import copy
import enum
from functools import partial
from math import ceil, log
from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from fontTools.misc.arrayTools import intRect
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.treeTools import build_n_ary_tree
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
from .errors import ColorLibError
from .geometry import round_start_circle_stable_containment
from .table_builder import BuildCallback, TableBuilder
# TODO move type aliases to colorLib.types?
T = TypeVar("T")
_Kwargs = Mapping[str, Any]
# A paint may be given as a palette index (int), a kwargs dict, a prebuilt
# ot.Paint, or a (format name, nested paint input) tuple.
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
_ClipBoxInput = Union[
    Tuple[int, int, int, int, int],  # format 2, variable (trailing varIndexBase)
    Tuple[int, int, int, int],  # format 1, non-variable
    ot.ClipBox,
]
# A PaintColrLayers record addresses at most 255 layers (uint8 NumLayers).
MAX_PAINT_COLR_LAYER_COUNT = 255
_DEFAULT_ALPHA = 1.0  # fully opaque
_MAX_REUSE_LEN = 32  # longest layer run considered for reuse
def _beforeBuildPaintRadialGradient(paint, source):
    """BEFORE_BUILD hook for (Var)RadialGradient paints.

    Nudges the start circle so that rounding cannot abruptly pop it out of
    (or into) containment when it lies near the end circle's perimeter.
    Mutates ``source`` in place with the corrected x0/y0/r0 and returns the
    (paint, source) pair.
    """
    # TODO apparently no builder_test confirms this works (?)
    # avoid abrupt change after rounding when c0 is near c1's perimeter
    start = round_start_circle_stable_containment(
        (source["x0"], source["y0"]),
        source["r0"],
        (source["x1"], source["y1"]),
        source["r1"],
    )
    # update source to ensure paint is built with corrected values
    source["x0"], source["y0"] = start.centre
    source["r0"] = start.radius
    return paint, source
def _defaultColorStop():
    """CREATE_DEFAULT hook: a ColorStop preset to fully-opaque Alpha."""
    stop = ot.ColorStop()
    stop.Alpha = _DEFAULT_ALPHA
    return stop
def _defaultVarColorStop():
    """CREATE_DEFAULT hook: a VarColorStop preset to fully-opaque Alpha."""
    stop = ot.VarColorStop()
    stop.Alpha = _DEFAULT_ALPHA
    return stop
def _defaultColorLine():
    """CREATE_DEFAULT hook: a ColorLine with Extend preset to PAD."""
    line = ot.ColorLine()
    line.Extend = ExtendMode.PAD
    return line
def _defaultVarColorLine():
    """CREATE_DEFAULT hook: a VarColorLine with Extend preset to PAD."""
    line = ot.VarColorLine()
    line.Extend = ExtendMode.PAD
    return line
def _defaultPaintSolid():
    """CREATE_DEFAULT hook: a (Var)Solid Paint preset to fully-opaque Alpha."""
    solid = ot.Paint()
    solid.Alpha = _DEFAULT_ALPHA
    return solid
def _buildPaintCallbacks():
    """Return the TableBuilder callback registry for building COLRv1 paints."""
    callbacks = {}
    # Radial gradients need their start circle stabilized before build.
    for fmt in (
        ot.PaintFormat.PaintRadialGradient,
        ot.PaintFormat.PaintVarRadialGradient,
    ):
        callbacks[
            (BuildCallback.BEFORE_BUILD, ot.Paint, fmt)
        ] = _beforeBuildPaintRadialGradient
    # Struct defaults presetting Alpha / Extend on freshly created objects.
    callbacks[(BuildCallback.CREATE_DEFAULT, ot.ColorStop)] = _defaultColorStop
    callbacks[(BuildCallback.CREATE_DEFAULT, ot.VarColorStop)] = _defaultVarColorStop
    callbacks[(BuildCallback.CREATE_DEFAULT, ot.ColorLine)] = _defaultColorLine
    callbacks[(BuildCallback.CREATE_DEFAULT, ot.VarColorLine)] = _defaultVarColorLine
    for fmt in (ot.PaintFormat.PaintSolid, ot.PaintFormat.PaintVarSolid):
        callbacks[(BuildCallback.CREATE_DEFAULT, ot.Paint, fmt)] = _defaultPaintSolid
    return callbacks
def populateCOLRv0(
    table: ot.COLR,
    colorGlyphsV0: _ColorGlyphsV0Dict,
    glyphMap: Optional[Mapping[str, int]] = None,
):
    """Build v0 color layers and add to existing COLR table.

    Args:
        table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
        colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
            color palette index) tuples. Can be empty.
        glyphMap: a map from glyph names to glyph indices, as returned from
            ``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
    """
    entries = colorGlyphsV0.items()
    if glyphMap is not None:
        # Sort base glyph records by glyph ID when a glyph map is supplied.
        entries = sorted(entries, key=lambda entry: glyphMap[entry[0]])

    baseRecs = []
    layerRecs = []
    for baseGlyph, layers in entries:
        rec = ot.BaseGlyphRecord()
        rec.BaseGlyph = baseGlyph
        rec.FirstLayerIndex = len(layerRecs)
        rec.NumLayers = len(layers)
        baseRecs.append(rec)
        for glyphName, paletteIndex in layers:
            layer = ot.LayerRecord()
            layer.LayerGlyph = glyphName
            layer.PaletteIndex = paletteIndex
            layerRecs.append(layer)

    # Reset both arrays, then only populate the non-empty ones.
    table.BaseGlyphRecordArray = table.LayerRecordArray = None
    if baseRecs:
        table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
        table.BaseGlyphRecordArray.BaseGlyphRecord = baseRecs
    if layerRecs:
        table.LayerRecordArray = ot.LayerRecordArray()
        table.LayerRecordArray.LayerRecord = layerRecs
    table.BaseGlyphRecordCount = len(baseRecs)
    table.LayerRecordCount = len(layerRecs)
def buildCOLR(
    colorGlyphs: _ColorGlyphsDict,
    version: Optional[int] = None,
    *,
    glyphMap: Optional[Mapping[str, int]] = None,
    varStore: Optional[ot.VarStore] = None,
    varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
    clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
    allowLayerReuse: bool = True,
) -> C_O_L_R_.table_C_O_L_R_:
    """Build COLR table from color layers mapping.

    Args:
        colorGlyphs: map of base glyph name to, either list of (layer glyph name,
            color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
            list of ``Paint`` for COLRv1.
        version: the version of COLR table. If None, the version is determined
            by the presence of COLRv1 paints or variation data (varStore), which
            require version 1; otherwise, if all base glyphs use only simple color
            layers, version 0 is used.
        glyphMap: a map from glyph names to glyph indices, as returned from
            TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
        varStore: Optional ItemVarationStore for deltas associated with v1 layer.
        varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer.
        clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
            (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).

    Returns:
        A new COLR table.
    """
    self = C_O_L_R_.table_C_O_L_R_()

    # A VarStore is a v1-only feature, so it conflicts with an explicit v0.
    if varStore is not None and version == 0:
        raise ValueError("Can't add VarStore to COLRv0")

    if version in (None, 0) and not varStore:
        # split color glyphs into v0 and v1 and encode separately
        colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
        if version == 0 and colorGlyphsV1:
            raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
    else:
        # unless explicitly requested for v1 or have variations, in which case
        # we encode all color glyph as v1
        colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs

    colr = ot.COLR()

    populateCOLRv0(colr, colorGlyphsV0, glyphMap)

    colr.LayerList, colr.BaseGlyphList = buildColrV1(
        colorGlyphsV1,
        glyphMap,
        allowLayerReuse=allowLayerReuse,
    )

    # Auto-select the version: v1 only when v1 paints or variations exist.
    if version is None:
        version = 1 if (varStore or colorGlyphsV1) else 0
    elif version not in (0, 1):
        raise NotImplementedError(version)
    self.version = colr.Version = version

    if version == 0:
        # v0 is stored on the ttLib wrapper as decompiled ColorLayers.
        self.ColorLayers = self._decompileColorLayersV0(colr)
    else:
        colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
        colr.VarIndexMap = varIndexMap
        colr.VarStore = varStore
        self.table = colr

    return self
def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
    """Build a format-1 ClipList from a map of glyph name to clip-box input."""
    clipList = ot.ClipList()
    clipList.Format = 1
    clipList.clips = {
        glyphName: buildClipBox(box) for glyphName, box in clipBoxes.items()
    }
    return clipList
def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
    """Coerce a 4/5-tuple into an ``ot.ClipBox`` (pass an existing one through).

    A 4-tuple (xMin, yMin, xMax, yMax) yields Format 1; a 5-tuple with a
    trailing varIndexBase yields Format 2 (variable).
    """
    if isinstance(clipBox, ot.ClipBox):
        return clipBox
    n = len(clipBox)
    if n not in (4, 5):
        raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}")
    clip = ot.ClipBox()
    clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4])
    clip.Format = int(n == 5) + 1
    if n == 5:
        clip.VarIndexBase = int(clipBox[4])
    return clip
class ColorPaletteType(enum.IntFlag):
    """CPAL palette-type flags; only the two low bits are defined."""

    USABLE_WITH_LIGHT_BACKGROUND = 0x0001
    USABLE_WITH_DARK_BACKGROUND = 0x0002

    @classmethod
    def _missing_(cls, value):
        # enforce reserved bits: anything outside the two defined low bits
        # (i.e. any value not in 0..3) is rejected outright.
        if isinstance(value, int) and not (0 <= value <= 3):
            raise ValueError(f"{value} is not a valid {cls.__name__}")
        return super()._missing_(value)
# None, 'abc' or {'en': 'abc', 'de': 'xyz'}
_OptionalLocalizedString = Union[None, str, Dict[str, str]]


def buildPaletteLabels(
    labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
    """Translate each optional palette label into a name-table name ID.

    ``None`` maps to ``NO_NAME_ID``; a bare string is installed as the
    default English name; a dict keyed by BCP47 language codes is installed
    as a multilingual name.
    """

    def _toNameID(label):
        if label is None:
            return C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
        if not isinstance(label, dict):
            label = {"en": label}
        return nameTable.addMultilingualName(label, mac=False)

    return [_toNameID(label) for label in labels]
def buildCPAL(
    palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
    paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
    paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
    """Build CPAL table from list of color palettes.

    Args:
        palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
            in the range [0..1].
        paletteTypes: optional list of ColorPaletteType, one for each palette.
        paletteLabels: optional list of palette labels. Each label can be either:
            None (no label), a string (for default English labels), or a
            localized string (as a dict keyed with BCP47 language codes).
        paletteEntryLabels: optional list of palette entry labels, one for each
            palette entry (see paletteLabels).
        nameTable: optional name table where to store palette and palette entry
            labels. Required if either paletteLabels or paletteEntryLabels is set.

    Return:
        A new CPAL v0 or v1 table, if custom palette types or labels are specified.

    Raises:
        ColorLibError: if palettes have inconsistent lengths, colors are malformed
            or out of range, or the optional per-palette/per-entry lists have the
            wrong length.
        TypeError: if labels are given but no nameTable is provided.
    """
    # All palettes must have the same number of entries (this also rejects an
    # empty `palettes` list, since the set of lengths is then empty).
    if len({len(p) for p in palettes}) != 1:
        raise ColorLibError("color palettes have different lengths")

    if (paletteLabels or paletteEntryLabels) and not nameTable:
        raise TypeError(
            "nameTable is required if palette or palette entries have labels"
        )

    cpal = C_P_A_L_.table_C_P_A_L_()
    cpal.numPaletteEntries = len(palettes[0])

    cpal.palettes = []
    for i, palette in enumerate(palettes):
        colors = []
        for j, color in enumerate(palette):
            if not isinstance(color, tuple) or len(color) != 4:
                raise ColorLibError(
                    f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
                )
            if any(v > 1 or v < 0 for v in color):
                raise ColorLibError(
                    f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
                )
            # input colors are RGBA, CPAL encodes them as BGRA
            red, green, blue, alpha = color
            colors.append(
                C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
            )
        cpal.palettes.append(colors)

    # Any custom types or labels bump the table to CPAL v1; otherwise emit v0.
    if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
        cpal.version = 1

        if paletteTypes is not None:
            if len(paletteTypes) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
                )
            # ColorPaletteType(t) validates reserved bits before we take .value
            cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
        else:
            cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
                palettes
            )

        if paletteLabels is not None:
            if len(paletteLabels) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
                )
            cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
        else:
            cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)

        if paletteEntryLabels is not None:
            if len(paletteEntryLabels) != cpal.numPaletteEntries:
                raise ColorLibError(
                    f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
                    f"got {len(paletteEntryLabels)}"
                )
            cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
        else:
            cpal.paletteEntryLabels = [
                C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
            ] * cpal.numPaletteEntries
    else:
        cpal.version = 0

    return cpal
# COLR v1 tables
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
def _is_colrv0_layer(layer: Any) -> bool:
# Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which
# the first element is a str (the layerGlyph) and the second element is an int
# (CPAL paletteIndex).
# https://github.com/googlefonts/ufo2ft/issues/426
try:
layerGlyph, paletteIndex = layer
except (TypeError, ValueError):
return False
else:
return isinstance(layerGlyph, str) and isinstance(paletteIndex, int)
def _split_color_glyphs_by_version(
    colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
    """Partition color glyphs into (v0, v1) dicts based on their layer format.

    A glyph goes into the v0 dict only when every one of its layers is a
    COLRv0-style (glyphName, paletteIndex) pair; otherwise it is v1.
    """
    v0, v1 = {}, {}
    for baseGlyph, layers in colorGlyphs.items():
        bucket = v0 if all(_is_colrv0_layer(layer) for layer in layers) else v1
        bucket[baseGlyph] = layers
    # sanity check: every input glyph landed in exactly one bucket
    assert set(colorGlyphs) == (set(v0) | set(v1))
    return v0, v1
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
    """Yield every (lbound, ubound) slice of at least 2 layers, capped in length.

    Slices longer than _MAX_REUSE_LEN are not produced: reuse of very large
    numbers of layers is relatively unlikely. Single-record duplication is
    already handled by otData, hence the minimum length of 2.
    """
    for start in range(num_layers):
        stop_limit = min(num_layers + 1, start + 2 + _MAX_REUSE_LEN)
        for stop in range(start + 2, stop_limit):
            yield (start, stop)
class LayerReuseCache:
    """Cache of previously stored layer runs, used to deduplicate slices of the
    shared COLRv1 layer list by pointing new PaintColrLayers records at an
    existing (FirstLayerIndex, NumLayers) span.
    """

    reusePool: Mapping[Tuple[Any, ...], int]  # hashable layer-run -> first layer index
    tuples: Mapping[int, Tuple[Any, ...]]  # id(paint) -> cached hashable form
    keepAlive: List[ot.Paint]  # we need id to remain valid

    def __init__(self):
        self.reusePool = {}
        self.tuples = {}
        self.keepAlive = []

    def _paint_tuple(self, paint: ot.Paint):
        """Return a hashable tuple representation of `paint`, memoized by id()."""
        # start simple, who even cares about cyclic graphs or interesting field types
        def _tuple_safe(value):
            if isinstance(value, enum.Enum):
                return value
            elif hasattr(value, "__dict__"):
                # arbitrary objects: sorted (field, value) pairs for stable hashing
                return tuple(
                    (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items())
                )
            elif isinstance(value, collections.abc.MutableSequence):
                return tuple(_tuple_safe(e) for e in value)
            return value

        # Cache the tuples for individual Paint instead of the whole sequence
        # because the seq could be a transient slice
        result = self.tuples.get(id(paint), None)
        if result is None:
            result = _tuple_safe(paint)
            self.tuples[id(paint)] = result
            # keep the Paint alive so its id() cannot be recycled for another object
            self.keepAlive.append(paint)
        return result

    def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]:
        """Hashable form of a sequence of paints, usable as a reusePool key."""
        return tuple(self._paint_tuple(p) for p in paints)

    def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]:
        """Repeatedly replace runs of `layers` already present in the pool with
        PaintColrLayers paints that reference the earlier occurrence; returns
        the (possibly shorter) new layer list. Longer runs are preferred.
        """
        found_reuse = True
        while found_reuse:
            found_reuse = False

            # sort: largest slice first, then by position as tie-breaker
            ranges = sorted(
                _reuse_ranges(len(layers)),
                key=lambda t: (t[1] - t[0], t[1], t[0]),
                reverse=True,
            )
            for lbound, ubound in ranges:
                reuse_lbound = self.reusePool.get(
                    self._as_tuple(layers[lbound:ubound]), -1
                )
                if reuse_lbound == -1:
                    continue
                # replace the matched run with a single indirection paint
                new_slice = ot.Paint()
                new_slice.Format = int(ot.PaintFormat.PaintColrLayers)
                new_slice.NumLayers = ubound - lbound
                new_slice.FirstLayerIndex = reuse_lbound
                layers = layers[:lbound] + [new_slice] + layers[ubound:]
                found_reuse = True
                # restart scanning: indices shifted after the splice
                break
        return layers

    def add(self, layers: List[ot.Paint], first_layer_index: int):
        """Register every reusable sub-run of `layers` (which was stored starting
        at `first_layer_index` in the shared layer list) for future reuse.
        """
        for lbound, ubound in _reuse_ranges(len(layers)):
            self.reusePool[self._as_tuple(layers[lbound:ubound])] = (
                lbound + first_layer_index
            )
class LayerListBuilder:
    """Builds ot.Paint objects from paint inputs, accumulating a shared
    LayerList for all PaintColrLayers and (optionally) deduplicating
    repeated layer runs via a LayerReuseCache.
    """

    layers: List[ot.Paint]  # the shared, growing layer list
    cache: LayerReuseCache  # None when layer reuse is disabled
    allowLayerReuse: bool

    def __init__(self, *, allowLayerReuse=True):
        self.layers = []
        if allowLayerReuse:
            self.cache = LayerReuseCache()
        else:
            self.cache = None

        # We need to intercept construction of PaintColrLayers
        callbacks = _buildPaintCallbacks()
        callbacks[
            (
                BuildCallback.BEFORE_BUILD,
                ot.Paint,
                ot.PaintFormat.PaintColrLayers,
            )
        ] = self._beforeBuildPaintColrLayers
        self.tableBuilder = TableBuilder(callbacks)

    # COLR layers is unusual in that it modifies shared state
    # so we need a callback into an object
    def _beforeBuildPaintColrLayers(self, dest, source):
        """BEFORE_BUILD hook for PaintColrLayers: builds the child layers,
        applies reuse, flattens over-long lists into an n-ary tree, appends
        the final layers to the shared list, and returns the finished paint.
        """
        # Sketchy gymnastics: a sequence input will have dropped it's layers
        # into NumLayers; get it back
        if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
            layers = source["NumLayers"]
        else:
            layers = source["Layers"]

        # Convert maps seqs or whatever into typed objects
        layers = [self.buildPaint(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        if self.cache is not None:
            # Look for reuse, with preference to longer sequences
            # This may make the layer list smaller
            layers = self.cache.try_reuse(layers)

        # The layer list is now final; if it's too big we need to tree it
        is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT
        layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT)

        # We now have a tree of sequences with Paint leaves.
        # Convert the sequences into PaintColrLayers.
        def listToColrLayers(layer):
            # recursively wrap nested sequences in PaintColrLayers paints
            if isinstance(layer, collections.abc.Sequence):
                return self.buildPaint(
                    {
                        "Format": ot.PaintFormat.PaintColrLayers,
                        "Layers": [listToColrLayers(l) for l in layer],
                    }
                )
            return layer

        layers = [listToColrLayers(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        paint = ot.Paint()
        paint.Format = int(ot.PaintFormat.PaintColrLayers)
        paint.NumLayers = len(layers)
        paint.FirstLayerIndex = len(self.layers)
        self.layers.extend(layers)

        # Register our parts for reuse provided we aren't a tree
        # If we are a tree the leaves registered for reuse and that will suffice
        if self.cache is not None and not is_tree:
            self.cache.add(layers, paint.FirstLayerIndex)

        # we've fully built dest; empty source prevents generalized build from kicking in
        return paint, {}

    def buildPaint(self, paint: _PaintInput) -> ot.Paint:
        """Build a single ot.Paint from `paint` (mapping, sequence, or ot.Paint)."""
        return self.tableBuilder.build(ot.Paint, paint)

    def build(self) -> Optional[ot.LayerList]:
        """Return the accumulated ot.LayerList, or None if no layers were added."""
        if not self.layers:
            return None
        layers = ot.LayerList()
        layers.LayerCount = len(self.layers)
        layers.Paint = self.layers
        return layers
def buildBaseGlyphPaintRecord(
    baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
) -> ot.BaseGlyphPaintRecord:
    """Build a BaseGlyphPaintRecord tying `baseGlyph` to its root `paint`.

    The paint input is converted to a typed ot.Paint via `layerBuilder`, which
    also accumulates any PaintColrLayers children into its shared layer list.

    Note: the return annotation previously said ot.BaseGlyphList, but the
    function constructs and returns an ot.BaseGlyphPaintRecord.
    """
    record = ot.BaseGlyphPaintRecord()
    record.BaseGlyph = baseGlyph
    record.Paint = layerBuilder.buildPaint(paint)
    return record
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
lines = []
for baseGlyph, error in sorted(errors.items()):
lines.append(f" {baseGlyph} => {type(error).__name__}: {error}")
return "\n".join(lines)
def buildColrV1(
    colorGlyphs: _ColorGlyphsDict,
    glyphMap: Optional[Mapping[str, int]] = None,
    *,
    allowLayerReuse: bool = True,
) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
    """Build the COLRv1 LayerList and BaseGlyphList from a color glyphs dict.

    When `glyphMap` is given, base glyph records are emitted in glyph ID order.
    Per-glyph build failures are collected and re-raised together as a single
    ColorLibError whose `errors` attribute maps glyph name to exception.
    """
    if glyphMap is None:
        colorGlyphItems = colorGlyphs.items()
    else:
        colorGlyphItems = sorted(
            colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
        )

    layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse)
    baseGlyphs = []
    errors = {}
    for baseGlyph, paint in colorGlyphItems:
        try:
            baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))
        except (ColorLibError, OverflowError, ValueError, TypeError) as e:
            errors[baseGlyph] = e

    if errors:
        exc = ColorLibError(
            f"Failed to build BaseGlyphList:\n{_format_glyph_errors(errors)}"
        )
        exc.errors = errors
        raise exc from next(iter(errors.values()))

    glyphs = ot.BaseGlyphList()
    glyphs.BaseGlyphCount = len(baseGlyphs)
    glyphs.BaseGlyphPaintRecord = baseGlyphs
    return (layerBuilder.build(), glyphs)
"""Helpers for manipulating 2D points and vectors in COLR table."""
from math import copysign, cos, hypot, isclose, pi
from fontTools.misc.roundTools import otRound
def _vector_between(origin, target):
return (target[0] - origin[0], target[1] - origin[1])
def _round_point(pt):
    """Round both coordinates of a 2D point with otRound."""
    x, y = pt
    return (otRound(x), otRound(y))
def _unit_vector(vec):
length = hypot(*vec)
if length == 0:
return None
return (vec[0] / length, vec[1] / length)
# Relative tolerance used by Circle.inside when testing (near-)containment.
_CIRCLE_INSIDE_TOLERANCE = 1e-4


# The unit vector's X and Y components are respectively
#   U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
# This threshold splits the unit circle into 8 equal slices (see _rounding_offset).
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi)  # == sin(1/8 * pi) == 0.38268343236508984
def _rounding_offset(direction):
    """Map `direction` to a (dx, dy) nudge with components in {-1.0, 0, +1.0}.

    The unit circle is divided into 8 equal slices oriented towards the
    cardinal (N, E, S, W) and intermediate (NE, SE, SW, NW) directions; each
    slice decides the sign of the X and Y offsets. E.g. a vector pointing SE
    yields (+1.0, -1.0), one pointing West yields (-1.0, 0.0), etc. The zero
    vector maps to (0, 0).
    """
    unit = _unit_vector(direction)
    if not unit:
        return (0, 0)

    def _axis_offset(component):
        if -_UNIT_VECTOR_THRESHOLD <= component < _UNIT_VECTOR_THRESHOLD:
            # nearly orthogonal to this axis: leave the coordinate unchanged
            return 0
        # nudge coordinate by +/- 1.0 in the direction of the unit vector
        return copysign(1.0, component)

    return tuple(_axis_offset(component) for component in unit)
class Circle:
    """A 2D circle defined by a `centre` point and a `radius`."""

    def __init__(self, centre, radius):
        self.centre = centre
        self.radius = radius

    def __repr__(self):
        return f"Circle(centre={self.centre}, radius={self.radius})"

    def round(self):
        """Return a new Circle with centre and radius rounded via otRound."""
        return Circle(_round_point(self.centre), otRound(self.radius))

    def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
        """Return True if self is contained in `outer_circle`, within `tolerance`.

        Containment holds when self's radius plus the distance between the two
        centres does not exceed outer_circle's radius; `tolerance` is used as
        the relative tolerance for the isclose comparison.
        """
        dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
        # Fix: use the `tolerance` parameter here; it was previously ignored in
        # favor of the module constant, which made the parameter a no-op.
        return (
            isclose(outer_circle.radius, dist, rel_tol=tolerance)
            or outer_circle.radius > dist
        )

    def concentric(self, other):
        """Return True if self and `other` share the same centre."""
        return self.centre == other.centre

    def move(self, dx, dy):
        """Translate the circle's centre in place by (dx, dy)."""
        self.centre = (self.centre[0] + dx, self.centre[1] + dy)
def round_start_circle_stable_containment(c0, r0, c1, r1):
    """Round start circle so that it stays inside/outside end circle after rounding.

    The rounding of circle coordinates to integers may cause an abrupt change
    if the start circle c0 is so close to the end circle c1's perimeter that
    it ends up falling outside (or inside) as a result of the rounding.
    To keep the gradient unchanged, we nudge it in the right direction.

    Args:
        c0: start circle centre, (x, y) tuple.
        r0: start circle radius.
        c1: end circle centre, (x, y) tuple.
        r1: end circle radius.

    Returns:
        A rounded Circle for the start circle that preserves the original
        containment relationship with the rounded end circle.

    See:
    https://github.com/googlefonts/colr-gradients-spec/issues/204
    https://github.com/googlefonts/picosvg/issues/158
    """
    start, end = Circle(c0, r0), Circle(c1, r1)

    inside_before_round = start.inside(end)

    round_start = start.round()
    round_end = end.round()
    inside_after_round = round_start.inside(round_end)

    if inside_before_round == inside_after_round:
        # rounding preserved containment: nothing to do
        return round_start
    elif inside_after_round:
        # start was outside before rounding: we need to push start away from end
        direction = _vector_between(round_end.centre, round_start.centre)
        radius_delta = +1.0
    else:
        # start was inside before rounding: we need to push start towards end
        direction = _vector_between(round_start.centre, round_end.centre)
        radius_delta = -1.0
    dx, dy = _rounding_offset(direction)

    # At most 2 iterations ought to be enough to converge. Before the loop, we
    # know the start circle didn't keep containment after normal rounding; thus
    # we continue adjusting by -/+ 1.0 until containment is restored.
    # Normal rounding can at most move each coordinates -/+0.5; in the worst case
    # both the start and end circle's centres and radii will be rounded in opposite
    # directions, e.g. when they move along a 45 degree diagonal:
    #   c0 = (1.5, 1.5) ===> (2.0, 2.0)
    #   r0 = 0.5 ===> 1.0
    #   c1 = (0.499, 0.499) ===> (0.0, 0.0)
    #   r1 = 2.499 ===> 2.0
    # In this example, the relative distance between the circles, calculated
    # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
    # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
    # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
    # moves cover twice that distance, which is enough to restore containment.
    max_attempts = 2
    for _ in range(max_attempts):
        if round_start.concentric(round_end):
            # can't move c0 towards c1 (they are the same), so we change the radius
            round_start.radius += radius_delta
            assert round_start.radius >= 0
        else:
            round_start.move(dx, dy)
        if inside_before_round == round_start.inside(round_end):
            break
    else:  # likely a bug
        raise AssertionError(
            f"Rounding circle {start} "
            f"{'inside' if inside_before_round else 'outside'} "
            f"{end} failed after {max_attempts} attempts!"
        )

    return round_start
"""
colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such.
"""
import collections
import enum
from fontTools.ttLib.tables.otBase import (
BaseTable,
FormatSwitchingBaseTable,
UInt8FormatSwitchingBaseTable,
)
from fontTools.ttLib.tables.otConverters import (
ComputedInt,
SimpleValue,
Struct,
Short,
UInt8,
UShort,
IntValue,
FloatValue,
OptionalValue,
)
from fontTools.misc.roundTools import otRound
class BuildCallback(enum.Enum):
    """Keyed on (BEFORE_BUILD, class[, Format if available]).
    Receives (dest, source).
    Should return (dest, source), which can be new objects.
    """

    BEFORE_BUILD = enum.auto()

    # NOTE: the bare string literals below are no-op statements, not docstrings;
    # together with the class docstring above, each one documents the enum
    # member that immediately follows it.
    """Keyed on (AFTER_BUILD, class[, Format if available]).
    Receives (dest).
    Should return dest, which can be a new object.
    """
    AFTER_BUILD = enum.auto()

    """Keyed on (CREATE_DEFAULT, class[, Format if available]).
    Receives no arguments.
    Should return a new instance of class.
    """
    CREATE_DEFAULT = enum.auto()
def _assignable(convertersByName):
    """Filter a name->converter map down to directly assignable fields,
    dropping computed (ComputedInt) values that otData derives itself."""
    result = {}
    for name, converter in convertersByName.items():
        if not isinstance(converter, ComputedInt):
            result[name] = converter
    return result
def _isNonStrSequence(value):
return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
def _split_format(cls, source):
    """Extract the Format discriminator from `source` for a format-switching table.

    For a sequence input, Format is the first element and the remainder is the
    rest of the sequence; for a mapping, Format is popped from a copy. Anything
    else raises ValueError. The format is asserted to be hashable and valid
    for `cls`.
    """
    if _isNonStrSequence(source):
        assert len(source) > 0, f"{cls} needs at least format from {source}"
        fmt, rest = source[0], source[1:]
    elif isinstance(source, collections.abc.Mapping):
        assert "Format" in source, f"{cls} needs at least Format from {source}"
        rest = source.copy()
        fmt = rest.pop("Format")
    else:
        raise ValueError(f"Not sure how to populate {cls} from {source}")

    assert isinstance(
        fmt, collections.abc.Hashable
    ), f"{cls} Format is not hashable: {fmt!r}"
    assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"
    return fmt, rest
class TableBuilder:
    """
    Helps to populate things derived from BaseTable from maps, tuples, etc.

    A table of lifecycle callbacks may be provided to add logic beyond what is possible
    based on otData info for the target class. See BuildCallbacks.
    """

    def __init__(self, callbackTable=None):
        if callbackTable is None:
            callbackTable = {}
        self._callbackTable = callbackTable

    def _convert(self, dest, field, converter, value):
        """Coerce `value` per `converter`'s kind and assign it to dest.field."""
        enumClass = getattr(converter, "enumClass", None)

        if enumClass:
            # enum fields accept an enum member, its (uppercased) name, or its value
            if isinstance(value, enumClass):
                pass
            elif isinstance(value, str):
                try:
                    value = getattr(enumClass, value.upper())
                except AttributeError:
                    raise ValueError(f"{value} is not a valid {enumClass}")
            else:
                value = enumClass(value)

        elif isinstance(converter, IntValue):
            value = otRound(value)
        elif isinstance(converter, FloatValue):
            value = float(value)

        elif isinstance(converter, Struct):
            if converter.repeat:
                # repeated subtable: build each element; a bare value becomes a
                # 1-element list. The companion count field is set alongside.
                if _isNonStrSequence(value):
                    value = [self.build(converter.tableClass, v) for v in value]
                else:
                    value = [self.build(converter.tableClass, value)]
                setattr(dest, converter.repeat, len(value))
            else:
                value = self.build(converter.tableClass, value)

        elif callable(converter):
            value = converter(value)

        setattr(dest, field, value)

    def build(self, cls, source):
        """Build an instance of BaseTable subclass `cls` from `source`.

        `source` may already be a `cls` instance (returned as-is), a mapping of
        field name to value, a sequence matching the fields in declaration
        order, or a single value (retried as a 1-tuple). CREATE_DEFAULT,
        BEFORE_BUILD and AFTER_BUILD callbacks are honored when registered.
        """
        assert issubclass(cls, BaseTable)

        if isinstance(source, cls):
            return source

        callbackKey = (cls,)
        fmt = None
        if issubclass(cls, FormatSwitchingBaseTable):
            fmt, source = _split_format(cls, source)
            callbackKey = (cls, fmt)

        dest = self._callbackTable.get(
            (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls()
        )()
        assert isinstance(dest, cls)

        convByName = _assignable(cls.convertersByName)
        skippedFields = set()

        # For format switchers we need to resolve converters based on format
        if issubclass(cls, FormatSwitchingBaseTable):
            dest.Format = fmt
            convByName = _assignable(convByName[dest.Format])
            skippedFields.add("Format")

        # Convert sequence => mapping so before thunk only has to handle one format
        if _isNonStrSequence(source):
            # Sequence (typically list or tuple) assumed to match fields in declaration order
            assert len(source) <= len(
                convByName
            ), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values"
            source = dict(zip(convByName.keys(), source))

        dest, source = self._callbackTable.get(
            (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s)
        )(dest, source)

        if isinstance(source, collections.abc.Mapping):
            for field, value in source.items():
                if field in skippedFields:
                    continue
                converter = convByName.get(field, None)
                if not converter:
                    raise ValueError(
                        f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}"
                    )
                self._convert(dest, field, converter, value)
        else:
            # let's try as a 1-tuple
            dest = self.build(cls, (source,))

        # fill in defaults for optional fields the source did not provide
        for field, conv in convByName.items():
            if not hasattr(dest, field) and isinstance(conv, OptionalValue):
                setattr(dest, field, conv.DEFAULT)

        dest = self._callbackTable.get(
            (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d
        )(dest)

        return dest
class TableUnbuilder:
    """Inverse of TableBuilder: convert BaseTable instances back into plain
    dicts of primitive values suitable for re-building.

    A table of callbacks keyed on (class[, Format]) may post-process each
    unbuilt source dict.
    """

    def __init__(self, callbackTable=None):
        if callbackTable is None:
            callbackTable = {}
        self._callbackTable = callbackTable

    def unbuild(self, table):
        """Recursively convert `table` into a plain dict of primitive values."""
        assert isinstance(table, BaseTable)

        source = {}

        callbackKey = (type(table),)
        if isinstance(table, FormatSwitchingBaseTable):
            source["Format"] = int(table.Format)
            callbackKey += (table.Format,)

        for converter in table.getConverters():
            if isinstance(converter, ComputedInt):
                # computed fields (e.g. counts) are re-derived on build
                continue
            value = getattr(table, converter.name)

            enumClass = getattr(converter, "enumClass", None)
            if enumClass:
                source[converter.name] = value.name.lower()
            elif isinstance(converter, Struct):
                if converter.repeat:
                    source[converter.name] = [self.unbuild(v) for v in value]
                else:
                    source[converter.name] = self.unbuild(value)
            elif isinstance(converter, SimpleValue):
                # "simple" values (e.g. int, float, str) need no further un-building
                source[converter.name] = value
            else:
                raise NotImplementedError(
                    # bugfix: this message was missing the f-string prefix, so the
                    # {value!r}/{converter!r} placeholders were printed verbatim
                    f"Don't know how unbuild {value!r} with {converter!r}"
                )

        source = self._callbackTable.get(callbackKey, lambda s: s)(source)

        return source
"""
Define all configuration options that can affect the working of fontTools
modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level,
etc. If this file gets too big, split it into smaller files per-module.
An instance of the Config class can be attached to a TTFont object, so that
the various modules can access their configuration options from it.
"""
from textwrap import dedent
from fontTools.misc.configTools import *
class Config(AbstractConfig):
    # Shared registry of all fontTools configuration options; entries are
    # added below via Config.register_option.
    options = Options()


# Public alias for the option registry.
OPTIONS = Config.options
Config.register_option(
name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
help=dedent(
"""\
GPOS Lookup type 2 (PairPos) compression level:
0 = do not attempt to compact PairPos lookups;
1 to 8 = create at most 1 to 8 new subtables for each existing
subtable, provided that it would yield a 50%% file size saving;
9 = create as many new subtables as needed to yield a file size saving.
Default: 0.
This compaction aims to save file size, by splitting large class
kerning subtables (Format 2) that contain many zero values into
smaller and denser subtables. It's a trade-off between the overhead
of several subtables versus the sparseness of one big subtable.
See the pull request: https://github.com/fonttools/fonttools/pull/2326
"""
),
default=0,
parse=int,
validate=lambda v: v in range(10),
)
Config.register_option(
name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER",
help=dedent(
"""\
FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables
if the uharfbuzz python bindings are importable, otherwise falls back to its
slower, less efficient serializer. Set to False to always use the latter.
Set to True to explicitly request the HarfBuzz Repacker (will raise an
error if uharfbuzz cannot be imported).
"""
),
default=None,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
Config.register_option(
name="fontTools.otlLib.builder:WRITE_GPOS7",
help=dedent(
"""\
macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining
ContextPos lookups), so FontTools.otlLib.builder disables a file size
optimisation that would use LookupType 7 instead of 8 when there is no
chaining (no prefix or suffix). Set to True to enable the optimization.
"""
),
default=False,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
Config.register_option(
name="fontTools.ttLib:OPTIMIZE_FONT_SPEED",
help=dedent(
"""\
Enable optimizations that prioritize speed over file size. This
mainly affects how glyf table and gvar / VARC tables are compiled.
The produced fonts will be larger, but rendering performance will
be improved with HarfBuzz and other text layout engines.
"""
),
default=False,
parse=Option.parse_optional_bool,
validate=Option.validate_optional_bool,
)
"""Benchmark the cu2qu algorithm performance."""
from .cu2qu import *
import random
import timeit
# Approximation error bound passed to the cu2qu conversion functions below.
MAX_ERR = 0.05
def generate_curve():
    """Return a random cubic curve: four (x, y) points, coords in [0, 2048]."""
    points = []
    for _ in range(4):
        # draw x then y, matching the original randint call order for seeding
        x = float(random.randint(0, 2048))
        y = float(random.randint(0, 2048))
        points.append((x, y))
    return points
def setup_curve_to_quadratic():
    """Build the argument tuple for a single curve_to_quadratic call."""
    curve = generate_curve()
    return curve, MAX_ERR
def setup_curves_to_quadratic():
    """Build the argument tuple for a curves_to_quadratic call over 3 curves."""
    num_curves = 3
    curves = [generate_curve() for _ in range(num_curves)]
    errors = [MAX_ERR] * num_curves
    return (curves, errors)
def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
    """Time `function` (looked up in globals) fed by its setup_* helper and
    print the best per-call time in microseconds. `module` is accepted for
    call-site symmetry but not used in the lookup."""
    setup_name = "setup_" + function
    if setup_suffix:
        print(f"{function} with {setup_suffix}:", end="")
        setup_name += "_" + setup_suffix
    else:
        print(f"{function}:", end="")

    def _make_runner(func_name, setup_func_name):
        # resolve both names once, up front
        target = globals()[func_name]
        setup = globals()[setup_func_name]

        def _run():
            return target(*setup())

        return _run

    results = timeit.repeat(
        _make_runner(function, setup_name), repeat=repeat, number=number
    )
    print(f"\t{min(results) * 1000000.0 / number:5.1f}us")
def main():
    """Benchmark the two public cu2qu entry points."""
    for func_name in ("curve_to_quadratic", "curves_to_quadratic"):
        run_benchmark("cu2qu", func_name)
if __name__ == "__main__":
random.seed(1)
main()
venv\Lib\site-packages\fontTools\cu2qu\cli.py
import os
import argparse
import logging
import shutil
import multiprocessing as mp
from contextlib import closing
from functools import partial
import fontTools
from .ufo import font_to_quadratic, fonts_to_quadratic
# Prefer ufoLib2, falling back to defcon; leave ufo_module as None when
# neither backend is installed (reported later by _main via parser.error).
ufo_module = None
try:
    import ufoLib2 as ufo_module
except ImportError:
    try:
        import defcon as ufo_module
    except ImportError:
        # fix: dropped the unused `as e` binding on this handler
        pass
logger = logging.getLogger("fontTools.cu2qu")
def _cpu_count():
try:
return mp.cpu_count()
except NotImplementedError: # pragma: no cover
return 1
def open_ufo(path):
    """Open a UFO at `path` with whichever backend was imported."""
    if hasattr(ufo_module.Font, "open"):
        # ufoLib2 provides an `open` classmethod constructor
        return ufo_module.Font.open(path)
    # defcon takes the path directly in Font()
    return ufo_module.Font(path)
def _font_to_quadratic(input_path, output_path=None, **kwargs):
    """Convert one UFO's curves; save to `output_path` or in place.

    When the conversion leaves the font untouched, the input is simply copied
    to `output_path` (if one was given) instead of re-saving.
    """
    ufo = open_ufo(input_path)
    logger.info("Converting curves for %s", input_path)
    modified = font_to_quadratic(ufo, **kwargs)
    if not modified:
        if output_path:
            _copytree(input_path, output_path)
        return
    logger.info("Saving %s", output_path)
    if output_path:
        ufo.save(output_path)
    else:
        ufo.save()  # save in-place
def _samepath(path1, path2):
# TODO on python3+, there's os.path.samefile
path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
return path1 == path2
def _copytree(input_path, output_path):
    # Copy the input UFO directory tree to output_path, replacing any existing
    # destination; no-op when source and destination are the same path.
    if _samepath(input_path, output_path):
        logger.debug("input and output paths are the same file; skipped copy")
        return
    # remove a stale destination first so copytree doesn't fail on an existing dir
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    shutil.copytree(input_path, output_path)
def _main(args=None):
    """Convert a UFO font from cubic to quadratic curves.

    Command-line entry point; `args` defaults to sys.argv[1:]. Fixes applied
    to user-facing help/error strings: "maxiumum" -> "maximum",
    "to used mixed" -> "to use mixed", "multile" -> "multiple".
    """
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=None,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-m",
        "--mixed",
        default=False,
        action="store_true",
        help="whether to use mixed quadratic and cubic curves",
    )
    parser.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="do not reverse the contour direction",
    )

    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility",
    )
    mode_parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        nargs="?",
        default=1,
        const=_cpu_count(),
        metavar="N",
        help="Convert using N multiple processes (default: %(default)s)",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=(
            "output filename for the converted UFO. By default fonts are "
            "modified in place. This only works with a single input."
        ),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs",
    )

    options = parser.parse_args(args)

    if ufo_module is None:
        parser.error("Either ufoLib2 or defcon are required to run this script.")

    # -v => INFO, -vv (or more) => DEBUG, default WARNING
    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multiple inputs")

    # Resolve one output path per input (None means save in place).
    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        # save in-place
        output_paths = [None] * len(options.infiles)

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        reverse_direction=options.reverse_direction,
        all_quadratic=False if options.mixed else True,
    )

    if options.interpolatable:
        # interpolatable mode: all fonts must be converted together so that
        # their curves stay point-compatible
        logger.info("Converting curves compatibly")
        ufos = [open_ufo(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, **kwargs):
            for ufo, output_path in zip(ufos, output_paths):
                logger.info("Saving %s", output_path)
                if output_path:
                    ufo.save(output_path)
                else:
                    ufo.save()
        else:
            # nothing changed: mirror inputs to any requested outputs
            for input_path, output_path in zip(options.infiles, output_paths):
                if output_path:
                    _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
        if jobs > 1:
            func = partial(_font_to_quadratic, **kwargs)
            logger.info("Running %d parallel processes", jobs)
            with closing(mp.Pool(jobs)) as pool:
                pool.starmap(func, zip(options.infiles, output_paths))
        else:
            for input_path, output_path in zip(options.infiles, output_paths):
                _font_to_quadratic(input_path, output_path, **kwargs)
venv\Lib\site-packages\fontTools\cu2qu\cu2qu.py
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cython
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = cython.compiled
import math
from .errors import Error as Cu2QuError, ApproxNotFoundError
__all__ = ["curve_to_quadratic", "curves_to_quadratic"]
MAX_N = 100
NAN = float("NaN")
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex)
def dot(v1, v2):
    """Return the dot product of two vectors.

    Args:
        v1 (complex): First vector.
        v2 (complex): Second vector.

    Returns:
        double: Dot product.
    """
    # 2D dot product via complex arithmetic: (a+bj)·(c+dj) -> a*c + b*d
    return (v1 * v2.conjugate()).real
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
    # Convert power-basis cubic coefficients (a*t^3 + b*t^2 + c*t + d) back
    # into four Bezier control points; inverse of calc_cubic_parameters.
    _1 = d
    _2 = (c / 3.0) + d
    _3 = (b + c) / 3.0 + _2
    _4 = a + d + c + b
    return _1, _2, _3, _4
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
    # Convert Bezier control points p0..p3 into power-basis coefficients
    # (a, b, c, d) such that the curve is a*t^3 + b*t^2 + c*t + d.
    c = (p1 - p0) * 3.0
    b = (p2 - p1) * 3.0 - c
    d = p0
    a = p3 - d - c - b
    return a, b, c, d
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
    """Split a cubic Bezier into n equal parts.

    Splits the curve into `n` equal parts by curve time.
    (t=0..1/n, t=1/n..2/n, ...)

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        n (int): Number of pieces.

    Returns:
        An iterator yielding the control points (four complex values) of the
        subcurves.
    """
    # Hand-coded special-cases for the most common small n; 4 and 6 are
    # built by composing the 2- and 3-way splits.
    if n == 2:
        return iter(split_cubic_into_two(p0, p1, p2, p3))
    if n == 3:
        return iter(split_cubic_into_three(p0, p1, p2, p3))
    if n == 4:
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_two(a[0], a[1], a[2], a[3])
            + split_cubic_into_two(b[0], b[1], b[2], b[3])
        )
    if n == 6:
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_three(a[0], a[1], a[2], a[3])
            + split_cubic_into_three(b[0], b[1], b[2], b[3])
        )
    # General case: lazy generator over the n sub-curves.
    return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
    a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
    """Generator yielding the control points of the n equal-time pieces of
    the cubic (general-n implementation behind split_cubic_into_n_iter).

    Works in the power basis: re-parameterizing t -> (t1 + dt*t) gives new
    coefficients for each piece, which are converted back to control points.
    """
    a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
    dt = 1 / n
    delta_2 = dt * dt
    delta_3 = dt * delta_2
    for i in range(n):
        t1 = i * dt
        t1_2 = t1 * t1
        # calc new a, b, c and d
        a1 = a * delta_3
        b1 = (3 * a * t1 + b) * delta_2
        c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
        d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
        yield calc_cubic_points(a1, b1, c1, d1)
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
    """Split a cubic Bezier into two equal parts.

    Splits the curve into two equal parts at t = 0.5

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Two cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # mid is the curve point at t=0.5; deriv3 is 1/8 of the derivative there.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return (
        (p0, (p0 + p1) * 0.5, mid - deriv3, mid),
        (mid, mid + deriv3, (p2 + p3) * 0.5, p3),
    )
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(
    mid1=cython.complex,
    deriv1=cython.complex,
    mid2=cython.complex,
    deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3):
    """Split a cubic Bezier into three equal parts.

    Splits the curve into three equal parts at t = 1/3 and t = 2/3

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Three cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # mid1/mid2 are the curve points at t=1/3 and t=2/3; deriv1/deriv2 are
    # the matching derivative terms used to place the inner handles.
    mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
    deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
    mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
    deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
    return (
        (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
        (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
        (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
    )
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
    """Approximate a cubic Bezier using a quadratic one.

    Args:
        t (double): Position of control point.
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        complex: Location of candidate control point on quadratic curve.
    """
    # _p1/_p2 are the quadratic control points that preserve the start/end
    # tangents respectively; the result interpolates between them by t.
    _p1 = p0 + (p1 - p0) * 1.5
    _p2 = p3 + (p2 - p3) * 1.5
    return _p1 + (_p2 - _p1) * t
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
def calc_intersect(a, b, c, d):
    """Calculate the intersection of two lines.

    Args:
        a (complex): Start point of first line.
        b (complex): End point of first line.
        c (complex): Start point of second line.
        d (complex): End point of second line.

    Returns:
        complex: Location of intersection if one present, ``complex(NaN,NaN)``
        if no intersection was found.
    """
    ab = b - a
    cd = d - c
    # Multiplying by 1j rotates ab by 90 degrees, giving its normal.
    p = ab * 1j
    try:
        # h is the parameter along cd where it crosses line ab; division by
        # zero means the lines are parallel.
        h = dot(p, a - c) / dot(p, cd)
    except ZeroDivisionError:
        return complex(NAN, NAN)
    return c + cd * h
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that no
    checks are made on the start and end positions of the curve; this function
    only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # First check p2 then p1, as p2 has higher error early on.
    # If both handles are within tolerance, the whole curve is too.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        # The midpoint itself (an on-curve point) is already too far out.
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    # Recurse on the two halves (split at t=0.5, same construction as
    # split_cubic_into_two).
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.cfunc
@cython.inline
@cython.locals(tolerance=cython.double)
@cython.locals(
    q1=cython.complex,
    c0=cython.complex,
    c1=cython.complex,
    c2=cython.complex,
    c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance):
    """Approximate a cubic Bezier with a single quadratic within a given tolerance.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        tolerance (double): Permitted deviation from the original curve.

    Returns:
        Three complex numbers representing control points of the quadratic
        curve if it fits within the given tolerance, or ``None`` if no suitable
        curve could be calculated.
    """
    # Candidate quadratic control point: intersection of the two end tangents.
    q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
    if math.isnan(q1.imag):
        # Parallel tangents: no single quadratic can fit.
        return None
    c0 = cubic[0]
    c3 = cubic[3]
    # Degree-elevate the candidate quadratic to a cubic (c0, c1, c2, c3) and
    # check that its difference from the original cubic stays within tolerance.
    c1 = c0 + (q1 - c0) * (2 / 3)
    c2 = c3 + (q1 - c3) * (2 / 3)
    if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
        return None
    return c0, q1, c3
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
@cython.locals(all_quadratic=cython.int)
@cython.locals(
    c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
    q0=cython.complex,
    q1=cython.complex,
    next_q1=cython.complex,
    q2=cython.complex,
    d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        n (int): Number of quadratic Bezier curves in the spline.
        tolerance (double): Permitted deviation from the original curve.
        all_quadratic (bool): If False, a cubic that would need two quadratics
            is returned unchanged instead of being approximated.

    Returns:
        A list of ``n+2`` complex numbers, representing control points of the
        quadratic spline if it fits within the given tolerance, or ``None`` if
        no suitable spline could be calculated.
    """
    if n == 1:
        return cubic_approx_quadratic(cubic, tolerance)
    # Fix: use `not all_quadratic` rather than comparing `== False` (PEP 8);
    # behavior is unchanged for the boolean/int values this receives.
    if n == 2 and not all_quadratic:
        return cubic

    cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)

    # calculate the spline of quadratics and check errors at the same time.
    next_cubic = next(cubics)
    next_q1 = cubic_approx_control(
        0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
    )
    q2 = cubic[0]
    d1 = 0j
    spline = [cubic[0], next_q1]
    for i in range(1, n + 1):
        # Current cubic to convert
        c0, c1, c2, c3 = next_cubic

        # Current quadratic approximation of current cubic
        q0 = q2
        q1 = next_q1
        if i < n:
            next_cubic = next(cubics)
            next_q1 = cubic_approx_control(
                i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
            )
            spline.append(next_q1)
            # Implied on-curve point halfway between consecutive off-curves.
            q2 = (q1 + next_q1) * 0.5
        else:
            q2 = c3

        # End-point deltas
        d0 = d1
        d1 = q2 - c3

        # Reject if the segment's end-point error, or the difference between
        # the (degree-elevated) quadratic and the cubic, exceeds tolerance.
        if abs(d1) > tolerance or not cubic_farthest_fit_inside(
            d0,
            q0 + (q1 - q0) * (2 / 3) - c1,
            q2 + (q1 - q2) * (2 / 3) - c2,
            d1,
            tolerance,
        ):
            return None
    spline.append(cubic[3])
    return spline
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
@cython.locals(all_quadratic=cython.int)
def curve_to_quadratic(curve, max_err, all_quadratic=True):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        curve (sequence): Four 2D tuples representing control points of
            the cubic Bezier curve.
        max_err (double): Permitted deviation from the original curve.
        all_quadratic (bool): If True (default) returned value is a
            quadratic spline. If False, it's either a single quadratic
            curve or a single cubic curve.

    Returns:
        If all_quadratic is True: A list of 2D tuples, representing
        control points of the quadratic spline if it fits within the
        given tolerance, or ``None`` if no suitable spline could be
        calculated.

        If all_quadratic is False: Either a quadratic curve (if length
        of output is 3), or a cubic curve (if length of output is 4).

    Raises:
        ApproxNotFoundError: if no approximation is found up to MAX_N
        quadratic segments.
    """
    # Points are handled as complex numbers internally.
    curve = [complex(*p) for p in curve]

    # Try successively longer splines until one fits within max_err.
    for n in range(1, MAX_N + 1):
        spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
        if spline is not None:
            # done. go home
            return [(s.real, s.imag) for s in spline]

    raise ApproxNotFoundError(curve)
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
@cython.locals(all_quadratic=cython.int)
def curves_to_quadratic(curves, max_errors, all_quadratic=True):
    """Return quadratic Bezier splines approximating the input cubic Beziers.

    Args:
        curves: A sequence of *n* curves, each curve being a sequence of four
            2D tuples.
        max_errors: A sequence of *n* floats representing the maximum permissible
            deviation from each of the cubic Bezier curves.
        all_quadratic (bool): If True (default) returned values are a
            quadratic spline. If False, they are either a single quadratic
            curve or a single cubic curve.

    Example::

        >>> curves_to_quadratic( [
        ...   [ (50,50), (100,100), (150,100), (200,50) ],
        ...   [ (75,50), (120,100), (150,75), (200,60) ]
        ... ], [1,1] )
        [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]

    The returned splines have "implied oncurve points" suitable for use in
    TrueType ``glif`` outlines - i.e. in the first spline returned above,
    the first quadratic segment runs from (50,50) to
    ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).

    Returns:
        If all_quadratic is True, a list of splines, each spline being a list
        of 2D tuples.

        If all_quadratic is False, a list of curves, each curve being a quadratic
        (length 3), or cubic (length 4).

    Raises:
        fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
        can be found for all curves with the given parameters.
    """
    curves = [[complex(*p) for p in curve] for curve in curves]
    assert len(max_errors) == len(curves)

    l = len(curves)
    splines = [None] * l
    last_i = i = 0
    # n (number of quadratic segments) is shared by all curves so that the
    # resulting splines stay interpolation-compatible: whenever one curve
    # fails at the current n, conversion restarts from that curve with n+1.
    n = 1
    while True:
        spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
        if spline is None:
            if n == MAX_N:
                break
            n += 1
            last_i = i
            continue
        splines[i] = spline
        i = (i + 1) % l
        if i == last_i:
            # A full round of successes at the current n: all done. go home
            return [[(s.real, s.imag) for s in spline] for spline in splines]

    raise ApproxNotFoundError(curves)
venv\Lib\site-packages\fontTools\cu2qu\errors.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
    """Base Cu2Qu exception class for all other errors.

    Catching this type catches every exception raised by the cu2qu package.
    """
class ApproxNotFoundError(Error):
    """Raised when no quadratic approximation fits within the tolerance.

    The offending curve (or list of curves) is stored on ``self.curve``.
    """

    def __init__(self, curve):
        self.curve = curve
        super().__init__("no approximation found: %s" % curve)
class UnequalZipLengthsError(Error):
    """Raised when sequences of differing lengths are zipped together."""

    pass
class IncompatibleGlyphsError(Error):
    """Raised when glyphs cannot be converted together compatibly.

    ``combined_name`` is the shared glyph name, or a ``{...}`` set notation
    of all distinct names when the glyphs are named differently.
    """

    def __init__(self, glyphs):
        assert len(glyphs) > 1
        self.glyphs = glyphs
        names = {repr(glyph.name) for glyph in glyphs}
        if len(names) == 1:
            self.combined_name = names.pop()
        else:
            self.combined_name = "{%s}" % ", ".join(sorted(names))

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, self.combined_name)
class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
    """Raised when same-named glyphs have different segment counts."""

    def __str__(self):
        return "Glyphs named %s have different number of segments" % (
            self.combined_name
        )
class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
    """Raised when same-named glyphs disagree on a segment's type.

    ``segments`` maps segment index -> list of the conflicting tags, one
    per glyph.
    """

    def __init__(self, glyphs, segments):
        IncompatibleGlyphsError.__init__(self, glyphs)
        self.segments = segments

    def __str__(self):
        # Right-align indices so the per-segment lines read as a table.
        ndigits = len(str(max(self.segments)))
        lines = [
            "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
            for i, tags in sorted(self.segments.items())
        ]
        return "Glyphs named %s have incompatible segment types:\n %s" % (
            self.combined_name,
            "\n ".join(lines),
        )
class IncompatibleFontsError(Error):
    """Raised when fonts contain glyphs that could not be converted together.

    ``glyph_errors`` maps glyph name -> the IncompatibleGlyphsError raised
    for that glyph.
    """

    def __init__(self, glyph_errors):
        self.glyph_errors = glyph_errors

    def __str__(self):
        return "fonts contains incompatible glyphs: %s" % (
            ", ".join(map(repr, sorted(self.glyph_errors)))
        )
venv\Lib\site-packages\fontTools\cu2qu\ufo.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts cubic bezier curves to quadratic splines.
Conversion is performed such that the quadratic splines keep the same end-curve
tangents as the original cubics. The approach is iterative, increasing the
number of segments for a spline until the error gets below a bound.
Respective curves from multiple fonts will be converted at once to ensure that
the resulting splines are interpolation-compatible.
"""
import logging
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from . import curves_to_quadratic
from .errors import (
UnequalZipLengthsError,
IncompatibleSegmentNumberError,
IncompatibleSegmentTypesError,
IncompatibleGlyphsError,
IncompatibleFontsError,
)
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
# (see fonts_to_quadratic).
DEFAULT_MAX_ERR = 0.001
CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type"
logger = logging.getLogger(__name__)
# Keep a handle on the builtin before shadowing it below.
_zip = zip


def zip(*args):
    """Length-checking replacement for the builtin ``zip``.

    Raises UnequalZipLengthsError unless all arguments have the same length,
    and always returns a list (python 2/3 compatibility).
    """
    lengths = {len(arg) for arg in args}
    if len(lengths) != 1:
        raise UnequalZipLengthsError(*args)
    return list(_zip(*args))
class GetSegmentsPen(AbstractPen):
    """Pen that records drawing commands as a list of (tag, points) tuples.

    Curves always include their initial on-curve point, so some points are
    duplicated between segments.  Results accumulate in ``self.segments``.
    """

    def __init__(self):
        self._prev_pt = None
        self.segments = []

    def _record(self, tag, *args):
        # Remember the trailing on-curve point so the next curve segment can
        # be stored together with its starting point.
        if tag in ("move", "line", "qcurve", "curve"):
            self._prev_pt = args[-1]
        self.segments.append((tag, args))

    def moveTo(self, pt):
        self._record("move", pt)

    def lineTo(self, pt):
        self._record("line", pt)

    def qCurveTo(self, *points):
        self._record("qcurve", self._prev_pt, *points)

    def curveTo(self, *points):
        self._record("curve", self._prev_pt, *points)

    def closePath(self):
        self._record("close")

    def endPath(self):
        self._record("end")

    def addComponent(self, glyphName, transformation):
        # Components are intentionally ignored; only outlines are collected.
        pass
def _get_segments(glyph):
    """Get a glyph's segments as extracted by GetSegmentsPen."""
    collector = GetSegmentsPen()
    # We can't simply draw the glyph with the pen, but must initialize the
    # PointToSegmentPen explicitly with outputImpliedClosingLine=True.
    # By default PointToSegmentPen does not output the implied closing line
    # unless the last and first point on a closed contour are duplicated.
    # Because we are converting multiple glyphs at the same time, this
    # function must return the same number of segments whether or not those
    # points overlap.
    # https://github.com/googlefonts/fontmake/issues/572
    # https://github.com/fonttools/fonttools/pull/1720
    adapter = PointToSegmentPen(collector, outputImpliedClosingLine=True)
    glyph.drawPoints(adapter)
    return collector.segments
def _set_segments(glyph, segments, reverse_direction):
    """Draw segments as extracted by GetSegmentsPen back to a glyph."""
    glyph.clearContours()
    pen = glyph.getPen()
    if reverse_direction:
        pen = ReverseContourPen(pen)
    for tag, args in segments:
        # "curve"/"qcurve" args carry the redundant initial on-curve point
        # recorded by GetSegmentsPen, which is dropped here.
        if tag in ("move", "line"):
            (pen.moveTo if tag == "move" else pen.lineTo)(*args)
        elif tag in ("curve", "qcurve"):
            (pen.curveTo if tag == "curve" else pen.qCurveTo)(*args[1:])
        elif tag == "close":
            pen.closePath()
        elif tag == "end":
            pen.endPath()
        else:
            raise AssertionError('Unhandled segment type "%s"' % tag)
def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
    """Return quadratic approximations of cubic segments."""
    assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"

    converted = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
    n = len(converted[0])
    assert all(len(pts) == n for pts in converted[1:]), "Converted incompatibly"

    # Record how long the resulting splines are (keyed by point count - 2).
    key = str(n - 2)
    stats[key] = stats.get(key, 0) + 1

    # When mixed output is allowed and the result still has 4 points, it is
    # the original cubic; otherwise it is a quadratic spline.
    tag = "qcurve" if all_quadratic or n == 3 else "curve"
    return [(tag, pts) for pts in converted]
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
    """Do the actual conversion of a set of compatible glyphs, after arguments
    have been set up.

    Return True if the glyphs were modified, else return False.

    Raises IncompatibleSegmentNumberError / IncompatibleSegmentTypesError when
    the glyphs' outlines do not line up segment-for-segment.
    """
    try:
        # Transpose so the i-th segment of every glyph is grouped together;
        # the module-level length-checking zip raises if the glyphs have
        # different segment counts.
        segments_by_location = zip(*[_get_segments(g) for g in glyphs])
    except UnequalZipLengthsError:
        raise IncompatibleSegmentNumberError(glyphs)
    if not any(segments_by_location):
        # No outline segments at all: nothing to convert.
        return False

    # always modify input glyphs if reverse_direction is True
    glyphs_modified = reverse_direction

    new_segments_by_location = []
    incompatible = {}
    for i, segments in enumerate(segments_by_location):
        tag = segments[0][0]
        if not all(s[0] == tag for s in segments[1:]):
            # Same position but different segment types across glyphs;
            # collected and reported at the end rather than raising early.
            incompatible[i] = [s[0] for s in segments]
        elif tag == "curve":
            new_segments = _segments_to_quadratic(
                segments, max_err, stats, all_quadratic
            )
            if all_quadratic or new_segments != segments:
                glyphs_modified = True
            segments = new_segments
        new_segments_by_location.append(segments)

    if glyphs_modified:
        new_segments_by_glyph = zip(*new_segments_by_location)
        for glyph, new_segments in zip(glyphs, new_segments_by_glyph):
            _set_segments(glyph, new_segments, reverse_direction)

    if incompatible:
        raise IncompatibleSegmentTypesError(glyphs, segments=incompatible)
    return glyphs_modified
def glyphs_to_quadratic(
    glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
    """Convert the curves of a set of compatible of glyphs to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling glyphs_to_quadratic with one
    glyph at a time may yield slightly more optimized results.

    Return True if glyphs were modified, else return False.

    Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines.
    """
    stats = {} if stats is None else stats
    if not max_err:
        # assume 1000 is the default UPEM
        max_err = DEFAULT_MAX_ERR * 1000

    # Accept either one error bound per glyph or a single shared bound.
    per_glyph_errors = (
        max_err if isinstance(max_err, (list, tuple)) else [max_err] * len(glyphs)
    )
    assert len(per_glyph_errors) == len(glyphs)
    return _glyphs_to_quadratic(
        glyphs, per_glyph_errors, reverse_direction, stats, all_quadratic
    )
def fonts_to_quadratic(
    fonts,
    max_err_em=None,
    max_err=None,
    reverse_direction=False,
    stats=None,
    dump_stats=False,
    remember_curve_type=True,
    all_quadratic=True,
):
    """Convert the curves of a collection of fonts to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling fonts_to_quadratic with one
    font at a time may yield slightly more optimized results.

    Args:
        fonts: UFO font objects to convert in place.
        max_err_em: permitted error relative to the EM square (one value, or
            one per font). Mutually exclusive with ``max_err``.
        max_err: permitted error in absolute font units (one value, or one
            per font).
        reverse_direction: also reverse the direction of the contours.
        stats: optional dict accumulating spline-length statistics.
        dump_stats: log the accumulated statistics when glyphs were modified.
        remember_curve_type: store/check the curve type under the private
            lib key so already-converted fonts are skipped.
        all_quadratic: if False, leave single cubics that fit no quadratic.

    Return the set of modified glyph names if any, else return an empty set.

    By default, cu2qu stores the curve type in the fonts' lib, under a private
    key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert
    them again if the curve type is already set to "quadratic".
    Setting 'remember_curve_type' to False disables this optimization.

    Raises IncompatibleFontsError if same-named glyphs from different fonts
    have non-interpolatable outlines.
    """
    if remember_curve_type:
        curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
        if len(curve_types) == 1:
            curve_type = next(iter(curve_types))
            if curve_type in ("quadratic", "mixed"):
                logger.info("Curves already converted to quadratic")
                # Fix: the documented contract is a set of modified glyph
                # names ("else return an empty set"); this previously
                # returned False.  set() is equally falsy for callers that
                # test truthiness.
                return set()
            elif curve_type == "cubic":
                pass  # keep converting
            else:
                raise NotImplementedError(curve_type)
        elif len(curve_types) > 1:
            # going to crash later if they do differ
            logger.warning("fonts may contain different curve types")

    if stats is None:
        stats = {}

    if max_err_em and max_err:
        raise TypeError("Only one of max_err and max_err_em can be specified.")
    if not (max_err_em or max_err):
        max_err_em = DEFAULT_MAX_ERR

    if isinstance(max_err, (list, tuple)):
        assert len(max_err) == len(fonts)
        max_errors = max_err
    elif max_err:
        max_errors = [max_err] * len(fonts)

    if isinstance(max_err_em, (list, tuple)):
        assert len(fonts) == len(max_err_em)
        # convert the relative error to absolute font units per font
        max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
    elif max_err_em:
        max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]

    modified = set()
    glyph_errors = {}
    # Union of glyph names across all fonts; a glyph may exist in only some.
    for name in set().union(*(f.keys() for f in fonts)):
        glyphs = []
        cur_max_errors = []
        for font, error in zip(fonts, max_errors):
            if name in font:
                glyphs.append(font[name])
                cur_max_errors.append(error)
        try:
            if _glyphs_to_quadratic(
                glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
            ):
                modified.add(name)
        except IncompatibleGlyphsError as exc:
            # Collect per-glyph failures and report them all at once below.
            logger.error(exc)
            glyph_errors[name] = exc

    if glyph_errors:
        raise IncompatibleFontsError(glyph_errors)

    if modified and dump_stats:
        spline_lengths = sorted(stats.keys())
        logger.info(
            "New spline lengths: %s"
            % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
        )

    if remember_curve_type:
        for font in fonts:
            curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
            new_curve_type = "quadratic" if all_quadratic else "mixed"
            if curve_type != new_curve_type:
                font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
    return modified
def glyph_to_quadratic(glyph, **kwargs):
    """Convenience wrapper around glyphs_to_quadratic for a single glyph.

    Accepts the same keyword arguments as glyphs_to_quadratic.
    Return True if the glyph was modified, else return False.
    """
    return glyphs_to_quadratic([glyph], **kwargs)
def font_to_quadratic(font, **kwargs):
    """Convenience wrapper around fonts_to_quadratic for a single font.

    Accepts the same keyword arguments as fonts_to_quadratic.
    Return the set of modified glyph names if any, else return empty set.
    """
    return fonts_to_quadratic([font], **kwargs)
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cu2qu import *
"""Allows building all the variable fonts of a DesignSpace version 5 by
splitting the document into interpolable sub-space, then into each VF.
"""
from __future__ import annotations
import itertools
import logging
import math
from typing import Any, Callable, Dict, Iterator, List, Tuple, cast
from fontTools.designspaceLib import (
AxisDescriptor,
AxisMappingDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
InstanceDescriptor,
RuleDescriptor,
SimpleLocationDict,
SourceDescriptor,
VariableFontDescriptor,
)
from fontTools.designspaceLib.statNames import StatNames, getStatNames
from fontTools.designspaceLib.types import (
ConditionSet,
Range,
Region,
getVFUserRegion,
locationInRegion,
regionInRegion,
userRegionToDesignRegion,
)
LOGGER = logging.getLogger(__name__)
MakeInstanceFilenameCallable = Callable[
[DesignSpaceDocument, InstanceDescriptor, StatNames], str
]
def defaultMakeInstanceFilename(
    doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames
) -> str:
    """Default callable to synthesize an instance filename
    when makeNames=True, for instances that don't specify an instance name
    in the designspace. This part of the name generation can be overriden
    because it's not specified by the STAT table.
    """
    # Explicit instance names win; otherwise fall back to the English
    # STAT-derived names.
    family = instance.familyName or statNames.familyNames.get("en")
    style = instance.styleName or statNames.styleNames.get("en")
    return f"{family}-{style}.ttf"
def splitInterpolable(
    doc: DesignSpaceDocument,
    makeNames: bool = True,
    expandLocations: bool = True,
    makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]:
    """Split the given DS5 into several interpolable sub-designspaces.
    There are as many interpolable sub-spaces as there are combinations of
    discrete axis values.

    E.g. with axes:
        - italic (discrete) Upright or Italic
        - style (discrete) Sans or Serif
        - weight (continuous) 100 to 900

    There are 4 sub-spaces in which the Weight axis should interpolate:
    (Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif).

    The sub-designspaces still include the full axis definitions and STAT data,
    but the rules, sources, variable fonts, instances are trimmed down to only
    keep what falls within the interpolable sub-space.

    Args:
      - ``makeNames``: Whether to compute the instance family and style
        names using the STAT data.
      - ``expandLocations``: Whether to turn all locations into "full"
        locations, including implicit default axis values where missing.
      - ``makeInstanceFilename``: Callable to synthesize an instance filename
        when makeNames=True, for instances that don't specify an instance name
        in the designspace. This part of the name generation can be overridden
        because it's not specified by the STAT table.

    .. versionadded:: 5.0
    """
    discreteAxes = []
    # Continuous axes span their full range in every sub-space.
    interpolableUserRegion: Region = {}
    for axis in doc.axes:
        if hasattr(axis, "values"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axis = cast(DiscreteAxisDescriptor, axis)
            discreteAxes.append(axis)
        else:
            axis = cast(AxisDescriptor, axis)
            interpolableUserRegion[axis.name] = Range(
                axis.minimum,
                axis.maximum,
                axis.default,
            )
    # One sub-document per combination of discrete axis values.
    valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
    for values in valueCombinations:
        discreteUserLocation = {
            discreteAxis.name: value
            for discreteAxis, value in zip(discreteAxes, values)
        }
        subDoc = _extractSubSpace(
            doc,
            {**interpolableUserRegion, **discreteUserLocation},
            keepVFs=True,
            makeNames=makeNames,
            expandLocations=expandLocations,
            makeInstanceFilename=makeInstanceFilename,
        )
        yield discreteUserLocation, subDoc
def splitVariableFonts(
    doc: DesignSpaceDocument,
    makeNames: bool = False,
    expandLocations: bool = False,
    makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[str, DesignSpaceDocument]]:
    """Convert each variable font listed in this document into a standalone
    designspace. This can be used to compile all the variable fonts from a
    format 5 designspace using tools that can only deal with 1 VF at a time.

    Args:
      - ``makeNames``: Whether to compute the instance family and style
        names using the STAT data.
      - ``expandLocations``: Whether to turn all locations into "full"
        locations, including implicit default axis values where missing.
      - ``makeInstanceFilename``: Callable to synthesize an instance filename
        when makeNames=True, for instances that don't specify an instance name
        in the designspace. This part of the name generation can be overridden
        because it's not specified by the STAT table.

    .. versionadded:: 5.0
    """
    # Make one DesignspaceDoc v5 for each variable font
    for vf in doc.getVariableFonts():
        region = getVFUserRegion(doc, vf)
        subDoc = _extractSubSpace(
            doc,
            region,
            keepVFs=False,
            makeNames=makeNames,
            expandLocations=expandLocations,
            makeInstanceFilename=makeInstanceFilename,
        )
        # The VF-specific lib entries override the document-wide ones.
        subDoc.lib = {**subDoc.lib, **vf.lib}
        yield vf.name, subDoc
def convert5to4(
    doc: DesignSpaceDocument,
) -> Dict[str, DesignSpaceDocument]:
    """Convert each variable font listed in this document into a standalone
    format 4 designspace. This can be used to compile all the variable fonts
    from a format 5 designspace using tools that only know about format 4.

    Returns a mapping of variable-font name to its format 4 document.

    .. versionadded:: 5.0
    """
    result: Dict[str, DesignSpaceDocument] = {}
    for _, interpolableDoc in splitInterpolable(doc):
        for vfName, vfDoc in splitVariableFonts(interpolableDoc):
            vfDoc.formatVersion = "4.1"
            result[vfName] = vfDoc
    return result
def _extractSubSpace(
    doc: DesignSpaceDocument,
    userRegion: Region,
    *,
    keepVFs: bool,
    makeNames: bool,
    expandLocations: bool,
    makeInstanceFilename: MakeInstanceFilenameCallable,
) -> DesignSpaceDocument:
    """Build a new document restricted to ``userRegion``: axes are trimmed to
    the region's ranges, and only the sources, rules, variable fonts and
    instances that fall within the region are kept.  STAT-related data is
    deliberately not copied (see FIXME below).
    """
    subDoc = DesignSpaceDocument()
    # Don't include STAT info
    # FIXME: (Jany) let's think about it. Not include = OK because the point of
    # the splitting is to build VFs and we'll use the STAT data of the full
    # document to generate the STAT of the VFs, so "no need" to have STAT data
    # in sub-docs. Counterpoint: what if someone wants to split this DS for
    # other purposes? Maybe for that it would be useful to also subset the STAT
    # data?
    # subDoc.elidedFallbackName = doc.elidedFallbackName

    def maybeExpandDesignLocation(object):
        # Helper: full design location (all axes filled in) when
        # expandLocations was requested, otherwise the object's partial one.
        if expandLocations:
            return object.getFullDesignLocation(doc)
        else:
            return object.designLocation

    # Axes: keep only those selected as a Range, clipped to that range.
    # Point-selected axes (and discrete axes) are not added to the sub-doc.
    for axis in doc.axes:
        range = userRegion[axis.name]
        if isinstance(range, Range) and hasattr(axis, "minimum"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axis = cast(AxisDescriptor, axis)
            subDoc.addAxis(
                AxisDescriptor(
                    # Same info
                    tag=axis.tag,
                    name=axis.name,
                    labelNames=axis.labelNames,
                    hidden=axis.hidden,
                    # Subset range
                    minimum=max(range.minimum, axis.minimum),
                    default=range.default or axis.default,
                    maximum=min(range.maximum, axis.maximum),
                    map=[
                        (user, design)
                        for user, design in axis.map
                        if range.minimum <= user <= range.maximum
                    ],
                    # Don't include STAT info
                    axisOrdering=None,
                    axisLabels=None,
                )
            )

    # Axis mappings: keep only those whose input axes all survived; output
    # axes outside the sub-doc are an error (logged and skipped).
    subDoc.axisMappings = mappings = []
    subDocAxes = {axis.name for axis in subDoc.axes}
    for mapping in doc.axisMappings:
        if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()):
            continue
        if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()):
            LOGGER.error(
                "In axis mapping from input %s, some output axes are not in the variable-font: %s",
                mapping.inputLocation,
                mapping.outputLocation,
            )
            continue
        mappingAxes = set()
        mappingAxes.update(mapping.inputLocation.keys())
        mappingAxes.update(mapping.outputLocation.keys())
        for axis in doc.axes:
            if axis.name not in mappingAxes:
                continue
            range = userRegion[axis.name]
            # Limiting ranges of axes referenced by a mapping is unsupported.
            if (
                range.minimum != axis.minimum
                or (range.default is not None and range.default != axis.default)
                or range.maximum != axis.maximum
            ):
                LOGGER.error(
                    "Limiting axis ranges used in elements not supported: %s",
                    axis.name,
                )
                continue
        mappings.append(
            AxisMappingDescriptor(
                inputLocation=mapping.inputLocation,
                outputLocation=mapping.outputLocation,
            )
        )

    # Don't include STAT info
    # subDoc.locationLabels = doc.locationLabels

    # Rules: subset them based on conditions
    designRegion = userRegionToDesignRegion(doc, userRegion)
    subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion)
    subDoc.rulesProcessingLast = doc.rulesProcessingLast

    # Sources: keep only the ones that fall within the kept axis ranges
    for source in doc.sources:
        if not locationInRegion(doc.map_backward(source.designLocation), userRegion):
            continue
        subDoc.addSource(
            SourceDescriptor(
                filename=source.filename,
                path=source.path,
                font=source.font,
                name=source.name,
                designLocation=_filterLocation(
                    userRegion, maybeExpandDesignLocation(source)
                ),
                layerName=source.layerName,
                familyName=source.familyName,
                styleName=source.styleName,
                muteKerning=source.muteKerning,
                muteInfo=source.muteInfo,
                mutedGlyphNames=source.mutedGlyphNames,
            )
        )

    # Copy family name translations from the old default source to the new default
    vfDefault = subDoc.findDefault()
    oldDefault = doc.findDefault()
    if vfDefault is not None and oldDefault is not None:
        vfDefault.localisedFamilyName = oldDefault.localisedFamilyName

    # Variable fonts: keep only the ones that fall within the kept axis ranges
    if keepVFs:
        # Note: call getVariableFont() to make the implicit VFs explicit
        for vf in doc.getVariableFonts():
            vfUserRegion = getVFUserRegion(doc, vf)
            if regionInRegion(vfUserRegion, userRegion):
                subDoc.addVariableFont(
                    VariableFontDescriptor(
                        name=vf.name,
                        filename=vf.filename,
                        axisSubsets=[
                            axisSubset
                            for axisSubset in vf.axisSubsets
                            if isinstance(userRegion[axisSubset.name], Range)
                        ],
                        lib=vf.lib,
                    )
                )

    # Instances: same as Sources + compute missing names
    for instance in doc.instances:
        if not locationInRegion(instance.getFullUserLocation(doc), userRegion):
            continue
        if makeNames:
            # Fill in any name the instance leaves unspecified from STAT data.
            statNames = getStatNames(doc, instance.getFullUserLocation(doc))
            familyName = instance.familyName or statNames.familyNames.get("en")
            styleName = instance.styleName or statNames.styleNames.get("en")
            subDoc.addInstance(
                InstanceDescriptor(
                    filename=instance.filename
                    or makeInstanceFilename(doc, instance, statNames),
                    path=instance.path,
                    font=instance.font,
                    name=instance.name or f"{familyName} {styleName}",
                    userLocation={} if expandLocations else instance.userLocation,
                    designLocation=_filterLocation(
                        userRegion, maybeExpandDesignLocation(instance)
                    ),
                    familyName=familyName,
                    styleName=styleName,
                    postScriptFontName=instance.postScriptFontName
                    or statNames.postScriptFontName,
                    styleMapFamilyName=instance.styleMapFamilyName
                    or statNames.styleMapFamilyNames.get("en"),
                    styleMapStyleName=instance.styleMapStyleName
                    or statNames.styleMapStyleName,
                    localisedFamilyName=instance.localisedFamilyName
                    or statNames.familyNames,
                    localisedStyleName=instance.localisedStyleName
                    or statNames.styleNames,
                    localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName
                    or statNames.styleMapFamilyNames,
                    localisedStyleMapStyleName=instance.localisedStyleMapStyleName
                    or {},
                    lib=instance.lib,
                )
            )
        else:
            subDoc.addInstance(
                InstanceDescriptor(
                    filename=instance.filename,
                    path=instance.path,
                    font=instance.font,
                    name=instance.name,
                    userLocation={} if expandLocations else instance.userLocation,
                    designLocation=_filterLocation(
                        userRegion, maybeExpandDesignLocation(instance)
                    ),
                    familyName=instance.familyName,
                    styleName=instance.styleName,
                    postScriptFontName=instance.postScriptFontName,
                    styleMapFamilyName=instance.styleMapFamilyName,
                    styleMapStyleName=instance.styleMapStyleName,
                    localisedFamilyName=instance.localisedFamilyName,
                    localisedStyleName=instance.localisedStyleName,
                    localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName,
                    localisedStyleMapStyleName=instance.localisedStyleMapStyleName,
                    lib=instance.lib,
                )
            )

    subDoc.lib = doc.lib
    return subDoc
def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet:
    """Turn a raw list of condition dicts into a name-to-Range mapping,
    substituting minus/plus infinity for missing bounds."""
    ranges: Dict[str, Range] = {}
    for condition in conditionSet:
        lower = condition.get("minimum")
        upper = condition.get("maximum")
        ranges[condition["name"]] = Range(
            -math.inf if lower is None else lower,
            math.inf if upper is None else upper,
        )
    return ranges
def _subsetRulesBasedOnConditions(
    rules: List[RuleDescriptor], designRegion: Region
) -> List[RuleDescriptor]:
    """Return copies of ``rules`` restricted to ``designRegion``: conditions
    that always hold in the region are dropped, range conditions are clipped
    to the region, and conditionsets/rules that can never match are discarded.
    """
    # What rules to keep:
    # - Keep the rule if any conditionset is relevant.
    # - A conditionset is relevant if all conditions are relevant or it is empty.
    # - A condition is relevant if
    #   - axis is point (C-AP),
    #     - and point in condition's range (C-AP-in)
    #       (in this case remove the condition because it's always true)
    #     - else (C-AP-out) whole conditionset can be discarded (condition false
    #       => conditionset false)
    #   - axis is range (C-AR),
    #     - (C-AR-all) and axis range fully contained in condition range: we can
    #       scrap the condition because it's always true
    #     - (C-AR-inter) and intersection(axis range, condition range) not empty:
    #       keep the condition with the smaller range (= intersection)
    #     - (C-AR-none) else, whole conditionset can be discarded
    newRules: List[RuleDescriptor] = []
    for rule in rules:
        newRule: RuleDescriptor = RuleDescriptor(
            name=rule.name, conditionSets=[], subs=rule.subs
        )
        for conditionset in rule.conditionSets:
            cs = _conditionSetFrom(conditionset)
            newConditionset: List[Dict[str, Any]] = []
            discardConditionset = False
            for selectionName, selectionValue in designRegion.items():
                # TODO: Ensure that all(key in conditionset for key in region.keys())?
                if selectionName not in cs:
                    # raise Exception("Selection has different axes than the rules")
                    continue
                if isinstance(selectionValue, (float, int)):  # is point
                    # Case C-AP-in
                    if selectionValue in cs[selectionName]:
                        pass  # always matches, conditionset can stay empty for this one.
                    # Case C-AP-out
                    else:
                        discardConditionset = True
                else:  # is range
                    # Case C-AR-all
                    if selectionValue in cs[selectionName]:
                        pass  # always matches, conditionset can stay empty for this one.
                    else:
                        intersection = cs[selectionName].intersection(selectionValue)
                        # Case C-AR-inter
                        if intersection is not None:
                            newConditionset.append(
                                {
                                    "name": selectionName,
                                    "minimum": intersection.minimum,
                                    "maximum": intersection.maximum,
                                }
                            )
                        # Case C-AR-none
                        else:
                            discardConditionset = True
            if not discardConditionset:
                newRule.conditionSets.append(newConditionset)
        if newRule.conditionSets:
            newRules.append(newRule)
    return newRules
def _filterLocation(
    userRegion: Region,
    location: Dict[str, float],
) -> Dict[str, float]:
    """Keep only the axes of ``location`` that stay variable (Range-valued)
    in ``userRegion``; axes pinned to a single value are dropped."""
    filtered: Dict[str, float] = {}
    for axisName, axisValue in location.items():
        # .get() yields None for absent axes; None is never a Range.
        if isinstance(userRegion.get(axisName), Range):
            filtered[axisName] = axisValue
    return filtered
"""Compute name information for a given location in user-space coordinates
using STAT data. This can be used to fill-in automatically the names of an
instance:
.. code:: python
instance = doc.instances[0]
names = getStatNames(doc, instance.getFullUserLocation(doc))
print(names.styleNames)
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Literal, Optional, Tuple, Union
import logging
from fontTools.designspaceLib import (
AxisDescriptor,
AxisLabelDescriptor,
DesignSpaceDocument,
DiscreteAxisDescriptor,
SimpleLocationDict,
SourceDescriptor,
)
LOGGER = logging.getLogger(__name__)

# The four style-linking ("RIBBI") names of the OS/2 style map.
# Literal with several values is equivalent to a Union of single Literals.
RibbiStyleName = Literal["regular", "bold", "italic", "bold italic"]

# Map the (bold, italic) flag pair to the corresponding RIBBI style name.
BOLD_ITALIC_TO_RIBBI_STYLE = {
    (False, False): "regular",
    (False, True): "italic",
    (True, False): "bold",
    (True, True): "bold italic",
}
@dataclass
class StatNames:
    """Name data generated from the STAT table information."""

    # Localized family names, keyed by language code (e.g. "en").
    familyNames: Dict[str, str]
    # Localized style names, keyed by language code.
    styleNames: Dict[str, str]
    # PostScript name; None when family/style names could not be computed.
    postScriptFontName: Optional[str]
    # Localized style-map family names, keyed by language code.
    styleMapFamilyNames: Dict[str, str]
    # RIBBI style ("regular"/"bold"/"italic"/"bold italic"), or None.
    styleMapStyleName: Optional[RibbiStyleName]
def getStatNames(
    doc: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> StatNames:
    """Compute the family, style, PostScript names of the given ``userLocation``
    using the document's STAT information.

    Also computes localizations.

    If not enough STAT data is available for a given name, either its dict of
    localized names will be empty (family and style names), or the name will be
    None (PostScript name).

    Note: this method does not consider info attached to the instance, like
    family name. The user needs to override all names on an instance that STAT
    information would compute differently than desired.

    .. versionadded:: 5.0
    """
    # Family names come from the default source (English + its localisations).
    familyNames: Dict[str, str] = {}
    defaultSource: Optional[SourceDescriptor] = doc.findDefault()
    if defaultSource is None:
        LOGGER.warning("Cannot determine default source to look up family name.")
    elif defaultSource.familyName is None:
        LOGGER.warning(
            "Cannot look up family name, assign the 'familyname' attribute to the default source."
        )
    else:
        familyNames = {
            "en": defaultSource.familyName,
            **defaultSource.localisedFamilyName,
        }

    styleNames: Dict[str, str] = {}
    # If a free-standing label matches the location, use it for name generation.
    label = doc.labelForUserLocation(userLocation)
    if label is not None:
        styleNames = {"en": label.name, **label.labelNames}
    # Otherwise, scour the axis labels for matches.
    else:
        # Gather all languages in which at least one translation is provided
        # Then build names for all these languages, but fallback to English
        # whenever a translation is missing.
        labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
        if labels:
            languages = set(
                language for label in labels for language in label.labelNames
            )
            languages.add("en")
            for language in languages:
                # Join non-elidable labels; fall back to the document's
                # elided fallback name when everything was elided.
                styleName = " ".join(
                    label.labelNames.get(language, label.defaultName)
                    for label in labels
                    if not label.elidable
                )
                if not styleName and doc.elidedFallbackName is not None:
                    styleName = doc.elidedFallbackName
                styleNames[language] = styleName

    if "en" not in familyNames or "en" not in styleNames:
        # Not enough information to compute PS names of styleMap names
        return StatNames(
            familyNames=familyNames,
            styleNames=styleNames,
            postScriptFontName=None,
            styleMapFamilyNames={},
            styleMapStyleName=None,
        )

    postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")

    styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)

    # Style-map family names are built from the style names of the linked
    # "Regular" location (recursive call when this location is not it).
    styleNamesForStyleMap = styleNames
    if regularUserLocation != userLocation:
        regularStatNames = getStatNames(doc, regularUserLocation)
        styleNamesForStyleMap = regularStatNames.styleNames

    styleMapFamilyNames = {}
    for language in set(familyNames).union(styleNames.keys()):
        familyName = familyNames.get(language, familyNames["en"])
        styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
        styleMapFamilyNames[language] = (familyName + " " + styleName).strip()

    return StatNames(
        familyNames=familyNames,
        styleNames=styleNames,
        postScriptFontName=postScriptFontName,
        styleMapFamilyNames=styleMapFamilyNames,
        styleMapStyleName=styleMapStyleName,
    )
def _getSortedAxisLabels(
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
) -> Dict[str, list[AxisLabelDescriptor]]:
"""Returns axis labels sorted by their ordering, with unordered ones appended as
they are listed."""
# First, get the axis labels with explicit ordering...
sortedAxes = sorted(
(axis for axis in axes if axis.axisOrdering is not None),
key=lambda a: a.axisOrdering,
)
sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
axis.name: axis.axisLabels for axis in sortedAxes
}
# ... then append the others in the order they appear.
# NOTE: This relies on Python 3.7+ dict's preserved insertion order.
for axis in axes:
if axis.axisOrdering is None:
sortedLabels[axis.name] = axis.axisLabels
return sortedLabels
def _getAxisLabelsForUserLocation(
    axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
    userLocation: SimpleLocationDict,
) -> list[AxisLabelDescriptor]:
    """Pick, for each axis, the first label matching the user location.

    A label matches by exact ``userValue``, or when the location value lies
    inside the label's ``[userMinimum, userMaximum]`` interval.  Axes with no
    matching label are logged at debug level and contribute no label.
    """
    labels: list[AxisLabelDescriptor] = []
    allAxisLabels = _getSortedAxisLabels(axes)
    if allAxisLabels.keys() != userLocation.keys():
        LOGGER.warning(
            f"Mismatch between user location '{userLocation.keys()}' and available "
            f"labels for '{allAxisLabels.keys()}'."
        )
    for axisName, axisLabels in allAxisLabels.items():
        userValue = userLocation[axisName]
        label: Optional[AxisLabelDescriptor] = None
        for candidate in axisLabels:
            withinRange = (
                candidate.userMinimum is not None
                and candidate.userMaximum is not None
                and candidate.userMinimum <= userValue <= candidate.userMaximum
            )
            if candidate.userValue == userValue or withinRange:
                label = candidate
                break
        if label is None:
            LOGGER.debug(
                f"Document needs a label for axis '{axisName}', user value '{userValue}'."
            )
        else:
            labels.append(label)
    return labels
def _getRibbiStyle(
    self: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> Tuple[RibbiStyleName, SimpleLocationDict]:
    """Compute the RIBBI style name of the given user location,
    return the location of the matching Regular in the RIBBI group.

    .. versionadded:: 5.0
    """
    regularUserLocation = {}
    axes_by_tag = {axis.tag: axis for axis in self.axes}

    bold: bool = False
    italic: bool = False

    # Bold detection: the location is "Bold" when some label on the 'wght'
    # axis style-links to it from a lighter weight.
    axis = axes_by_tag.get("wght")
    if axis is not None:
        for regular_label in axis.axisLabels:
            if (
                regular_label.linkedUserValue == userLocation[axis.name]
                # In the "recursive" case where both the Regular has
                # linkedUserValue pointing the Bold, and the Bold has
                # linkedUserValue pointing to the Regular, only consider the
                # first case: Regular (e.g. 400) has linkedUserValue pointing to
                # Bold (e.g. 700, higher than Regular)
                and regular_label.userValue < regular_label.linkedUserValue
            ):
                regularUserLocation[axis.name] = regular_label.userValue
                bold = True
                break

    # Italic detection: same idea on 'ital', falling back to 'slnt'.
    axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
    if axis is not None:
        for upright_label in axis.axisLabels:
            if (
                upright_label.linkedUserValue == userLocation[axis.name]
                # In the "recursive" case where both the Upright has
                # linkedUserValue pointing the Italic, and the Italic has
                # linkedUserValue pointing to the Upright, only consider the
                # first case: Upright (e.g. ital=0, slant=0) has
                # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
                # slant=12 for backwards italics, in any case higher than
                # Upright in absolute value, hence the abs() below.
                and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
            ):
                regularUserLocation[axis.name] = upright_label.userValue
                italic = True
                break

    # Return the RIBBI name plus the location with bold/italic axes reset to
    # their linked "Regular" values.
    return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
        **userLocation,
        **regularUserLocation,
    }
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, List, Optional, Union, cast
from fontTools.designspaceLib import (
AxisDescriptor,
DesignSpaceDocument,
DesignSpaceDocumentError,
RangeAxisSubsetDescriptor,
SimpleLocationDict,
ValueAxisSubsetDescriptor,
VariableFontDescriptor,
)
def clamp(value, minimum, maximum):
    """Restrict *value* to the interval bounded by *minimum* and *maximum*."""
    return min(maximum, max(minimum, value))
@dataclass
class Range:
    """An inclusive interval with a default value clamped into it."""

    minimum: float
    """Inclusive minimum of the range."""
    maximum: float
    """Inclusive maximum of the range."""
    default: float = 0
    """Default value"""

    def __post_init__(self):
        # Accept the bounds in either order, then force the default into
        # the (now ordered) interval.
        if self.maximum < self.minimum:
            self.minimum, self.maximum = self.maximum, self.minimum
        self.default = min(self.maximum, max(self.minimum, self.default))

    def __contains__(self, value: Union[float, Range]) -> bool:
        """A number is contained when minimum <= value <= maximum; another
        Range is contained when it lies entirely within this one."""
        if isinstance(value, Range):
            return self.minimum <= value.minimum and value.maximum <= self.maximum
        return self.minimum <= value <= self.maximum

    def intersection(self, other: Range) -> Optional[Range]:
        """Return the overlap of both ranges, or None when they are disjoint.

        The result keeps this range's default (callers don't rely on it).
        """
        if other.maximum < self.minimum or other.minimum > self.maximum:
            return None
        return Range(
            max(self.minimum, other.minimum),
            min(self.maximum, other.maximum),
            self.default,
        )
# A region selection is either a range or a single value, as a Designspace v5
# axis-subset element only allows a single discrete value or a range for a
# variable-font element.
Region = Dict[str, Union[Range, float]]

# A conditionset is a set of named ranges.
ConditionSet = Dict[str, Range]

# A rule is a list of conditionsets where any has to be relevant for the whole rule to be relevant.
Rule = List[ConditionSet]
# NOTE(review): appears unused in this chunk; presumably rules keyed by rule name.
Rules = Dict[str, Rule]
def locationInRegion(location: SimpleLocationDict, region: Region) -> bool:
    """True when every axis of ``location`` is listed in ``region`` and its
    value matches the region's selection (exact point, or within range)."""
    for axisName, axisValue in location.items():
        try:
            selection = region[axisName]
        except KeyError:
            # An axis unknown to the region disqualifies the location.
            return False
        if isinstance(selection, (float, int)):
            # Point selection: exact match required.
            if axisValue != selection:
                return False
        elif axisValue not in selection:
            # Range selection: membership test (Range.__contains__).
            return False
    return True
def regionInRegion(region: Region, superRegion: Region) -> bool:
    """Return True if ``region`` is entirely contained in ``superRegion``.

    Every axis selection of ``region`` must be present in ``superRegion``
    and be equal to (point selection) or contained in (range selection) the
    corresponding selection of ``superRegion``.
    """
    for name, value in region.items():
        # Idiomatic membership test (was the non-idiomatic `not name in`).
        if name not in superRegion:
            return False
        superValue = superRegion[name]
        if isinstance(superValue, (float, int)):
            # Point selection: values must match exactly.
            if value != superValue:
                return False
        else:
            # Range selection: `value` may be a number or a sub-range.
            if value not in superValue:
                return False
    return True
def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region:
    """Map a user-space region into design space through each axis's
    ``map_forward`` mapping.

    Raises DesignSpaceDocumentError when the region mentions an axis the
    document does not define.
    """
    designRegion: Region = {}
    for name, selection in userRegion.items():
        axis = doc.getAxis(name)
        if axis is None:
            raise DesignSpaceDocumentError(
                f"Cannot find axis named '{name}' for region."
            )
        if isinstance(selection, (float, int)):
            # Point selection: map the single value.
            designRegion[name] = axis.map_forward(selection)
        else:
            # Range selection: map minimum, maximum and default separately.
            designRegion[name] = Range(
                axis.map_forward(selection.minimum),
                axis.map_forward(selection.maximum),
                axis.map_forward(selection.default),
            )
    return designRegion
def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region:
    """Return the user-space region (axis name -> Range or single value)
    covered by the given variable font, filling in the document's axis
    defaults for axes the VF does not mention."""
    vfUserRegion: Region = {}
    # For each axis, 2 cases:
    # - it has a range = it's an axis in the VF DS
    # - it's a single location = use it to know which rules should apply in the VF
    for axisSubset in vf.axisSubsets:
        axis = doc.getAxis(axisSubset.name)
        if axis is None:
            raise DesignSpaceDocumentError(
                f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'."
            )
        if hasattr(axisSubset, "userMinimum"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset)
            if not hasattr(axis, "minimum"):
                raise DesignSpaceDocumentError(
                    f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' "
                    "because it's a discrete axis, use only 'userValue' instead."
                )
            axis = cast(AxisDescriptor, axis)
            # Clip the subset's requested range to the axis's own extent.
            vfUserRegion[axis.name] = Range(
                max(axisSubset.userMinimum, axis.minimum),
                min(axisSubset.userMaximum, axis.maximum),
                axisSubset.userDefault or axis.default,
            )
        else:
            axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset)
            vfUserRegion[axis.name] = axisSubset.userValue
    # Any axis not mentioned explicitly has a single location = default value
    for axis in doc.axes:
        if axis.name not in vfUserRegion:
            assert isinstance(
                axis.default, (int, float)
            ), f"Axis '{axis.name}' has no valid default value."
            vfUserRegion[axis.name] = axis.default
    return vfUserRegion
"""
designSpaceDocument
- Read and write designspace files
"""
from __future__ import annotations
import collections
import copy
import itertools
import math
import os
import posixpath
from io import BytesIO, StringIO
from textwrap import indent
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import tobytes, tostr
# Public API of the designspaceLib package.
__all__ = [
    "AxisDescriptor",
    "AxisLabelDescriptor",
    "AxisMappingDescriptor",
    "BaseDocReader",
    "BaseDocWriter",
    "DesignSpaceDocument",
    "DesignSpaceDocumentError",
    "DiscreteAxisDescriptor",
    "InstanceDescriptor",
    "LocationLabelDescriptor",
    "RangeAxisSubsetDescriptor",
    "RuleDescriptor",
    "SourceDescriptor",
    "ValueAxisSubsetDescriptor",
    "VariableFontDescriptor",
]

# ElementTree allows to find namespace-prefixed elements, but not attributes
# so we have to do it ourselves for 'xml:lang'
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
XML_LANG = XML_NS + "lang"
def posix(path):
    """Normalize paths using forward slash to work also on Windows."""
    normalized = posixpath.join(*path.split(os.path.sep))
    # joining the split parts loses leading separators; the two prefixes
    # below are mutually exclusive, so restore whichever applied.
    if path.startswith(r"\\"):
        # Restore the double slash of UNC path mounts.
        return "//" + normalized
    if path.startswith("/"):
        # Restore the leading slash of absolute paths.
        return "/" + normalized
    return normalized
def posixpath_property(private_name):
    """Generate a property that holds a path always using forward slashes."""

    def getter(self):
        # Plain read of the backing attribute.
        return getattr(self, private_name)

    def setter(self, value):
        # Store None untouched; normalize any real path to forward slashes.
        setattr(self, private_name, value if value is None else posix(value))

    return property(getter, setter)
class DesignSpaceDocumentError(Exception):
    """Error raised while reading, writing or validating a designspace,
    optionally carrying the offending object."""

    def __init__(self, msg, obj=None):
        self.msg = msg
        self.obj = obj

    def __str__(self):
        if self.obj is not None:
            return "%s: %r" % (self.msg, self.obj)
        return str(self.msg)
class AsDictMixin(object):
    """Mixin providing a recursive ``asdict()`` for descriptor objects."""

    def asdict(self):
        """Return the public instance attributes as a dict, recursing into
        values (and list items) that themselves provide ``asdict()``."""
        result = {}
        for name, value in self.__dict__.items():
            if name.startswith("_"):
                # Private attributes are not part of the serialized form.
                continue
            if hasattr(value, "asdict"):
                result[name] = value.asdict()
            elif isinstance(value, list):
                result[name] = [
                    item.asdict() if hasattr(item, "asdict") else item
                    for item in value
                ]
            else:
                result[name] = value
        return result
class SimpleDescriptor(AsDictMixin):
    """Containers for a bunch of attributes"""

    def compare(self, other):
        """Print every attribute from ``self._attrs`` whose value differs
        between ``self`` and ``other``.

        Uses an explicit ``!=`` test instead of the previous
        ``assert``/``except AssertionError`` pattern, which silently stops
        working under ``python -O`` (asserts are stripped).
        """
        for attr in self._attrs:
            mine = getattr(self, attr)
            theirs = getattr(other, attr)
            if mine != theirs:
                print(
                    "failed attribute",
                    attr,
                    mine,
                    "!=",
                    theirs,
                )

    def __repr__(self):
        # One "name=value," line per compared attribute, indented under the
        # class name for readability.
        attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
        attrs = indent("\n".join(attrs), " ")
        return f"{self.__class__.__name__}(\n{attrs}\n)"
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source

    .. code:: python

        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """

    # Flavor string used by readers/writers to tell source elements apart
    # from instance elements.
    flavor = "source"
    # Attributes handled generically by SimpleDescriptor.compare()/__repr__.
    # NOTE(review): 'copyInfo' is accepted by __init__ but missing from this
    # list — confirm whether that omission is intended.
    _attrs = [
        "filename",
        "path",
        "name",
        "layerName",
        "location",
        "copyLib",
        "copyGroups",
        "copyFeatures",
        "muteKerning",
        "muteInfo",
        "mutedGlyphNames",
        "familyName",
        "styleName",
        "localisedFamilyName",
    ]

    # Both path attributes are normalized to forward slashes on every
    # platform (see posixpath_property).
    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        designLocation=None,
        layerName=None,
        familyName=None,
        styleName=None,
        localisedFamilyName=None,
        copyLib=False,
        copyInfo=False,
        copyGroups=False,
        copyFeatures=False,
        muteKerning=False,
        muteInfo=False,
        mutedGlyphNames=None,
    ):
        self.filename = filename
        """string. A relative path to the source file, **as it is in the document**.

        MutatorMath + VarLib.
        """
        self.path = path
        """The absolute path, calculated from filename."""

        self.font = font
        """Any Python object. Optional. Points to a representation of this
        source font that is loaded in memory, as a Python object (e.g. a
        ``defcon.Font`` or a ``fontTools.ttFont.TTFont``).

        The default document reader will not fill-in this attribute, and the
        default writer will not use this attribute. It is up to the user of
        ``designspaceLib`` to either load the resource identified by
        ``filename`` and store it in this field, or write the contents of
        this field to the disk and make ```filename`` point to that.
        """

        self.name = name
        """string. Optional. Unique identifier name for this source.

        MutatorMath + varLib.
        """

        # 'location' is accepted as an older alias for 'designLocation'.
        self.designLocation = (
            designLocation if designLocation is not None else location or {}
        )
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + varLib.

        This may be only part of the full design location.
        See :meth:`getFullDesignLocation()`

        .. versionadded:: 5.0
        """

        self.layerName = layerName
        """string. The name of the layer in the source to look for
        outline data. Default ``None`` which means ``foreground``.
        """
        self.familyName = familyName
        """string. Family name of this source. Though this data
        can be extracted from the font, it can be efficient to have it right
        here.

        varLib.
        """
        self.styleName = styleName
        """string. Style name of this source. Though this data
        can be extracted from the font, it can be efficient to have it right
        here.

        varLib.
        """
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code.

        If present, will be used to build localized names for all instances.

        .. versionadded:: 5.0
        """
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to
        be copied to the instances.

        MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs
        to be copied to the instances.

        MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the
        instances.

        MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be
        copied to the instances.

        MutatorMath.

        .. deprecated:: 5.0
        """
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source
        needs to be muted (i.e. not be part of the calculations).

        MutatorMath only.
        """
        self.muteInfo = muteInfo
        """bool. Indicated if the interpolating font.info data for
        this source needs to be muted.

        MutatorMath only.
        """
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyphnames that need to be muted in the
        instances.

        MutatorMath only.
        """

    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + varLib.

        .. deprecated:: 5.0
           Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[SimpleLocationDict]):
        self.designLocation = location or {}

    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        return self.localisedFamilyName.get(languageCode)

    def getFullDesignLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.

        .. versionadded:: 5.0
        """
        result: SimpleLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                # Fill in the axis default, mapped into design coordinates.
                result[axis.name] = axis.map_forward(axis.default)
        return result
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions to
    trigger conditionally in some parts of the designspace.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
        r1.conditionSets.append([dict(...), dict(...)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml
    """

    _attrs = ["name", "conditionSets", "subs"]  # what do we need here

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        - Each conditionset is a list of conditions.
        - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
        """
        # list of substitutions stored as tuples of glyphnames ("a", "a.alt")
        self.subs = subs or []
        """list of substitutions.

        - Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
        - Note: By default, rules are applied first, before other text
          shaping/OpenType layout, as they are part of the
          `Required Variation Alternates OpenType feature `_.
          See ref:`rules-element` § Attributes.
        """
def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given location."""
    for conditions in rule.conditionSets:
        if evaluateConditions(conditions, location):
            return True
    return False
def evaluateConditions(conditions, location):
    """Return True if all the conditions matches the given location.

    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for condition in conditions:
        value = location[condition["name"]]
        minimum = condition.get("minimum")
        maximum = condition.get("maximum")
        if minimum is None:
            # Open-ended below: only the upper bound applies.
            if value > maximum:
                return False
        elif maximum is None:
            # Open-ended above: only the lower bound applies.
            if value < minimum:
                return False
        elif not (minimum <= value <= maximum):
            return False
    return True
def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    current = glyphNames
    for rule in rules:
        if not evaluateRule(rule, location):
            continue
        substituted = []
        for name in current:
            # Replace the glyph with the first substitution that targets it;
            # the for/else keeps the glyph when no substitution matches.
            for src, dst in rule.subs:
                if name == src:
                    substituted.append(dst)
                    break
            else:
                substituted.append(name)
        current = substituted
    return current
# A location mapping axis names either to a single design value or, for the
# anisotropic case, to a (horizontal, vertical) pair of values.
AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
# A location mapping axis names to a single value.
SimpleLocationDict = Dict[str, float]
class AxisMappingDescriptor(SimpleDescriptor):
    """Represents the axis mapping element: mapping an input location
    to an output location in the designspace.

    .. code:: python

        m1 = AxisMappingDescriptor()
        m1.inputLocation = {"weight": 900, "width": 150}
        m1.outputLocation = {"weight": 870}

    .. code:: xml
    """

    # NOTE(review): description/groupDescription are set by __init__ but not
    # listed here, so compare()/asdict() ignore them — confirm intended.
    _attrs = ["inputLocation", "outputLocation"]

    def __init__(
        self,
        *,
        inputLocation=None,
        outputLocation=None,
        description=None,
        groupDescription=None,
    ):
        self.inputLocation: SimpleLocationDict = inputLocation or {}
        """dict. Axis values for the input of the mapping, in design space coordinates.

        varLib.

        .. versionadded:: 5.1
        """
        self.outputLocation: SimpleLocationDict = outputLocation or {}
        """dict. Axis values for the output of the mapping, in design space coordinates.

        varLib.

        .. versionadded:: 5.1
        """
        self.description = description
        """string. A description of the mapping.

        varLib.

        .. versionadded:: 5.2
        """
        self.groupDescription = groupDescription
        """string. A description of the group of mappings.

        varLib.

        .. versionadded:: 5.2
        """
class InstanceDescriptor(SimpleDescriptor):
    """Simple container for data related to the instance
    .. code:: python
        i2 = InstanceDescriptor()
        i2.path = instancePath2
        i2.familyName = "InstanceFamilyName"
        i2.styleName = "InstanceStyleName"
        i2.name = "instance.ufo2"
        # anisotropic location
        i2.designLocation = dict(weight=500, width=(400,300))
        i2.postScriptFontName = "InstancePostscriptName"
        i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
        i2.styleMapStyleName = "InstanceStyleMapStyleName"
        i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
        doc.addInstance(i2)
    """

    flavor = "instance"
    _defaultLanguageCode = "en"
    # Attributes taken into account by SimpleDescriptor comparison and
    # serialisation machinery.
    _attrs = [
        "filename",
        "path",
        "name",
        "locationLabel",
        "designLocation",
        "userLocation",
        "familyName",
        "styleName",
        "postScriptFontName",
        "styleMapFamilyName",
        "styleMapStyleName",
        "localisedFamilyName",
        "localisedStyleName",
        "localisedStyleMapFamilyName",
        "localisedStyleMapStyleName",
        "glyphs",
        "kerning",
        "info",
        "lib",
    ]

    # Both paths are normalised to forward slashes via posixpath_property.
    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        locationLabel=None,
        designLocation=None,
        userLocation=None,
        familyName=None,
        styleName=None,
        postScriptFontName=None,
        styleMapFamilyName=None,
        styleMapStyleName=None,
        localisedFamilyName=None,
        localisedStyleName=None,
        localisedStyleMapFamilyName=None,
        localisedStyleMapStyleName=None,
        glyphs=None,
        kerning=True,
        info=True,
        lib=None,
    ):
        self.filename = filename
        """string. Relative path to the instance file, **as it is
        in the document**. The file may or may not exist.
        MutatorMath + VarLib.
        """
        self.path = path
        """string. Absolute path to the instance file, calculated from
        the document path and the string in the filename attr. The file may
        or may not exist.
        MutatorMath.
        """
        self.font = font
        """Same as :attr:`SourceDescriptor.font`
        .. seealso:: :attr:`SourceDescriptor.font`
        """
        self.name = name
        """string. Unique identifier name of the instance, used to
        identify it if it needs to be referenced from elsewhere in the
        document.
        """
        self.locationLabel = locationLabel
        """Name of a :class:`LocationLabelDescriptor`. If
        provided, the instance should have the same location as the
        LocationLabel.
        .. seealso::
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        # ``location`` is the deprecated spelling; it is only used as a
        # fallback when ``designLocation`` is not given.
        self.designLocation: AnisotropicLocationDict = (
            designLocation if designLocation is not None else (location or {})
        )
        """dict. Axis values for this instance, in design space coordinates.
        MutatorMath + varLib.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        self.userLocation: SimpleLocationDict = userLocation or {}
        """dict. Axis values for this instance, in user space coordinates.
        MutatorMath + varLib.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        self.familyName = familyName
        """string. Family name of this instance.
        MutatorMath + varLib.
        """
        self.styleName = styleName
        """string. Style name of this instance.
        MutatorMath + varLib.
        """
        self.postScriptFontName = postScriptFontName
        """string. Postscript fontname for this instance.
        MutatorMath + varLib.
        """
        self.styleMapFamilyName = styleMapFamilyName
        """string. StyleMap familyname for this instance.
        MutatorMath + varLib.
        """
        self.styleMapStyleName = styleMapStyleName
        """string. StyleMap stylename for this instance.
        MutatorMath + varLib.
        """
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name
        strings, keyed by language code.
        """
        self.localisedStyleName = localisedStyleName or {}
        """dict. A dictionary of localised stylename
        strings, keyed by language code.
        """
        self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
        """A dictionary of localised style map
        familyname strings, keyed by language code.
        """
        self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
        """A dictionary of localised style map
        stylename strings, keyed by language code.
        """
        self.glyphs = glyphs or {}
        """dict for special master definitions for glyphs. If glyphs
        need special masters (to record the results of executed rules for
        example).
        MutatorMath.
        .. deprecated:: 5.0
            Use rules or sparse sources instead.
        """
        self.kerning = kerning
        """ bool. Indicates if this instance needs its kerning
        calculated.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.info = info
        """bool. Indicated if this instance needs the interpolating
        font.info calculated.
        .. deprecated:: 5.0
        """
        self.lib = lib or {}
        """Custom data associated with this instance."""

    @property
    def location(self):
        """dict. Axis values for this instance.
        MutatorMath + varLib.
        .. deprecated:: 5.0
            Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setStyleName(self, styleName, languageCode="en"):
        """These methods give easier access to the localised names."""
        self.localisedStyleName[languageCode] = tostr(styleName)

    def getStyleName(self, languageCode="en"):
        # Returns None if no name has been set for this language code.
        return self.localisedStyleName.get(languageCode)

    def setFamilyName(self, familyName, languageCode="en"):
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        return self.localisedFamilyName.get(languageCode)

    def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
        self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)

    def getStyleMapStyleName(self, languageCode="en"):
        return self.localisedStyleMapStyleName.get(languageCode)

    def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
        self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)

    def getStyleMapFamilyName(self, languageCode="en"):
        return self.localisedStyleMapFamilyName.get(languageCode)

    def clearLocation(self, axisName: Optional[str] = None):
        """Clear all location-related fields. Ensures that
        :attr:``designLocation`` and :attr:``userLocation`` are dictionaries
        (possibly empty if clearing everything).
        In order to update the location of this instance wholesale, a user
        should first clear all the fields, then change the field(s) for which
        they have data.
        .. code:: python
            instance.clearLocation()
            instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
            instance.userLocation = {'Opsz': 16}
        In order to update a single axis location, the user should only clear
        that axis, then edit the values:
        .. code:: python
            instance.clearLocation('Weight')
            instance.designLocation['Weight'] = (34, 36.5)
        Args:
            axisName: if provided, only clear the location for that axis.
        .. versionadded:: 5.0
        """
        # Clearing the location always detaches the instance from any
        # location label, even when only a single axis is cleared.
        self.locationLabel = None
        if axisName is None:
            self.designLocation = {}
            self.userLocation = {}
        else:
            if self.designLocation is None:
                self.designLocation = {}
            if axisName in self.designLocation:
                del self.designLocation[axisName]
            if self.userLocation is None:
                self.userLocation = {}
            if axisName in self.userLocation:
                del self.userLocation[axisName]

    def getLocationLabelDescriptor(
        self, doc: "DesignSpaceDocument"
    ) -> Optional[LocationLabelDescriptor]:
        """Get the :class:`LocationLabelDescriptor` instance that matches
        this instances's :attr:`locationLabel`.
        Raises if the named label can't be found.
        .. versionadded:: 5.0
        """
        if self.locationLabel is None:
            return None
        label = doc.getLocationLabel(self.locationLabel)
        if label is None:
            raise DesignSpaceDocumentError(
                "InstanceDescriptor.getLocationLabelDescriptor(): "
                f"unknown location label `{self.locationLabel}` in instance `{self.name}`."
            )
        return label

    def getFullDesignLocation(
        self, doc: "DesignSpaceDocument"
    ) -> AnisotropicLocationDict:
        """Get the complete design location of this instance, by combining data
        from the various location fields, default axis values and mappings, and
        top-level location labels.
        The source of truth for this instance's location is determined for each
        axis independently by taking the first not-None field in this list:
        - ``locationLabel``: the location along this axis is the same as the
          matching STAT format 4 label. No anisotropy.
        - ``designLocation[axisName]``: the explicit design location along this
          axis, possibly anisotropic.
        - ``userLocation[axisName]``: the explicit user location along this
          axis. No anisotropy.
        - ``axis.default``: default axis value. No anisotropy.
        .. versionadded:: 5.0
        """
        label = self.getLocationLabelDescriptor(doc)
        if label is not None:
            # A location label overrides everything else.
            return doc.map_forward(label.userLocation)  # type: ignore
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            elif axis.name in self.userLocation:
                result[axis.name] = axis.map_forward(self.userLocation[axis.name])
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result

    def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
        """Get the complete user location for this instance.
        .. seealso:: :meth:`getFullDesignLocation`
        .. versionadded:: 5.0
        """
        return doc.map_backward(self.getFullDesignLocation(doc))
def tagForAxisName(name):
    """Return a ``(tag, labelNames)`` pair for the given axis name.

    Well-known axis names (case-insensitive) map to their registered
    OpenType four-letter tags; any other name is truncated to four
    characters, or padded with "*" when shorter.
    """
    registered = {
        "weight": ("wght", dict(en="Weight")),
        "width": ("wdth", dict(en="Width")),
        "optical": ("opsz", dict(en="Optical Size")),
        "slant": ("slnt", dict(en="Slant")),
        "italic": ("ital", dict(en="Italic")),
    }
    key = name.lower()
    if key in registered:
        return registered[key]
    tag = name[:4] if len(name) >= 4 else name.ljust(4, "*")
    return tag, dict(en=name)
class AbstractAxisDescriptor(SimpleDescriptor):
    """Shared base for continuous (:class:`AxisDescriptor`) and discrete
    (:class:`DiscreteAxisDescriptor`) axes."""

    flavor = "axis"

    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        # opentype tag for this axis
        self.tag = tag
        """string. Four letter tag for this axis. Some might be
        registered at the `OpenType
        specification `__.
        Privately-defined axis tags must begin with an uppercase letter and
        use only uppercase letters or digits.
        """
        # name of the axis used in locations
        self.name = name
        """string. Name of the axis as it is used in the location dicts.
        MutatorMath + varLib.
        """
        # names for UI purposes, if this is not a standard axis,
        self.labelNames = labelNames or {}
        """dict. When defining a non-registered axis, it will be
        necessary to define user-facing readable names for the axis. Keyed by
        xml:lang code. Values are required to be ``unicode`` strings, even if
        they only contain ASCII characters.
        """
        self.hidden = hidden
        """bool. Whether this axis should be hidden in user interfaces.
        """
        self.map = map or []
        """list of input / output values that can describe a warp of user space
        to design space coordinates. If no map values are present, it is assumed
        user space is the same as design space, as in [(minimum, minimum),
        (maximum, maximum)].
        varLib.
        """
        self.axisOrdering = axisOrdering
        """STAT table field ``axisOrdering``.
        See: `OTSpec STAT Axis Record `_
        .. versionadded:: 5.0
        """
        self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
        """STAT table entries for Axis Value Tables format 1, 2, 3.
        See: `OTSpec STAT Axis Value Tables `_
        .. versionadded:: 5.0
        """
class AxisDescriptor(AbstractAxisDescriptor):
    """Simple container for the axis data.
    Add more localisations?
    .. code:: python
        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.labelNames['fa-IR'] = "قطر"
        a1.labelNames['en'] = "Wéíght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """

    # NOTE: ``hidden`` and ``labelNames`` are intentionally not listed here.
    _attrs = [
        "tag",
        "name",
        "maximum",
        "minimum",
        "default",
        "map",
        "axisOrdering",
        "axisLabels",
    ]

    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        minimum=None,
        default=None,
        maximum=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.minimum = minimum
        """number. The minimum value for this axis in user space.
        MutatorMath + varLib.
        """
        self.maximum = maximum
        """number. The maximum value for this axis in user space.
        MutatorMath + varLib.
        """
        self.default = default
        """number. The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.
        MutatorMath + varLib.
        """

    def serialize(self):
        # output to a dict, used in testing
        return dict(
            tag=self.tag,
            name=self.name,
            labelNames=self.labelNames,
            maximum=self.maximum,
            minimum=self.minimum,
            default=self.default,
            hidden=self.hidden,
            map=self.map,
            axisOrdering=self.axisOrdering,
            axisLabels=self.axisLabels,
        )

    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if not self.map:
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})

    def map_backward(self, v):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap

        # For anisotropic (x, y) design values only the first (x) component
        # is mapped back.
        if isinstance(v, tuple):
            v = v[0]
        if not self.map:
            return v
        # NOTE: assumes the map is invertible; if several inputs share one
        # output value, the last pair in ``self.map`` wins here.
        return piecewiseLinearMap(v, {v: k for k, v in self.map})
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.
    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and ``maximum``,
    while a discrete axis has a list of ``values``.
    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font family,
    which is useful for the STAT table, however it can't become a variation
    axis in a VF.
    .. code:: python
        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "Italic"
        a2.tag = "ITAL"
        a2.labelNames['fr'] = "Italique"
        a2.map = [(0, 0), (1, -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)
    .. versionadded:: 5.0
    """

    flavor = "axis"
    _attrs = ("tag", "name", "values", "default", "map", "axisOrdering", "axisLabels")

    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        values=None,
        default=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.
        However, this default value is less important than in continuous axes:
        - it doesn't define the "neutral" version of outlines from which
          deltas would apply, as this axis does not interpolate.
        - it doesn't provide the reference glyph set for the designspace, as
          fonts at each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous axes,
        only the values in this list can be taken by the axis, nothing in-between.
        """

    def map_forward(self, value):
        """Maps value from axis mapping's input to output.
        Returns value unchanged if no mapping entry is found.
        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        # First pair whose input matches wins; fall back to the value itself.
        return next((v for k, v in self.map if k == value), value)

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.
        Returns value unchanged if no mapping entry is found.
        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        # For anisotropic (x, y) values only the first component is used.
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)
class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.
    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 1, 2, 3 `_
    The STAT format of the Axis value depends on which field are filled-in,
    see :meth:`getFormat`
    .. versionadded:: 5.0
    """

    flavor = "label"
    _attrs = (
        "userMinimum",
        "userValue",
        "userMaximum",
        "name",
        "elidable",
        "olderSibling",
        "linkedUserValue",
        "labelNames",
    )

    def __init__(
        self,
        *,
        name,
        userValue,
        userMinimum=None,
        userMaximum=None,
        elidable=False,
        olderSibling=False,
        linkedUserValue=None,
        labelNames=None,
    ):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
        See: `OTSpec STAT Flags `_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
        See: `OTSpec STAT Flags `_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: MutableMapping[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code.
        """

    def getFormat(self) -> int:
        """Determine which format of STAT Axis value to use to encode this label.
        =========== ========= =========== =========== ===============
        STAT Format userValue userMinimum userMaximum linkedUserValue
        =========== ========= =========== =========== ===============
        1           ✅        ❌          ❌          ❌
        2           ✅        ✅          ✅          ❌
        3           ✅        ❌          ❌          ✅
        =========== ========= =========== =========== ===============
        """
        # A linked value takes precedence over a range when deciding format.
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name
class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.
    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 4 `_
    .. versionadded:: 5.0
    """

    flavor = "label"
    _attrs = ("name", "elidable", "olderSibling", "userLocation", "labelNames")

    def __init__(
        self,
        *,
        name,
        userLocation,
        elidable=False,
        olderSibling=False,
        labelNames=None,
    ):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.
        If an axis is not mentioned, it is assumed to be at its default location.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
        See: `OTSpec STAT Flags `_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
        See: `OTSpec STAT Flags `_
        """
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        xml:lang code.
        """

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name

    def getFullUserLocation(self, doc: "DesignSpaceDocument") -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.
        .. versionadded:: 5.0
        """
        # Axes absent from userLocation fall back to their default value.
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.
    Use-cases:
    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes. Before version 5, you would have needed
      1 DesignSpace per such variable font, and a lot of data duplication.
    - From a big variable font with many axes, define subsets of that variable
      font that only include some axes and freeze other axes at a given location.
    .. versionadded:: 5.0
    """

    flavor = "variable-font"
    _attrs = ("filename", "axisSubsets", "lib")

    # Relative path normalised to forward slashes.
    filename = posixpath_property("_filename")

    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable to identify it during the
        build process and from other parts of the document, and also as a
        filename in case the filename property is empty.
        VarLib.
        """
        self.filename: str = filename
        """string, optional. Relative path to the variable font file, **as it is
        in the document**. The file may or may not exist.
        If not specified, the :attr:`name` will be used as a basename for the file.
        """
        self.axisSubsets: List[
            Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]
        ] = (axisSubsets or [])
        """Axis subsets to include in this variable font.
        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.
    .. versionadded:: 5.0
    """

    flavor = "axis-subset"
    _attrs = ("name", "userMinimum", "userDefault", "userMaximum")

    def __init__(
        self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf
    ):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)
        """
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)
        """
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)
        """
class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable font.
    .. versionadded:: 5.0
    """

    flavor = "axis-subset"
    _attrs = ("name", "userValue")

    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
        to "snapshot" or "freeze".
        """
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
    # Serialiser that turns a DesignSpaceDocument object tree into a
    # designspace XML file.  Subclasses can override the descriptor classes
    # below to customise which objects the reader/writer round-trips.
    # NOTE(review): _whiteSpace looks like it may have lost characters in
    # transit; upstream uses a four-space indent string — confirm.
    _whiteSpace = " "
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    axisMappingDescriptorClass = AxisMappingDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor
    @classmethod
    def getAxisDecriptor(cls):
        # Factory for a new axis descriptor.  NOTE: the misspelling
        # ("Decriptor") is part of the public API and must be preserved.
        return cls.axisDescriptorClass()
    @classmethod
    def getAxisMappingDescriptor(cls):
        # Factory for a new axis mapping descriptor.
        return cls.axisMappingDescriptorClass()
    @classmethod
    def getSourceDescriptor(cls):
        # Factory for a new source descriptor.
        return cls.sourceDescriptorClass()
    @classmethod
    def getInstanceDescriptor(cls):
        # Factory for a new instance descriptor.
        return cls.instanceDescriptorClass()
    @classmethod
    def getRuleDescriptor(cls):
        # Factory for a new rule descriptor.
        return cls.ruleDescriptorClass()
    def __init__(self, documentPath, documentObject: DesignSpaceDocument):
        """Prepare a writer for *documentObject*, to be saved at *documentPath*."""
        self.path = documentPath
        self.documentObject = documentObject
        # Resolved once up front: the minimum format version able to encode
        # everything the document contains.
        self.effectiveFormatTuple = self._getEffectiveFormatTuple()
        self.root = ET.Element("designspace")
    def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
        """Build the XML tree from the document object and write it to disk.

        Element order matters for the file format: axes, mappings, labels,
        rules, sources, variable-fonts, instances, lib.
        """
        self.root.attrib["format"] = ".".join(str(i) for i in self.effectiveFormatTuple)
        # The <axes> element is also needed when there are only mappings or
        # an elided fallback name, even without any axis.
        if (
            self.documentObject.axes
            or self.documentObject.axisMappings
            or self.documentObject.elidedFallbackName is not None
        ):
            axesElement = ET.Element("axes")
            if self.documentObject.elidedFallbackName is not None:
                axesElement.attrib["elidedfallbackname"] = (
                    self.documentObject.elidedFallbackName
                )
            self.root.append(axesElement)
        for axisObject in self.documentObject.axes:
            self._addAxis(axisObject)
        if self.documentObject.axisMappings:
            # Consecutive mappings with the same groupDescription are grouped
            # into one <mappings> element; each group change starts a new one.
            mappingsElement = None
            lastGroup = object()
            for mappingObject in self.documentObject.axisMappings:
                if getattr(mappingObject, "groupDescription", None) != lastGroup:
                    if mappingsElement is not None:
                        self.root.findall(".axes")[0].append(mappingsElement)
                    lastGroup = getattr(mappingObject, "groupDescription", None)
                    mappingsElement = ET.Element("mappings")
                    if lastGroup is not None:
                        mappingsElement.attrib["description"] = lastGroup
                self._addAxisMapping(mappingsElement, mappingObject)
            if mappingsElement is not None:
                self.root.findall(".axes")[0].append(mappingsElement)
        if self.documentObject.locationLabels:
            labelsElement = ET.Element("labels")
            for labelObject in self.documentObject.locationLabels:
                self._addLocationLabel(labelsElement, labelObject)
            self.root.append(labelsElement)
        if self.documentObject.rules:
            if getattr(self.documentObject, "rulesProcessingLast", False):
                attributes = {"processing": "last"}
            else:
                attributes = {}
            self.root.append(ET.Element("rules", attributes))
        for ruleObject in self.documentObject.rules:
            self._addRule(ruleObject)
        if self.documentObject.sources:
            self.root.append(ET.Element("sources"))
        for sourceObject in self.documentObject.sources:
            self._addSource(sourceObject)
        if self.documentObject.variableFonts:
            variableFontsElement = ET.Element("variable-fonts")
            for variableFont in self.documentObject.variableFonts:
                self._addVariableFont(variableFontsElement, variableFont)
            self.root.append(variableFontsElement)
        if self.documentObject.instances:
            self.root.append(ET.Element("instances"))
        for instanceObject in self.documentObject.instances:
            self._addInstance(instanceObject)
        if self.documentObject.lib:
            self._addLib(self.root, self.documentObject.lib, 2)
        tree = ET.ElementTree(self.root)
        tree.write(
            self.path,
            encoding=encoding,
            method="xml",
            xml_declaration=xml_declaration,
            pretty_print=pretty,
        )
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
if (
any(
hasattr(axis, "values")
or axis.axisOrdering is not None
or axis.axisLabels
for axis in self.documentObject.axes
)
or self.documentObject.locationLabels
or any(source.localisedFamilyName for source in self.documentObject.sources)
or self.documentObject.variableFonts
or any(
instance.locationLabel or instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
if self.documentObject.axisMappings:
if minVersion < (5, 1):
minVersion = (5, 1)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
"""Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib["name"] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element("dimension")
dimElement.attrib["name"] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue[0])
dimElement.attrib["yvalue"] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib["xvalue"] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip("0").rstrip(".")
    def _addRule(self, ruleObject):
        """Serialise one rule descriptor into a <rule> element under <rules>."""
        # if none of the conditions have minimum or maximum values, do not add the rule.
        ruleElement = ET.Element("rule")
        if ruleObject.name is not None:
            ruleElement.attrib["name"] = ruleObject.name
        for conditions in ruleObject.conditionSets:
            conditionsetElement = ET.Element("conditionset")
            for cond in conditions:
                if cond.get("minimum") is None and cond.get("maximum") is None:
                    # neither is defined, don't add this condition
                    continue
                conditionElement = ET.Element("condition")
                conditionElement.attrib["name"] = cond.get("name")
                if cond.get("minimum") is not None:
                    conditionElement.attrib["minimum"] = self.intOrFloat(
                        cond.get("minimum")
                    )
                if cond.get("maximum") is not None:
                    conditionElement.attrib["maximum"] = self.intOrFloat(
                        cond.get("maximum")
                    )
                conditionsetElement.append(conditionElement)
            # Skip condition sets that ended up empty.
            if len(conditionsetElement):
                ruleElement.append(conditionsetElement)
        for sub in ruleObject.subs:
            subElement = ET.Element("sub")
            subElement.attrib["name"] = sub[0]
            subElement.attrib["with"] = sub[1]
            ruleElement.append(subElement)
        # Only attach the rule if it has at least one child element.
        if len(ruleElement):
            self.root.findall(".rules")[0].append(ruleElement)
    def _addAxis(self, axisObject):
        """Serialise one axis descriptor into an <axis> element under <axes>."""
        axisElement = ET.Element("axis")
        axisElement.attrib["tag"] = axisObject.tag
        axisElement.attrib["name"] = axisObject.name
        self._addLabelNames(axisElement, axisObject.labelNames)
        if axisObject.map:
            for inputValue, outputValue in axisObject.map:
                mapElement = ET.Element("map")
                mapElement.attrib["input"] = self.intOrFloat(inputValue)
                mapElement.attrib["output"] = self.intOrFloat(outputValue)
                axisElement.append(mapElement)
        if axisObject.axisOrdering is not None or axisObject.axisLabels:
            labelsElement = ET.Element("labels")
            if axisObject.axisOrdering is not None:
                labelsElement.attrib["ordering"] = str(axisObject.axisOrdering)
            for label in axisObject.axisLabels:
                self._addAxisLabel(labelsElement, label)
            axisElement.append(labelsElement)
        # Continuous axes carry minimum/maximum; discrete axes carry values.
        if hasattr(axisObject, "minimum"):
            axisElement.attrib["minimum"] = self.intOrFloat(axisObject.minimum)
            axisElement.attrib["maximum"] = self.intOrFloat(axisObject.maximum)
        elif hasattr(axisObject, "values"):
            axisElement.attrib["values"] = " ".join(
                self.intOrFloat(v) for v in axisObject.values
            )
        axisElement.attrib["default"] = self.intOrFloat(axisObject.default)
        if axisObject.hidden:
            axisElement.attrib["hidden"] = "1"
        self.root.findall(".axes")[0].append(axisElement)
    def _addAxisMapping(self, mappingsElement, mappingObject):
        """Serialise one axis mapping into a <mapping> element under *mappingsElement*."""
        mappingElement = ET.Element("mapping")
        if getattr(mappingObject, "description", None) is not None:
            mappingElement.attrib["description"] = mappingObject.description
        # Emit <input> and <output> child elements, one <dimension> per axis.
        for what in ("inputLocation", "outputLocation"):
            whatObject = getattr(mappingObject, what, None)
            if whatObject is None:
                continue
            # "inputLocation"[:-8] == "input", "outputLocation"[:-8] == "output".
            whatElement = ET.Element(what[:-8])
            mappingElement.append(whatElement)
            for name, value in whatObject.items():
                dimensionElement = ET.Element("dimension")
                dimensionElement.attrib["name"] = name
                dimensionElement.attrib["xvalue"] = self.intOrFloat(value)
                whatElement.append(dimensionElement)
        mappingsElement.append(mappingElement)
    def _addAxisLabel(
        self, axisElement: ET.Element, label: AxisLabelDescriptor
    ) -> None:
        """Serialise one STAT axis label (formats 1/2/3) into a <label> element."""
        labelElement = ET.Element("label")
        labelElement.attrib["uservalue"] = self.intOrFloat(label.userValue)
        if label.userMinimum is not None:
            labelElement.attrib["userminimum"] = self.intOrFloat(label.userMinimum)
        if label.userMaximum is not None:
            labelElement.attrib["usermaximum"] = self.intOrFloat(label.userMaximum)
        labelElement.attrib["name"] = label.name
        # Boolean flags are only written when set.
        if label.elidable:
            labelElement.attrib["elidable"] = "true"
        if label.olderSibling:
            labelElement.attrib["oldersibling"] = "true"
        if label.linkedUserValue is not None:
            labelElement.attrib["linkeduservalue"] = self.intOrFloat(
                label.linkedUserValue
            )
        self._addLabelNames(labelElement, label.labelNames)
        axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element("labelname")
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(
    self, parentElement: ET.Element, label: LocationLabelDescriptor
) -> None:
    """Serialize a named-location <label> element (name, flags, localised
    names and its user location) into *parentElement*."""
    labelElement = ET.Element("label")
    labelElement.attrib["name"] = label.name
    # boolean flags are only written when set
    if label.elidable:
        labelElement.attrib["elidable"] = "true"
    if label.olderSibling:
        labelElement.attrib["oldersibling"] = "true"
    self._addLabelNames(labelElement, label.labelNames)
    self._addLocationElement(labelElement, userLocation=label.userLocation)
    parentElement.append(labelElement)
def _addLocationElement(
    self,
    parentElement,
    *,
    designLocation: AnisotropicLocationDict = None,
    userLocation: SimpleLocationDict = None,
):
    """Append a <location> element built from *designLocation* and/or
    *userLocation* to *parentElement*; nothing is appended when no axis
    has a value in either dict.

    NOTE(review): both annotations should probably be Optional[...]
    since the defaults are None — confirm against the module's typing
    imports before changing.
    """
    locElement = ET.Element("location")
    # iterate the document's axes so dimensions come out in axis order;
    # a design value on an axis takes precedence over a user value
    for axis in self.documentObject.axes:
        if designLocation is not None and axis.name in designLocation:
            dimElement = ET.Element("dimension")
            dimElement.attrib["name"] = axis.name
            value = designLocation[axis.name]
            if isinstance(value, tuple):
                # anisotropic value: separate x and y coordinates
                dimElement.attrib["xvalue"] = self.intOrFloat(value[0])
                dimElement.attrib["yvalue"] = self.intOrFloat(value[1])
            else:
                dimElement.attrib["xvalue"] = self.intOrFloat(value)
            locElement.append(dimElement)
        elif userLocation is not None and axis.name in userLocation:
            dimElement = ET.Element("dimension")
            dimElement.attrib["name"] = axis.name
            value = userLocation[axis.name]
            dimElement.attrib["uservalue"] = self.intOrFloat(value)
            locElement.append(dimElement)
    # only attach the element if at least one dimension was written
    if len(locElement) > 0:
        parentElement.append(locElement)
def _addLocalisedNames(self, parentElement, tag, localisedNames, getName):
    """Append one localised <tag> child per language code, in sorted
    code order. "en" is skipped because the English value is already
    stored in the parent element's attribute."""
    for code in sorted(localisedNames.keys()):
        if code == "en":
            continue  # already stored in the element attribute
        localisedElement = ET.Element(tag)
        localisedElement.attrib[XML_LANG] = code
        localisedElement.text = getName(code)
        parentElement.append(localisedElement)

def _addInstance(self, instanceObject):
    """Serialize an InstanceDescriptor into an <instance> element and
    append it to the document's <instances> container.

    Refactor note: the four near-identical localised-name loops were
    folded into the _addLocalisedNames helper above; emitted XML is
    unchanged (same tags, same sorted order, same "en" skip).
    """
    instanceElement = ET.Element("instance")
    if instanceObject.name is not None:
        instanceElement.attrib["name"] = instanceObject.name
    if instanceObject.locationLabel is not None:
        instanceElement.attrib["location"] = instanceObject.locationLabel
    if instanceObject.familyName is not None:
        instanceElement.attrib["familyname"] = instanceObject.familyName
    if instanceObject.styleName is not None:
        instanceElement.attrib["stylename"] = instanceObject.styleName
    # add localisations
    if instanceObject.localisedStyleName:
        self._addLocalisedNames(
            instanceElement,
            "stylename",
            instanceObject.localisedStyleName,
            instanceObject.getStyleName,
        )
    if instanceObject.localisedFamilyName:
        self._addLocalisedNames(
            instanceElement,
            "familyname",
            instanceObject.localisedFamilyName,
            instanceObject.getFamilyName,
        )
    if instanceObject.localisedStyleMapStyleName:
        self._addLocalisedNames(
            instanceElement,
            "stylemapstylename",
            instanceObject.localisedStyleMapStyleName,
            instanceObject.getStyleMapStyleName,
        )
    if instanceObject.localisedStyleMapFamilyName:
        self._addLocalisedNames(
            instanceElement,
            "stylemapfamilyname",
            instanceObject.localisedStyleMapFamilyName,
            instanceObject.getStyleMapFamilyName,
        )
    if self.effectiveFormatTuple >= (5, 0):
        # format 5.0+: explicit location element, unless the instance
        # refers to a named location label instead
        if instanceObject.locationLabel is None:
            self._addLocationElement(
                instanceElement,
                designLocation=instanceObject.designLocation,
                userLocation=instanceObject.userLocation,
            )
    else:
        # Pre-version 5.0 code was validating and filling in the location
        # dict while writing it out, as preserved below.
        if instanceObject.location is not None:
            locationElement, instanceObject.location = self._makeLocationElement(
                instanceObject.location
            )
            instanceElement.append(locationElement)
    if instanceObject.filename is not None:
        instanceElement.attrib["filename"] = instanceObject.filename
    if instanceObject.postScriptFontName is not None:
        instanceElement.attrib["postscriptfontname"] = (
            instanceObject.postScriptFontName
        )
    if instanceObject.styleMapFamilyName is not None:
        instanceElement.attrib["stylemapfamilyname"] = (
            instanceObject.styleMapFamilyName
        )
    if instanceObject.styleMapStyleName is not None:
        instanceElement.attrib["stylemapstylename"] = (
            instanceObject.styleMapStyleName
        )
    if self.effectiveFormatTuple < (5, 0):
        # Deprecated members as of version 5.0
        if instanceObject.glyphs:
            if instanceElement.findall(".glyphs") == []:
                glyphsElement = ET.Element("glyphs")
                instanceElement.append(glyphsElement)
            glyphsElement = instanceElement.findall(".glyphs")[0]
            for glyphName, data in sorted(instanceObject.glyphs.items()):
                glyphElement = self._writeGlyphElement(
                    instanceElement, instanceObject, glyphName, data
                )
                glyphsElement.append(glyphElement)
        if instanceObject.kerning:
            kerningElement = ET.Element("kerning")
            instanceElement.append(kerningElement)
        if instanceObject.info:
            infoElement = ET.Element("info")
            instanceElement.append(infoElement)
    self._addLib(instanceElement, instanceObject.lib, 4)
    self.root.findall(".instances")[0].append(instanceElement)
def _addSource(self, sourceObject):
    """Serialize a SourceDescriptor into a <source> element and append
    it to the document's <sources> container."""
    sourceElement = ET.Element("source")
    if sourceObject.filename is not None:
        sourceElement.attrib["filename"] = sourceObject.filename
    if sourceObject.name is not None:
        if sourceObject.name.find("temp_master") != 0:
            # do not save temporary source names
            sourceElement.attrib["name"] = sourceObject.name
    if sourceObject.familyName is not None:
        sourceElement.attrib["familyname"] = sourceObject.familyName
    if sourceObject.styleName is not None:
        sourceElement.attrib["stylename"] = sourceObject.styleName
    if sourceObject.layerName is not None:
        sourceElement.attrib["layer"] = sourceObject.layerName
    # localised family names, one child element per language code
    if sourceObject.localisedFamilyName:
        languageCodes = list(sourceObject.localisedFamilyName.keys())
        languageCodes.sort()
        for code in languageCodes:
            if code == "en":
                continue  # already stored in the element attribute
            localisedFamilyNameElement = ET.Element("familyname")
            localisedFamilyNameElement.attrib[XML_LANG] = code
            localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
            sourceElement.append(localisedFamilyNameElement)
    # copy/mute flags are written as child elements carrying
    # copy="1" / mute="1" attributes
    if sourceObject.copyLib:
        libElement = ET.Element("lib")
        libElement.attrib["copy"] = "1"
        sourceElement.append(libElement)
    if sourceObject.copyGroups:
        groupsElement = ET.Element("groups")
        groupsElement.attrib["copy"] = "1"
        sourceElement.append(groupsElement)
    if sourceObject.copyFeatures:
        featuresElement = ET.Element("features")
        featuresElement.attrib["copy"] = "1"
        sourceElement.append(featuresElement)
    if sourceObject.copyInfo or sourceObject.muteInfo:
        infoElement = ET.Element("info")
        if sourceObject.copyInfo:
            infoElement.attrib["copy"] = "1"
        if sourceObject.muteInfo:
            infoElement.attrib["mute"] = "1"
        sourceElement.append(infoElement)
    if sourceObject.muteKerning:
        kerningElement = ET.Element("kerning")
        kerningElement.attrib["mute"] = "1"
        sourceElement.append(kerningElement)
    if sourceObject.mutedGlyphNames:
        for name in sourceObject.mutedGlyphNames:
            glyphElement = ET.Element("glyph")
            glyphElement.attrib["name"] = name
            glyphElement.attrib["mute"] = "1"
            sourceElement.append(glyphElement)
    if self.effectiveFormatTuple >= (5, 0):
        self._addLocationElement(
            sourceElement, designLocation=sourceObject.location
        )
    else:
        # Pre-version 5.0 code was validating and filling in the location
        # dict while writing it out, as preserved below.
        locationElement, sourceObject.location = self._makeLocationElement(
            sourceObject.location
        )
        sourceElement.append(locationElement)
    self.root.findall(".sources")[0].append(sourceElement)
def _addVariableFont(
    self, parentElement: ET.Element, vf: VariableFontDescriptor
) -> None:
    """Serialize a VariableFontDescriptor into a <variable-font>
    element and append it to *parentElement*."""
    vfElement = ET.Element("variable-font")
    vfElement.attrib["name"] = vf.name
    if vf.filename is not None:
        vfElement.attrib["filename"] = vf.filename
    if vf.axisSubsets:
        subsetsElement = ET.Element("axis-subsets")
        for subset in vf.axisSubsets:
            subsetElement = ET.Element("axis-subset")
            subsetElement.attrib["name"] = subset.name
            # Mypy doesn't support narrowing union types via hasattr()
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            # TODO(Python 3.10): use TypeGuard
            if hasattr(subset, "userMinimum"):
                # range subset: write only the bounds that are not the
                # "unbounded" defaults (+/- infinity)
                subset = cast(RangeAxisSubsetDescriptor, subset)
                if subset.userMinimum != -math.inf:
                    subsetElement.attrib["userminimum"] = self.intOrFloat(
                        subset.userMinimum
                    )
                if subset.userMaximum != math.inf:
                    subsetElement.attrib["usermaximum"] = self.intOrFloat(
                        subset.userMaximum
                    )
                if subset.userDefault is not None:
                    subsetElement.attrib["userdefault"] = self.intOrFloat(
                        subset.userDefault
                    )
            elif hasattr(subset, "userValue"):
                # single-value (pinned) subset
                subset = cast(ValueAxisSubsetDescriptor, subset)
                subsetElement.attrib["uservalue"] = self.intOrFloat(
                    subset.userValue
                )
            subsetsElement.append(subsetElement)
        vfElement.append(subsetsElement)
    self._addLib(vfElement, vf.lib, 4)
    parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
    """Append a <lib> element wrapping *data* serialized as a plist
    tree; does nothing when *data* is empty."""
    if data:
        libElement = ET.Element("lib")
        libElement.append(plistlib.totree(data, indent_level=indent_level))
        parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
    """Build and return a <glyph> element from the per-glyph *data*
    dict (keys: mute, unicodes, instanceLocation, note, masters)."""
    glyphElement = ET.Element("glyph")
    if data.get("mute"):
        glyphElement.attrib["mute"] = "1"
    if data.get("unicodes") is not None:
        # unicodes are written as space-separated hex values
        glyphElement.attrib["unicode"] = " ".join(
            [hex(u) for u in data.get("unicodes")]
        )
    if data.get("instanceLocation") is not None:
        # _makeLocationElement also returns the (possibly filled-in)
        # location, which is written back into the data dict
        locationElement, data["instanceLocation"] = self._makeLocationElement(
            data.get("instanceLocation")
        )
        glyphElement.append(locationElement)
    if glyphName is not None:
        glyphElement.attrib["name"] = glyphName
    if data.get("note") is not None:
        noteElement = ET.Element("note")
        noteElement.text = data.get("note")
        glyphElement.append(noteElement)
    if data.get("masters") is not None:
        mastersElement = ET.Element("masters")
        for m in data.get("masters"):
            masterElement = ET.Element("master")
            if m.get("glyphName") is not None:
                masterElement.attrib["glyphname"] = m.get("glyphName")
            if m.get("font") is not None:
                masterElement.attrib["source"] = m.get("font")
            if m.get("location") is not None:
                locationElement, m["location"] = self._makeLocationElement(
                    m.get("location")
                )
                masterElement.append(locationElement)
            mastersElement.append(masterElement)
        glyphElement.append(mastersElement)
    return glyphElement
class BaseDocReader(LogMixin):
    """Read a designspace document from disk into a document object.

    The descriptor classes below are class attributes so subclasses can
    substitute compatible implementations.
    """

    # descriptor classes used to instantiate the parts of the document
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    axisMappingDescriptorClass = AxisMappingDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontsDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject):
    """Parse the XML tree at *documentPath*; actual content is pulled
    into *documentObject* by the read() method."""
    self.path = documentPath
    self.documentObject = documentObject
    tree = ET.parse(self.path)
    self.root = tree.getroot()
    # default to "3.0" when the root carries no format attribute
    self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
    self._axes = []
    self.rules = []
    self.sources = []
    self.instances = []
    # axis name -> default value, filled in by readAxes()
    self.axisDefaults = {}
    self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
    """Create a reader from an in-memory designspace document
    (str or bytes) instead of a file path."""
    data = tobytes(string, encoding="utf-8")
    reader = cls(BytesIO(data), documentObject)
    # this document did not come from a file on disk
    reader.path = None
    return reader
def read(self):
    """Parse the whole document: axes first (later parts depend on
    them), then labels, rules, variable fonts, sources, instances
    and finally the document lib."""
    for step in (
        self.readAxes,
        self.readLabels,
        self.readRules,
        self.readVariableFonts,
        self.readSources,
        self.readInstances,
        self.readLib,
    ):
        step()
def readRules(self):
    """Read <rule> elements into RuleDescriptor objects on the document.

    Honors the <rules> "processing" attribute ("first" or "last") and
    wraps any stray <condition> elements found outside a <conditionset>
    into a new condition set.
    """
    # we also need to read any conditions that are outside of a condition set.
    rules = []
    rulesElement = self.root.find(".rules")
    if rulesElement is not None:
        processingValue = rulesElement.attrib.get("processing", "first")
        if processingValue not in {"first", "last"}:
            # the message names the <rules> element whose attribute is
            # being validated (the tag name had been lost from the source)
            raise DesignSpaceDocumentError(
                "<rules> processing attribute value is not valid: %r, "
                "expected 'first' or 'last'" % processingValue
            )
        self.documentObject.rulesProcessingLast = processingValue == "last"
    for ruleElement in self.root.findall(".rules/rule"):
        ruleObject = self.ruleDescriptorClass()
        ruleName = ruleObject.name = ruleElement.attrib.get("name")
        # read any stray conditions outside a condition set
        externalConditions = self._readConditionElements(
            ruleElement,
            ruleName,
        )
        if externalConditions:
            ruleObject.conditionSets.append(externalConditions)
            self.log.info(
                "Found stray rule conditions outside a conditionset. "
                "Wrapped them in a new conditionset."
            )
        # read the conditionsets
        for conditionSetElement in ruleElement.findall(".conditionset"):
            conditionSet = self._readConditionElements(
                conditionSetElement,
                ruleName,
            )
            if conditionSet is not None:
                ruleObject.conditionSets.append(conditionSet)
        for subElement in ruleElement.findall(".sub"):
            a = subElement.attrib["name"]
            b = subElement.attrib["with"]
            ruleObject.subs.append((a, b))
        rules.append(ruleObject)
    self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
    """Parse the <condition> children of *parentElement* into a list of
    dicts with "minimum", "maximum" and "name" keys.

    A missing bound is stored as None (callers fall back to the axis
    extreme); a condition with neither bound raises
    DesignSpaceDocumentError, mentioning *ruleName* when given.
    """
    conditions = []
    for conditionElement in parentElement.findall(".condition"):
        minimumStr = conditionElement.attrib.get("minimum")
        maximumStr = conditionElement.attrib.get("maximum")
        condition = {
            # will allow these to be None, assume the axis extreme
            "minimum": float(minimumStr) if minimumStr is not None else None,
            "maximum": float(maximumStr) if maximumStr is not None else None,
            "name": conditionElement.attrib.get("name"),
        }
        # at least one bound is required for a meaningful condition
        if condition.get("minimum") is None and condition.get("maximum") is None:
            raise DesignSpaceDocumentError(
                "condition missing required minimum or maximum in rule"
                + (" '%s'" % ruleName if ruleName is not None else "")
            )
        conditions.append(condition)
    return conditions
def readAxes(self):
    # read the axes elements, including the warp map.
    axesElement = self.root.find(".axes")
    if axesElement is not None and "elidedfallbackname" in axesElement.attrib:
        self.documentObject.elidedFallbackName = axesElement.attrib[
            "elidedfallbackname"
        ]
    axisElements = self.root.findall(".axes/axis")
    if not axisElements:
        return
    for axisElement in axisElements:
        # a "values" attribute (format 5.0+) marks a discrete axis;
        # otherwise it is a continuous axis with min/max/default
        if (
            self.documentObject.formatTuple >= (5, 0)
            and "values" in axisElement.attrib
        ):
            axisObject = self.discreteAxisDescriptorClass()
            axisObject.values = [
                float(s) for s in axisElement.attrib["values"].split(" ")
            ]
        else:
            axisObject = self.axisDescriptorClass()
            axisObject.minimum = float(axisElement.attrib.get("minimum"))
            axisObject.maximum = float(axisElement.attrib.get("maximum"))
        axisObject.default = float(axisElement.attrib.get("default"))
        axisObject.name = axisElement.attrib.get("name")
        # NOTE(review): any non-empty string value — including "0" —
        # makes the axis hidden here; only the presence of the
        # attribute matters, not its value.
        if axisElement.attrib.get("hidden", False):
            axisObject.hidden = True
        axisObject.tag = axisElement.attrib.get("tag")
        for mapElement in axisElement.findall("map"):
            a = float(mapElement.attrib["input"])
            b = float(mapElement.attrib["output"])
            axisObject.map.append((a, b))
        for labelNameElement in axisElement.findall("labelname"):
            # Note: elementtree reads the "xml:lang" attribute name as
            # '{http://www.w3.org/XML/1998/namespace}lang'
            for key, lang in labelNameElement.items():
                if key == XML_LANG:
                    axisObject.labelNames[lang] = tostr(labelNameElement.text)
        labelElement = axisElement.find(".labels")
        if labelElement is not None:
            if "ordering" in labelElement.attrib:
                axisObject.axisOrdering = int(labelElement.attrib["ordering"])
            for label in labelElement.findall(".label"):
                axisObject.axisLabels.append(self.readAxisLabel(label))
        self.documentObject.axes.append(axisObject)
        self.axisDefaults[axisObject.name] = axisObject.default
    # axis value mappings (format 5.1): <mappings> groups of <mapping>
    self.documentObject.axisMappings = []
    for mappingsElement in self.root.findall(".axes/mappings"):
        groupDescription = mappingsElement.attrib.get("description")
        for mappingElement in mappingsElement.findall("mapping"):
            description = mappingElement.attrib.get("description")
            inputElement = mappingElement.find("input")
            outputElement = mappingElement.find("output")
            inputLoc = {}
            outputLoc = {}
            for dimElement in inputElement.findall(".dimension"):
                name = dimElement.attrib["name"]
                value = float(dimElement.attrib["xvalue"])
                inputLoc[name] = value
            for dimElement in outputElement.findall(".dimension"):
                name = dimElement.attrib["name"]
                value = float(dimElement.attrib["xvalue"])
                outputLoc[name] = value
            axisMappingObject = self.axisMappingDescriptorClass(
                inputLocation=inputLoc,
                outputLocation=outputLoc,
                description=description,
                groupDescription=groupDescription,
            )
            self.documentObject.axisMappings.append(axisMappingObject)
def readAxisLabel(self, element: ET.Element):
    """Parse one axis <label> element into an AxisLabelDescriptor;
    raises DesignSpaceDocumentError on unknown or missing attributes."""
    xml_attrs = {
        "userminimum",
        "uservalue",
        "usermaximum",
        "name",
        "elidable",
        "oldersibling",
        "linkeduservalue",
    }
    unknown_attrs = set(element.attrib) - xml_attrs
    if unknown_attrs:
        raise DesignSpaceDocumentError(
            f"label element contains unknown attributes: {', '.join(unknown_attrs)}"
        )
    # "name" and "uservalue" are required; the rest are optional
    name = element.get("name")
    if name is None:
        raise DesignSpaceDocumentError("label element must have a name attribute.")
    valueStr = element.get("uservalue")
    if valueStr is None:
        raise DesignSpaceDocumentError(
            "label element must have a uservalue attribute."
        )
    value = float(valueStr)
    minimumStr = element.get("userminimum")
    minimum = float(minimumStr) if minimumStr is not None else None
    maximumStr = element.get("usermaximum")
    maximum = float(maximumStr) if maximumStr is not None else None
    linkedValueStr = element.get("linkeduservalue")
    linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
    # flags are only true when the attribute is literally "true"
    elidable = True if element.get("elidable") == "true" else False
    olderSibling = True if element.get("oldersibling") == "true" else False
    labelNames = {
        lang: label_name.text or ""
        for label_name in element.findall("labelname")
        for attr, lang in label_name.items()
        if attr == XML_LANG
        # Note: elementtree reads the "xml:lang" attribute name as
        # '{http://www.w3.org/XML/1998/namespace}lang'
    }
    return self.axisLabelDescriptorClass(
        name=name,
        userValue=value,
        userMinimum=minimum,
        userMaximum=maximum,
        elidable=elidable,
        olderSibling=olderSibling,
        linkedUserValue=linkedValue,
        labelNames=labelNames,
    )
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {"name", "elidable", "oldersibling"}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(
f"Label element contains unknown attributes: {', '.join(unknown_attrs)}"
)
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError(
"label element must have a name attribute."
)
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(
f'
"""Extend the Python codecs module with a few encodings that are used in OpenType (name table)
but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
import codecs
import encodings
class ExtendCodec(codecs.Codec):
    """A codec that layers a small extra byte<->character mapping on top
    of an existing base encoding.

    The trick: the codec registers *itself* as an error handler under
    its own name, and _map() calls the base codec with errors=self.name,
    so any byte/char the base encoding cannot handle is routed back to
    self.error(), which consults the extra mapping before giving up.
    """

    def __init__(self, name, base_encoding, mapping):
        self.name = name
        self.base_encoding = base_encoding
        self.mapping = mapping  # extra bytes -> str entries
        self.reverse = {v: k for k, v in mapping.items()}  # str -> bytes
        # longest mapped string; bounds the lookahead in error()
        self.max_len = max(len(v) for v in mapping.values())
        self.info = codecs.CodecInfo(
            name=self.name, encode=self.encode, decode=self.decode
        )
        codecs.register_error(name, self.error)

    def _map(self, mapper, output_type, exc_type, input, errors):
        """Shared encode/decode driver: convert *input* piece by piece,
        letting self.error() supply mapped replacements and delegating
        remaining failures to the caller-requested error handler."""
        base_error_handler = codecs.lookup_error(errors)
        length = len(input)
        out = output_type()
        while input:
            # first try to use self.error as the error handler
            try:
                part = mapper(input, self.base_encoding, errors=self.name)
                out += part
                break  # All converted
            except exc_type as e:
                # else convert the correct part, handle error as requested
                # and continue with the remainder of the input
                out += mapper(input[: e.start], self.base_encoding, self.name)
                replacement, pos = base_error_handler(e)
                out += replacement
                input = input[pos:]
        return out, length

    def encode(self, input, errors="strict"):
        return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)

    def decode(self, input, errors="strict"):
        return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)

    def error(self, e):
        """Error-handler hook: resolve the failing span via the extra
        mapping; re-raise (tagged with this codec's name) when the span
        is not in the mapping."""
        if isinstance(e, UnicodeDecodeError):
            # try successively longer byte prefixes of the failing range
            for end in range(e.start + 1, e.end + 1):
                s = e.object[e.start : end]
                if s in self.mapping:
                    return self.mapping[s], end
        elif isinstance(e, UnicodeEncodeError):
            for end in range(e.start + 1, e.start + self.max_len + 1):
                s = e.object[e.start : end]
                if s in self.reverse:
                    return self.reverse[s], end
        e.encoding = self.name
        raise e
# Extra byte<->character mappings layered on top of existing East Asian
# encodings to form the "..._ttx" codec variants registered below:
# codec name -> (base encoding, {extra byte: extra character}).
_extended_encodings = {
    "x_mac_japanese_ttx": (
        "shift_jis",
        {
            b"\xFC": chr(0x007C),
            b"\x7E": chr(0x007E),
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_trad_chinese_ttx": (
        "big5",
        {
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_korean_ttx": (
        "euc_kr",
        {
            b"\x80": chr(0x00A0),
            b"\x81": chr(0x20A9),
            b"\x82": chr(0x2014),
            b"\x83": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_simp_chinese_ttx": (
        "gb2312",
        {
            b"\x80": chr(0x00FC),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
}

# codec name -> ExtendCodec instance, filled lazily by search_function()
_cache = {}
def search_function(name):
    """Codec search hook: lazily build and cache an ExtendCodec for one
    of the "..._ttx" encodings; return None for any other name so that
    Python continues its normal codec lookup."""
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name in _extended_encodings:
        if name not in _cache:
            base_encoding, mapping = _extended_encodings[name]
            assert name[-4:] == "_ttx"
            # Python 2 didn't have any of the encodings that we are implementing
            # in this file. Python 3 added aliases for the East Asian ones, mapping
            # them "temporarily" to the same base encoding as us, with a comment
            # suggesting that full implementation will appear some time later.
            # As such, try the Python version of the x_mac_... first, if that is found,
            # use *that* as our base encoding. This would make our encoding upgrade
            # to the full encoding when and if Python finally implements that.
            # http://bugs.python.org/issue24041
            base_encodings = [name[:-4], base_encoding]
            for base_encoding in base_encodings:
                try:
                    codecs.lookup(base_encoding)
                except LookupError:
                    continue
                _cache[name] = ExtendCodec(name, base_encoding, mapping)
                break
        return _cache[name].info
    return None


# hook the "..._ttx" encodings into Python's codec machinery
codecs.register(search_function)
"""Empty __init__.py file to signal Python this directory is a package."""
# File: venv\Lib\site-packages\fontTools\feaLib\ast.py
import weakref
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.location import FeatureLibLocation
from fontTools.misc.encodingTools import getEncoding
from fontTools.misc.textTools import byteord, tobytes
from collections import OrderedDict
import itertools
# one level of indentation used when pretty-printing feature code
SHIFT = " " * 4

# explicit public API of this module
__all__ = [
    "Element",
    "FeatureFile",
    "Comment",
    "GlyphName",
    "GlyphClass",
    "GlyphClassName",
    "MarkClassName",
    "AnonymousBlock",
    "Block",
    "FeatureBlock",
    "NestedBlock",
    "LookupBlock",
    "GlyphClassDefinition",
    "GlyphClassDefStatement",
    "MarkClass",
    "MarkClassDefinition",
    "AlternateSubstStatement",
    "Anchor",
    "AnchorDefinition",
    "AttachStatement",
    "AxisValueLocationStatement",
    "BaseAxis",
    "CVParametersNameStatement",
    "ChainContextPosStatement",
    "ChainContextSubstStatement",
    "CharacterStatement",
    "ConditionsetStatement",
    "CursivePosStatement",
    "ElidedFallbackName",
    "ElidedFallbackNameID",
    "Expression",
    "FeatureNameStatement",
    "FeatureReferenceStatement",
    "FontRevisionStatement",
    "HheaField",
    "IgnorePosStatement",
    "IgnoreSubstStatement",
    "IncludeStatement",
    "LanguageStatement",
    "LanguageSystemStatement",
    "LigatureCaretByIndexStatement",
    "LigatureCaretByPosStatement",
    "LigatureSubstStatement",
    "LookupFlagStatement",
    "LookupReferenceStatement",
    "MarkBasePosStatement",
    "MarkLigPosStatement",
    "MarkMarkPosStatement",
    "MultipleSubstStatement",
    "NameRecord",
    "OS2Field",
    "PairPosStatement",
    "ReverseChainSingleSubstStatement",
    "ScriptStatement",
    "SinglePosStatement",
    "SingleSubstStatement",
    "SizeParameters",
    "Statement",
    "STATAxisValueStatement",
    "STATDesignAxisStatement",
    "STATNameStatement",
    "SubtableStatement",
    "TableBlock",
    "ValueRecord",
    "ValueRecordDefinition",
    "VheaField",
]
def deviceToString(device):
    """Return the feature-file textual form of a device table: the empty
    string for None, otherwise e.g. "<device 11 111, 12 112>" where each
    entry of *device* is a (size, delta) pair.

    Fix: the "<device %s>" format literal had been lost from the source
    (leaving "" % ..., which raises TypeError); restored.
    """
    if device is None:
        return ""
    else:
        return "<device %s>" % ", ".join("%d %d" % t for t in device)
# Reserved words of the FEA language; asFea() escapes glyph names that
# collide with these by prefixing a backslash.
fea_keywords = set(
    [
        "anchor",
        "anchordef",
        "anon",
        "anonymous",
        "by",
        "contour",
        "cursive",
        "device",
        "enum",
        "enumerate",
        "excludedflt",
        "exclude_dflt",
        "feature",
        "from",
        "ignore",
        "ignorebaseglyphs",
        "ignoreligatures",
        "ignoremarks",
        "include",
        "includedflt",
        "include_dflt",
        "language",
        "languagesystem",
        "lookup",
        "lookupflag",
        "mark",
        "markattachmenttype",
        "markclass",
        "nameid",
        "null",
        "parameters",
        "pos",
        "position",
        "required",
        "righttoleft",
        "reversesub",
        "rsub",
        "script",
        "sub",
        "substitute",
        "subtable",
        "table",
        "usemarkfilteringset",
        "useextension",
        "valuerecorddef",
        "base",
        "gdef",
        "head",
        "hhea",
        "name",
        "vhea",
        "vmtx",
    ]
)
def asFea(g):
    """Render *g* as feature-file text: delegate to g.asFea() when it
    exists, format a 2-tuple as a glyph range, and backslash-escape
    plain names that collide with FEA keywords."""
    if hasattr(g, "asFea"):
        return g.asFea()
    if isinstance(g, tuple) and len(g) == 2:
        start, end = g
        return asFea(start) + " - " + asFea(end)  # a range
    if g.lower() in fea_keywords:
        return "\\" + g
    return g
class Element(object):
    """A base class representing "something" in a feature file."""

    def __init__(self, location=None):
        #: location of this element as a `FeatureLibLocation` object.
        # plain tuples are promoted to FeatureLibLocation; None stays None
        if location and not isinstance(location, FeatureLibLocation):
            location = FeatureLibLocation(*location)
        self.location = location

    def build(self, builder):
        # default: nothing to contribute to the builder
        pass

    def asFea(self, indent=""):
        """Returns this element as a string of feature code. For block-type
        elements (such as :class:`FeatureBlock`), the `indent` string is
        added to the start of each line in the output."""
        raise NotImplementedError

    def __str__(self):
        return self.asFea()
class Statement(Element):
    """Base class for statements in a feature file."""

    pass
class Expression(Element):
    """Base class for expressions in a feature file."""

    pass
class Comment(Element):
    """A comment in a feature file."""

    def __init__(self, text, location=None):
        Element.__init__(self, location)
        self.text = text  # verbatim text of the comment

    def asFea(self, indent=""):
        return self.text
class NullGlyph(Expression):
    """The NULL glyph, used in glyph deletion substitutions."""

    def __init__(self, location=None):
        Expression.__init__(self, location)

    def glyphSet(self):
        """The glyphs in this class: always the empty tuple."""
        return ()

    def asFea(self, indent=""):
        return "NULL"
class GlyphName(Expression):
    """A single glyph name, such as ``cedilla``."""

    def __init__(self, glyph, location=None):
        Expression.__init__(self, location)
        self.glyph = glyph  # the name itself, as a string

    def glyphSet(self):
        """This glyph as a one-element tuple."""
        return (self.glyph,)

    def asFea(self, indent=""):
        # escape the name if it collides with an FEA keyword
        return asFea(self.glyph)
class GlyphClass(Expression):
    """A glyph class, such as ``[acute cedilla grave]``."""

    def __init__(self, glyphs=None, location=None):
        Expression.__init__(self, location)
        #: The list of glyphs in this class, as :class:`GlyphName` objects.
        self.glyphs = glyphs if glyphs is not None else []
        # `original` remembers source-level items (plain glyphs, ranges,
        # class references) so asFea() can reproduce the compact input
        # notation; `curr` is the index into `glyphs` up to which items
        # have already been mirrored into `original`.
        self.original = []
        self.curr = 0

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs)

    def asFea(self, indent=""):
        if len(self.original):
            # flush plain glyphs appended since the last range/class add
            if self.curr < len(self.glyphs):
                self.original.extend(self.glyphs[self.curr :])
                self.curr = len(self.glyphs)
            return "[" + " ".join(map(asFea, self.original)) + "]"
        else:
            return "[" + " ".join(map(asFea, self.glyphs)) + "]"

    def extend(self, glyphs):
        """Add a list of :class:`GlyphName` objects to the class."""
        self.glyphs.extend(glyphs)

    def append(self, glyph):
        """Add a single :class:`GlyphName` object to the class."""
        self.glyphs.append(glyph)

    def add_range(self, start, end, glyphs):
        """Add a range (e.g. ``A-Z``) to the class. ``start`` and ``end``
        are either :class:`GlyphName` objects or strings representing the
        start and end glyphs in the class, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append((start, end))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)

    def add_cid_range(self, start, end, glyphs):
        """Add a range to the class by glyph ID. ``start`` and ``end`` are the
        initial and final IDs, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(("\\{}".format(start), "\\{}".format(end)))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)

    def add_class(self, gc):
        """Add glyphs from the given :class:`GlyphClassName` object to the
        class."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(gc)
        self.glyphs.extend(gc.glyphSet())
        self.curr = len(self.glyphs)
class GlyphClassName(Expression):
    """A glyph class name, such as ``@FRENCH_MARKS``. This must be
    instantiated with a :class:`GlyphClassDefinition` object."""

    def __init__(self, glyphclass, location=None):
        Expression.__init__(self, location)
        assert isinstance(glyphclass, GlyphClassDefinition)
        self.glyphclass = glyphclass

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphclass.glyphSet())

    def asFea(self, indent=""):
        return "@{}".format(self.glyphclass.name)
class MarkClassName(Expression):
    """A mark class name, such as ``@FRENCH_MARKS`` defined with
    ``markClass``. This must be instantiated with a :class:`MarkClass`
    object."""

    def __init__(self, markClass, location=None):
        Expression.__init__(self, location)
        assert isinstance(markClass, MarkClass)
        self.markClass = markClass

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return self.markClass.glyphSet()

    def asFea(self, indent=""):
        return "@{}".format(self.markClass.name)
class AnonymousBlock(Statement):
    """An anonymous data block."""

    def __init__(self, tag, content, location=None):
        Statement.__init__(self, location)
        self.tag = tag  # string containing the block's "tag"
        self.content = content  # block data as string

    def asFea(self, indent=""):
        pieces = [
            "anon %s {\n" % self.tag,
            self.content,
            "} %s;\n\n" % self.tag,
        ]
        return "".join(pieces)
class Block(Statement):
    """A block of statements: feature, lookup, etc."""

    def __init__(self, location=None):
        Statement.__init__(self, location)
        self.statements = []  # statements contained in the block

    def build(self, builder):
        """When handed a 'builder' object of comparable interface to
        :class:`fontTools.feaLib.builder`, walks the statements in this
        block, calling the builder callbacks."""
        for statement in self.statements:
            statement.build(builder)

    def asFea(self, indent=""):
        # one extra indentation level for the block's contents
        indent += SHIFT
        body = ("\n" + indent).join(
            statement.asFea(indent=indent) for statement in self.statements
        )
        return indent + body + "\n"
class FeatureFile(Block):
    """The top-level element of the syntax tree, containing the whole feature
    file in its ``statements`` attribute."""

    def __init__(self):
        Block.__init__(self, location=None)
        self.markClasses = {}  # name --> ast.MarkClass

    def asFea(self, indent=""):
        rendered = [s.asFea(indent=indent) for s in self.statements]
        return "\n".join(rendered)
class FeatureBlock(Block):
    """A named feature block."""

    def __init__(self, name, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name, self.use_extension = name, use_extension

    def build(self, builder):
        """Call the ``start_feature`` callback on the builder object, visit
        all the statements in this feature, and then call ``end_feature``."""
        builder.start_feature(self.location, self.name, self.use_extension)
        # language exclude_dflt statements modify builder.features_
        # limit them to this block with temporary builder.features_
        features = builder.features_
        builder.features_ = {}
        Block.build(self, builder)
        # merge the block-local features back into the saved dict
        for key, value in builder.features_.items():
            features.setdefault(key, []).extend(value)
        builder.features_ = features
        builder.end_feature()

    def asFea(self, indent=""):
        res = indent + "feature %s " % self.name.strip()
        if self.use_extension:
            res += "useExtension "
        res += "{\n"
        res += Block.asFea(self, indent=indent)
        res += indent + "} %s;\n" % self.name.strip()
        return res
class NestedBlock(Block):
    """A block inside another block, for example when found inside a
    ``cvParameters`` block."""

    def __init__(self, tag, block_name, location=None):
        Block.__init__(self, location)
        self.tag = tag
        self.block_name = block_name

    def build(self, builder):
        """Build contained statements; register a named param for
        ``ParamUILabelNameID`` blocks."""
        Block.build(self, builder)
        if self.block_name == "ParamUILabelNameID":
            builder.add_to_cv_num_named_params(self.tag)

    def asFea(self, indent=""):
        """Render as ``<block_name> { ... };`` at the given indentation."""
        pieces = [
            "{}{} {{\n".format(indent, self.block_name),
            Block.asFea(self, indent=indent),
            "{}}};\n".format(indent),
        ]
        return "".join(pieces)
class LookupBlock(Block):
    """A named lookup, containing ``statements``."""

    def __init__(self, name, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name, self.use_extension = name, use_extension

    def build(self, builder):
        """Bracket the contained statements with the builder's
        ``start_lookup_block`` / ``end_lookup_block`` callbacks."""
        builder.start_lookup_block(self.location, self.name, self.use_extension)
        Block.build(self, builder)
        builder.end_lookup_block()

    def asFea(self, indent=""):
        """Render as ``lookup <name> [useExtension] { ... } <name>;``."""
        header = "lookup {} ".format(self.name)
        if self.use_extension:
            header += "useExtension "
        body = Block.asFea(self, indent=indent)
        footer = "{}}} {};\n".format(indent, self.name)
        return header + "{\n" + body + footer
class TableBlock(Block):
    """A ``table ... { }`` block."""

    def __init__(self, name, location=None):
        Block.__init__(self, location)
        self.name = name

    def asFea(self, indent=""):
        """Render as ``table <tag> { ... } <tag>;``."""
        tag = self.name.strip()
        return (
            "table {} {{\n".format(tag)
            + super(TableBlock, self).asFea(indent=indent)
            + "}} {};\n".format(tag)
        )
class GlyphClassDefinition(Statement):
    """Example: ``@UPPERCASE = [A-Z];``."""

    def __init__(self, name, glyphs, location=None):
        Statement.__init__(self, location)
        self.name = name  #: class name as a string, without initial ``@``
        self.glyphs = glyphs  #: a :class:`GlyphClass` object

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs.glyphSet())

    def asFea(self, indent=""):
        """Render as ``@<name> = <glyphs>;``."""
        return "@{} = {};".format(self.name, self.glyphs.asFea())
class GlyphClassDefStatement(Statement):
    """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. The parameters
    must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or
    ``None``."""

    def __init__(
        self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None
    ):
        Statement.__init__(self, location)
        self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs)
        self.ligatureGlyphs = ligatureGlyphs
        self.componentGlyphs = componentGlyphs

    def build(self, builder):
        """Calls the builder's ``add_glyphClassDef`` callback."""
        # Missing classes become empty tuples so the builder always receives
        # four positional groups.
        base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple()
        liga = self.ligatureGlyphs.glyphSet() if self.ligatureGlyphs else tuple()
        mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple()
        comp = self.componentGlyphs.glyphSet() if self.componentGlyphs else tuple()
        # NOTE: the callback order here is base, liga, mark, comp — which
        # differs from this constructor's (base, mark, liga, comp) order.
        builder.add_glyphClassDef(self.location, base, liga, mark, comp)

    def asFea(self, indent=""):
        # Output order is base, ligature, mark, component — the order the
        # FEA ``GlyphClassDef`` statement expects.
        return "GlyphClassDef {}, {}, {}, {};".format(
            self.baseGlyphs.asFea() if self.baseGlyphs else "",
            self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "",
            self.markGlyphs.asFea() if self.markGlyphs else "",
            self.componentGlyphs.asFea() if self.componentGlyphs else "",
        )
class MarkClass(object):
    """One `or more` ``markClass`` statements for the same mark class.
    While glyph classes can be defined only once, the feature file format
    allows expanding mark classes with multiple definitions, each using
    different glyphs and anchors. The following are two ``MarkClassDefinitions``
    for the same ``MarkClass``::
        markClass [acute grave] @FRENCH_ACCENTS;
        markClass [cedilla] @FRENCH_ACCENTS;
    The ``MarkClass`` object is therefore just a container for a list of
    :class:`MarkClassDefinition` statements.
    """

    def __init__(self, name):
        self.name = name  # class name, without the leading ``@``
        self.definitions = []  # MarkClassDefinition proxies, in source order
        self.glyphs = OrderedDict()  # glyph --> ast.MarkClassDefinitions

    def addDefinition(self, definition):
        """Add a :class:`MarkClassDefinition` statement to this mark class.

        Raises:
            FeatureLibError: if any glyph in ``definition`` already belongs
                to this mark class (duplicate glyph definition).
        """
        assert isinstance(definition, MarkClassDefinition)
        # Held as a weakref proxy — presumably to avoid a strong reference
        # cycle with the definition, which also references this class;
        # TODO(review): confirm lifetime expectations.
        self.definitions.append(weakref.proxy(definition))
        for glyph in definition.glyphSet():
            if glyph in self.glyphs:
                # Report where the glyph was first defined, if known.
                otherLoc = self.glyphs[glyph].location
                if otherLoc is None:
                    end = ""
                else:
                    end = f" at {otherLoc}"
                raise FeatureLibError(
                    "Glyph %s already defined%s" % (glyph, end), definition.location
                )
            self.glyphs[glyph] = definition

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs.keys())

    def asFea(self, indent=""):
        """Render each contained definition as its own ``markClass`` line."""
        res = "\n".join(d.asFea() for d in self.definitions)
        return res
class MarkClassDefinition(Statement):
    """A single ``markClass`` statement. The ``markClass`` should be a
    :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object,
    and the ``glyphs`` parameter should be a `glyph-containing object`_ .
    Example:
    .. code:: python
        mc = MarkClass("FRENCH_ACCENTS")
        mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800),
            GlyphClass([ GlyphName("acute"), GlyphName("grave") ])
        ) )
        mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200),
            GlyphClass([ GlyphName("cedilla") ])
        ) )
        mc.asFea()
        # markClass [acute grave] @FRENCH_ACCENTS;
        # markClass [cedilla] @FRENCH_ACCENTS;
    """

    def __init__(self, markClass, anchor, glyphs, location=None):
        Statement.__init__(self, location)
        assert isinstance(markClass, MarkClass)
        assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression)
        self.markClass = markClass
        self.anchor = anchor
        self.glyphs = glyphs

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return self.glyphs.glyphSet()

    def asFea(self, indent=""):
        """Render as ``markClass <glyphs> <anchor> @<class>;``."""
        glyphs_fea = self.glyphs.asFea()
        anchor_fea = self.anchor.asFea()
        return f"markClass {glyphs_fea} {anchor_fea} @{self.markClass.name};"
class AlternateSubstStatement(Statement):
    """A ``sub ... from ...`` statement.
    ``glyph`` and ``replacement`` should be `glyph-containing objects`_.
    ``prefix`` and ``suffix`` should be lists of `glyph-containing objects`_."""

    def __init__(self, prefix, glyph, suffix, replacement, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix)
        self.replacement = replacement

    def build(self, builder):
        """Calls the builder's ``add_alternate_subst`` callback."""
        glyphset = self.glyph.glyphSet()
        # An alternate substitution always targets exactly one glyph.
        assert len(glyphset) == 1, glyphset
        target = list(glyphset)[0]
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        alternates = self.replacement.glyphSet()
        builder.add_alternate_subst(self.location, prefix, target, suffix, alternates)

    def asFea(self, indent=""):
        """Render as ``sub [context] <glyph>['] from <alternates>;``."""
        if self.prefix or self.suffix:
            # Contextual form: mark the target glyph with an apostrophe.
            parts = [asFea(p) for p in self.prefix]
            parts.append(asFea(self.glyph) + "'")  # even though we really only use 1
            parts.extend(asFea(s) for s in self.suffix)
            core = " ".join(parts)
        else:
            core = asFea(self.glyph)
        return "sub " + core + " from " + asFea(self.replacement) + ";"
class Anchor(Expression):
    """An ``<anchor>`` element, used inside a ``pos`` rule.

    If a ``name`` is given, this will be used in preference to the coordinates.
    Other values should be integer.
    """

    def __init__(
        self,
        x,
        y,
        name=None,
        contourpoint=None,
        xDeviceTable=None,
        yDeviceTable=None,
        location=None,
    ):
        Expression.__init__(self, location)
        self.name = name  # named anchor (``anchorDef``) reference, or None
        self.x, self.y, self.contourpoint = x, y, contourpoint
        self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable

    @staticmethod
    def _deviceToString(device):
        # Render a device table as FEA text; an absent table is <device NULL>.
        # ``device`` is expected to be an iterable of (ppem, delta) pairs.
        if device is None:
            return "<device NULL>"
        return "<device {}>".format(", ".join("%d %d" % t for t in device))

    def asFea(self, indent=""):
        # NOTE(review): the ``<anchor ...>`` payload of these format strings
        # (and the contourpoint/device-table rendering between the opening
        # "<anchor" and the closing ">") was lost to angle-bracket stripping;
        # reconstructed per the FEA anchor syntax.
        if self.name is not None:
            return "<anchor {}>".format(self.name)
        res = "<anchor {} {}".format(self.x, self.y)
        if self.contourpoint:
            res += " contourpoint {}".format(self.contourpoint)
        if self.xDeviceTable or self.yDeviceTable:
            res += " " + self._deviceToString(self.xDeviceTable)
            res += " " + self._deviceToString(self.yDeviceTable)
        res += ">"
        return res
class AnchorDefinition(Statement):
    """A named anchor definition. (2.e.viii). ``name`` should be a string."""

    def __init__(self, name, x, y, contourpoint=None, location=None):
        Statement.__init__(self, location)
        self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint

    def asFea(self, indent=""):
        """Render as ``anchorDef <x> <y> [contourpoint <n>] <name>;``."""
        pieces = ["anchorDef {} {}".format(self.x, self.y)]
        if self.contourpoint:
            pieces.append("contourpoint {}".format(self.contourpoint))
        pieces.append("{};".format(self.name))
        return " ".join(pieces)
class AttachStatement(Statement):
    """A ``GDEF`` table ``Attach`` statement."""

    def __init__(self, glyphs, contourPoints, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs  #: A `glyph-containing object`_
        self.contourPoints = contourPoints  #: A list of integer contour points

    def build(self, builder):
        """Calls the builder's ``add_attach_points`` callback."""
        builder.add_attach_points(
            self.location, self.glyphs.glyphSet(), self.contourPoints
        )

    def asFea(self, indent=""):
        """Render as ``Attach <glyphs> <points...>;``."""
        points = " ".join(str(point) for point in self.contourPoints)
        return "Attach {} {};".format(self.glyphs.asFea(), points)
class ChainContextPosStatement(Statement):
    r"""A chained contextual positioning statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .
    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""

    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        # Normalize: every non-None entry becomes a list of lookups, so a
        # bare LookupBlock is wrapped in a one-element list.
        self.lookups = list(lookups)
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    iter(lookup)
                except TypeError:
                    self.lookups[i] = [lookup]

    def build(self, builder):
        """Calls the builder's ``add_chain_context_pos`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_pos(
            self.location, prefix, glyphs, suffix, self.lookups
        )

    def asFea(self, indent=""):
        res = "pos "
        # Use the contextual (marked-glyph) form if there is any context or
        # any lookup to apply; otherwise emit the glyphs unmarked.
        if (
            len(self.prefix)
            or len(self.suffix)
            or any([x is not None for x in self.lookups])
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"  # ' marks glyphs in the input sequence
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class ChainContextSubstStatement(Statement):
    r"""A chained contextual substitution statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .
    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""

    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        # Normalize: every non-None entry becomes a list of lookups, so a
        # bare LookupBlock is wrapped in a one-element list.
        self.lookups = list(lookups)
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    iter(lookup)
                except TypeError:
                    self.lookups[i] = [lookup]

    def build(self, builder):
        """Calls the builder's ``add_chain_context_subst`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_subst(
            self.location, prefix, glyphs, suffix, self.lookups
        )

    def asFea(self, indent=""):
        res = "sub "
        # Use the contextual (marked-glyph) form if there is any context or
        # any lookup to apply; otherwise emit the glyphs unmarked.
        if (
            len(self.prefix)
            or len(self.suffix)
            or any([x is not None for x in self.lookups])
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"  # ' marks glyphs in the input sequence
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class CursivePosStatement(Statement):
    """A cursive positioning statement. Entry and exit anchors can either
    be :class:`Anchor` objects or ``None``."""

    def __init__(self, glyphclass, entryAnchor, exitAnchor, location=None):
        Statement.__init__(self, location)
        self.glyphclass = glyphclass
        self.entryAnchor, self.exitAnchor = entryAnchor, exitAnchor

    def build(self, builder):
        """Calls the builder object's ``add_cursive_pos`` callback."""
        builder.add_cursive_pos(
            self.location, self.glyphclass.glyphSet(), self.entryAnchor, self.exitAnchor
        )

    def asFea(self, indent=""):
        # NOTE(review): a missing entry/exit anchor must be written as
        # ``<anchor NULL>`` in FEA syntax; the literal had been stripped to
        # an empty string by angle-bracket removal — restored here.
        entry = self.entryAnchor.asFea() if self.entryAnchor else "<anchor NULL>"
        exit = self.exitAnchor.asFea() if self.exitAnchor else "<anchor NULL>"
        return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit)
class FeatureReferenceStatement(Statement):
    """Example: ``feature salt;``"""

    def __init__(self, featureName, location=None):
        Statement.__init__(self, location)
        self.location, self.featureName = (location, featureName)

    def build(self, builder):
        """Calls the builder object's ``add_feature_reference`` callback."""
        builder.add_feature_reference(self.location, self.featureName)

    def asFea(self, indent=""):
        """Render as ``feature <name>;``."""
        return f"feature {self.featureName};"
class IgnorePosStatement(Statement):
    """An ``ignore pos`` statement, containing `one or more` contexts to ignore.
    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples,
    with each of ``prefix``, ``glyphs`` and ``suffix`` being
    `glyph-containing objects`_ ."""

    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts

    def build(self, builder):
        """Calls the builder object's ``add_chain_context_pos`` callback on each
        rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_pos(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],  # no lookups: this context is only being ignored
            )

    def asFea(self, indent=""):
        """Render as ``ignore pos <ctx>[, <ctx>...];``; glyphs are marked
        with ``'`` whenever a prefix or suffix is present."""
        rendered = []
        for prefix, glyphs, suffix in self.chainContexts:
            if len(prefix) or len(suffix):
                parts = [asFea(p) for p in prefix]
                parts.extend(g.asFea() + "'" for g in glyphs)
                parts.extend(asFea(s) for s in suffix)
            else:
                parts = [asFea(g) for g in glyphs]
            rendered.append(" ".join(parts))
        return "ignore pos " + ", ".join(rendered) + ";"
class IgnoreSubstStatement(Statement):
    """An ``ignore sub`` statement, containing `one or more` contexts to ignore.
    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples,
    with each of ``prefix``, ``glyphs`` and ``suffix`` being
    `glyph-containing objects`_ ."""

    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts

    def build(self, builder):
        """Calls the builder object's ``add_chain_context_subst`` callback on
        each rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_subst(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],  # no lookups: this context is only being ignored
            )

    def asFea(self, indent=""):
        """Render as ``ignore sub <ctx>[, <ctx>...];``; input glyphs are
        always marked with ``'``."""
        rendered = []
        for prefix, glyphs, suffix in self.chainContexts:
            parts = [asFea(p) for p in prefix]
            parts.extend(g.asFea() + "'" for g in glyphs)
            parts.extend(asFea(s) for s in suffix)
            rendered.append(" ".join(parts))
        return "ignore sub " + ", ".join(rendered) + ";"
class IncludeStatement(Statement):
    """An ``include()`` statement."""

    def __init__(self, filename, location=None):
        super(IncludeStatement, self).__init__(location)
        self.filename = filename  #: String containing name of file to include

    def build(self):
        """Always raises: includes must be resolved at parse time."""
        # TODO: consider lazy-loading the including parser/lexer?
        raise FeatureLibError(
            "Building an include statement is not implemented yet. "
            "Instead, use Parser(..., followIncludes=True) for building.",
            self.location,
        )

    def asFea(self, indent=""):
        """Render as ``include(<filename>);`` at the given indentation."""
        return "{}include({});".format(indent, self.filename)
class LanguageStatement(Statement):
    """A ``language`` statement within a feature."""

    def __init__(self, language, include_default=True, required=False, location=None):
        Statement.__init__(self, location)
        assert len(language) == 4
        self.language = language  #: A four-character language tag
        self.include_default = include_default  #: If false, "exclude_dflt"
        self.required = required

    def build(self, builder):
        """Call the builder object's ``set_language`` callback."""
        builder.set_language(
            location=self.location,
            language=self.language,
            include_default=self.include_default,
            required=self.required,
        )

    def asFea(self, indent=""):
        """Render as ``language <tag> [exclude_dflt] [required];``."""
        parts = ["language {}".format(self.language.strip())]
        if not self.include_default:
            parts.append("exclude_dflt")
        if self.required:
            parts.append("required")
        return " ".join(parts) + ";"
class LanguageSystemStatement(Statement):
    """A top-level ``languagesystem`` statement."""

    def __init__(self, script, language, location=None):
        Statement.__init__(self, location)
        self.script, self.language = (script, language)

    def build(self, builder):
        """Calls the builder object's ``add_language_system`` callback."""
        builder.add_language_system(self.location, self.script, self.language)

    def asFea(self, indent=""):
        """Render as ``languagesystem <script> <language>;``."""
        return f"languagesystem {self.script} {self.language.strip()};"
class FontRevisionStatement(Statement):
    """A ``head`` table ``FontRevision`` statement. ``revision`` should be a
    number, and will be formatted to three significant decimal places."""

    def __init__(self, revision, location=None):
        Statement.__init__(self, location)
        self.revision = revision

    def build(self, builder):
        """Calls the builder object's ``set_font_revision`` callback."""
        builder.set_font_revision(self.location, self.revision)

    def asFea(self, indent=""):
        """Render as ``FontRevision <n.nnn>;`` (three decimal places)."""
        return "FontRevision %.3f;" % self.revision
class LigatureCaretByIndexStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByIndex`` statement. ``glyphs`` should be
    a `glyph-containing object`_, and ``carets`` should be a list of integers."""

    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs, self.carets = (glyphs, carets)

    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByIndex_`` callback."""
        # Carets are passed as a set: the builder receives unique values.
        builder.add_ligatureCaretByIndex_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )

    def asFea(self, indent=""):
        """Render as ``LigatureCaretByIndex <glyphs> <carets...>;``."""
        caret_text = " ".join(str(caret) for caret in self.carets)
        return "LigatureCaretByIndex {} {};".format(self.glyphs.asFea(), caret_text)
class LigatureCaretByPosStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByPos`` statement. ``glyphs`` should be
    a `glyph-containing object`_, and ``carets`` should be a list of integers."""

    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs, self.carets = (glyphs, carets)

    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByPos_`` callback."""
        # Carets are passed as a set: the builder receives unique values.
        builder.add_ligatureCaretByPos_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )

    def asFea(self, indent=""):
        """Render as ``LigatureCaretByPos <glyphs> <carets...>;``."""
        caret_text = " ".join(str(caret) for caret in self.carets)
        return "LigatureCaretByPos {} {};".format(self.glyphs.asFea(), caret_text)
class LigatureSubstStatement(Statement):
    """A chained contextual substitution statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_; ``replacement`` should be a single
    `glyph-containing object`_.
    If ``forceChain`` is True, this is expressed as a chaining rule
    (e.g. ``sub f' i' by f_i``) even when no context is given."""

    def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix)
        self.replacement, self.forceChain = replacement, forceChain

    def build(self, builder):
        """Calls the builder object's ``add_ligature_subst`` callback."""
        builder.add_ligature_subst(
            self.location,
            [p.glyphSet() for p in self.prefix],
            [g.glyphSet() for g in self.glyphs],
            [s.glyphSet() for s in self.suffix],
            self.replacement,
            self.forceChain,
        )

    def asFea(self, indent=""):
        """Render as ``sub [context] <glyphs> by <replacement>;``; input
        glyphs are marked with ``'`` in the chaining form."""
        if self.prefix or self.suffix or self.forceChain:
            parts = [g.asFea() for g in self.prefix]
            parts.extend(g.asFea() + "'" for g in self.glyphs)
            parts.extend(g.asFea() for g in self.suffix)
            core = " ".join(parts)
        else:
            core = " ".join(g.asFea() for g in self.glyphs)
        return "sub " + core + " by " + asFea(self.replacement) + ";"
class LookupFlagStatement(Statement):
    """A ``lookupflag`` statement. The ``value`` should be an integer value
    representing the flags in use, but not including the ``markAttachment``
    class and ``markFilteringSet`` values, which must be specified as
    glyph-containing objects."""

    def __init__(
        self, value=0, markAttachment=None, markFilteringSet=None, location=None
    ):
        Statement.__init__(self, location)
        self.value = value
        self.markAttachment = markAttachment
        self.markFilteringSet = markFilteringSet

    def build(self, builder):
        """Calls the builder object's ``set_lookup_flag`` callback."""
        markAttach = None
        if self.markAttachment is not None:
            markAttach = self.markAttachment.glyphSet()
        markFilter = None
        if self.markFilteringSet is not None:
            markFilter = self.markFilteringSet.glyphSet()
        builder.set_lookup_flag(self.location, self.value, markAttach, markFilter)

    def asFea(self, indent=""):
        """Render as ``lookupflag <names...>;`` or ``lookupflag 0;`` when no
        flag or class is set."""
        named = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"]
        # The four named flags occupy the low four bits, in order.
        flags = [label for bit, label in enumerate(named) if self.value & (1 << bit)]
        if self.markAttachment is not None:
            flags.append("MarkAttachmentType {}".format(self.markAttachment.asFea()))
        if self.markFilteringSet is not None:
            flags.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea()))
        return "lookupflag {};".format(" ".join(flags) if flags else "0")
class LookupReferenceStatement(Statement):
    """Represents a ``lookup ...;`` statement to include a lookup in a feature.
    The ``lookup`` should be a :class:`LookupBlock` object."""

    def __init__(self, lookup, location=None):
        Statement.__init__(self, location)
        self.location, self.lookup = (location, lookup)

    def build(self, builder):
        """Calls the builder object's ``add_lookup_call`` callback."""
        builder.add_lookup_call(self.lookup.name)

    def asFea(self, indent=""):
        """Render as ``lookup <name>;``."""
        return f"lookup {self.lookup.name};"
class MarkBasePosStatement(Statement):
    """A mark-to-base positioning rule. The ``base`` should be a
    `glyph-containing object`_. The ``marks`` should be a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples."""

    def __init__(self, base, marks, location=None):
        Statement.__init__(self, location)
        self.base, self.marks = base, marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_base_pos`` callback."""
        builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks)

    def asFea(self, indent=""):
        """Render as ``pos base <base>`` followed by one indented
        ``<anchor> mark @<class>`` line per attachment."""
        lines = ["pos base {}".format(self.base.asFea())]
        for anchor, markclass in self.marks:
            lines.append("{} mark @{}".format(anchor.asFea(), markclass.name))
        return ("\n" + indent + SHIFT).join(lines) + ";"
class MarkLigPosStatement(Statement):
    """A mark-to-ligature positioning rule. The ``ligatures`` must be a
    `glyph-containing object`_. The ``marks`` should be a list of lists: each
    element in the top-level list represents a component glyph, and is made
    up of a list of (:class:`Anchor`, :class:`MarkClass`) tuples representing
    mark attachment points for that position.
    Example::
        m1 = MarkClass("TOP_MARKS")
        m2 = MarkClass("BOTTOM_MARKS")
        # ... add definitions to mark classes...
        glyph = GlyphName("lam_meem_jeem")
        marks = [
            [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam)
            [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem)
            [ ]                         # No attachments on the jeem
        ]
        mlp = MarkLigPosStatement(glyph, marks)
        mlp.asFea()
        # pos ligature lam_meem_jeem <anchor 625 1800> mark @TOP_MARKS
        #     ligComponent <anchor 376 -378> mark @BOTTOM_MARKS;
    """

    def __init__(self, ligatures, marks, location=None):
        Statement.__init__(self, location)
        self.ligatures, self.marks = ligatures, marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_lig_pos`` callback."""
        builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks)

    def asFea(self, indent=""):
        res = "pos ligature {}".format(self.ligatures.asFea())
        ligs = []
        for l in self.marks:
            temp = ""
            if l is None or not len(l):
                # NOTE(review): a component with no attachments must be
                # written as ``<anchor NULL>``; the literal had been stripped
                # to an empty string by angle-bracket removal — restored here.
                temp = "\n" + indent + SHIFT * 2 + "<anchor NULL>"
            else:
                for a, m in l:
                    temp += (
                        "\n"
                        + indent
                        + SHIFT * 2
                        + "{} mark @{}".format(a.asFea(), m.name)
                    )
            ligs.append(temp)
        res += ("\n" + indent + SHIFT + "ligComponent").join(ligs)
        res += ";"
        return res
class MarkMarkPosStatement(Statement):
    """A mark-to-mark positioning rule. The ``baseMarks`` must be a
    `glyph-containing object`_. The ``marks`` should be a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples."""

    def __init__(self, baseMarks, marks, location=None):
        Statement.__init__(self, location)
        self.baseMarks, self.marks = baseMarks, marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_mark_pos`` callback."""
        builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks)

    def asFea(self, indent=""):
        """Render as ``pos mark <base>`` followed by one indented
        ``<anchor> mark @<class>`` line per attachment."""
        lines = ["pos mark {}".format(self.baseMarks.asFea())]
        for anchor, markclass in self.marks:
            lines.append("{} mark @{}".format(anchor.asFea(), markclass.name))
        return ("\n" + indent + SHIFT).join(lines) + ";"
class MultipleSubstStatement(Statement):
    """A multiple substitution statement.
    Args:
        prefix: a list of `glyph-containing objects`_.
        glyph: a single glyph-containing object.
        suffix: a list of glyph-containing objects.
        replacement: a list of glyph-containing objects.
        forceChain: If true, the statement is expressed as a chaining rule
            (e.g. ``sub f' i' by f_i``) even when no context is given.
    """

    def __init__(
        self, prefix, glyph, suffix, replacement, forceChain=False, location=None
    ):
        Statement.__init__(self, location)
        self.prefix, self.glyph, self.suffix = prefix, glyph, suffix
        self.replacement = replacement
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_multiple_subst`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        # The target may be a glyph-containing object or a bare glyph name.
        if hasattr(self.glyph, "glyphSet"):
            originals = self.glyph.glyphSet()
        else:
            originals = [self.glyph]
        count = len(originals)
        replaces = []
        for r in self.replacement:
            if hasattr(r, "glyphSet"):
                replace = r.glyphSet()
            else:
                replace = [r]
            # Broadcast a single-glyph replacement across every original, so
            # a class target maps position-for-position.
            if len(replace) == 1 and len(replace) != count:
                replace = replace * count
            replaces.append(replace)
        # Transpose: one tuple of replacement glyphs per original glyph.
        replaces = list(zip(*replaces))
        # Each original glyph gets exactly one rule; later duplicates in a
        # class are skipped.
        seen_originals = set()
        for i, original in enumerate(originals):
            if original not in seen_originals:
                seen_originals.add(original)
                builder.add_multiple_subst(
                    self.location,
                    prefix,
                    original,
                    suffix,
                    # Empty replacement list => pass an empty tuple.
                    replaces and replaces[i] or (),
                    self.forceChain,
                )

    def asFea(self, indent=""):
        res = "sub "
        if len(self.prefix) or len(self.suffix) or self.forceChain:
            if len(self.prefix):
                res += " ".join(map(asFea, self.prefix)) + " "
            res += asFea(self.glyph) + "'"
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            res += asFea(self.glyph)
        # An empty replacement is written as the NULL glyph.
        replacement = self.replacement or [NullGlyph()]
        res += " by "
        res += " ".join(map(asFea, replacement))
        res += ";"
        return res
class PairPosStatement(Statement):
    """A pair positioning statement.
    ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_.
    ``valuerecord1`` should be a :class:`ValueRecord` object;
    ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``.
    If ``enumerated`` is true, then this is expressed as an enumerated pair
    (the ``enum`` keyword of the feature-file syntax).
    """

    def __init__(
        self,
        glyphs1,
        valuerecord1,
        glyphs2,
        valuerecord2,
        enumerated=False,
        location=None,
    ):
        Statement.__init__(self, location)
        self.enumerated = enumerated
        self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1
        self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2

    def build(self, builder):
        """Calls a callback on the builder object:
        * If the rule is enumerated, calls ``add_specific_pair_pos`` on each
          combination of first and second glyphs.
        * If the glyphs are both single :class:`GlyphName` objects, calls
          ``add_specific_pair_pos``.
        * Else, calls ``add_class_pair_pos``.
        """
        if self.enumerated:
            g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()]
            # Track whether the cross-product produced anything; an empty
            # class would otherwise silently emit no rules.
            seen_pair = False
            for glyph1, glyph2 in itertools.product(*g):
                seen_pair = True
                builder.add_specific_pair_pos(
                    self.location, glyph1, self.valuerecord1, glyph2, self.valuerecord2
                )
            if not seen_pair:
                raise FeatureLibError(
                    "Empty glyph class in positioning rule", self.location
                )
            return
        is_specific = isinstance(self.glyphs1, GlyphName) and isinstance(
            self.glyphs2, GlyphName
        )
        if is_specific:
            builder.add_specific_pair_pos(
                self.location,
                self.glyphs1.glyph,
                self.valuerecord1,
                self.glyphs2.glyph,
                self.valuerecord2,
            )
        else:
            builder.add_class_pair_pos(
                self.location,
                self.glyphs1.glyphSet(),
                self.valuerecord1,
                self.glyphs2.glyphSet(),
                self.valuerecord2,
            )

    def asFea(self, indent=""):
        # Format A (two value records): pos g1 v1 g2 v2;
        # Format B (one value record): pos g1 g2 v1;
        res = "enum " if self.enumerated else ""
        if self.valuerecord2:
            res += "pos {} {} {} {};".format(
                self.glyphs1.asFea(),
                self.valuerecord1.asFea(),
                self.glyphs2.asFea(),
                self.valuerecord2.asFea(),
            )
        else:
            res += "pos {} {} {};".format(
                self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea()
            )
        return res
class ReverseChainSingleSubstStatement(Statement):
    """A reverse chaining substitution statement. You don't see those every day.
    Note the unusual argument order: ``suffix`` comes `before` ``glyphs``.
    ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be
    lists of `glyph-containing objects`_. ``glyphs`` and ``replacements`` should
    be one-item lists.
    """

    def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None):
        Statement.__init__(self, location)
        self.old_prefix, self.old_suffix = old_prefix, old_suffix
        self.glyphs = glyphs
        self.replacements = replacements

    def build(self, builder):
        """Calls the builder object's ``add_reverse_chain_single_subst``
        callback with a mapping from original to replacement glyphs."""
        before = [p.glyphSet() for p in self.old_prefix]
        after = [s.glyphSet() for s in self.old_suffix]
        sources = self.glyphs[0].glyphSet()
        targets = self.replacements[0].glyphSet()
        # Broadcast a single replacement glyph across the whole source class.
        if len(targets) == 1:
            targets = targets * len(sources)
        builder.add_reverse_chain_single_subst(
            self.location, before, after, dict(zip(sources, targets))
        )

    def asFea(self, indent=""):
        """Render as ``rsub [context] <glyphs> by <replacements>;``."""
        if self.old_prefix or self.old_suffix:
            parts = [asFea(g) for g in self.old_prefix]
            parts.extend(asFea(g) + "'" for g in self.glyphs)
            parts.extend(asFea(g) for g in self.old_suffix)
            core = " ".join(parts)
        else:
            core = " ".join(asFea(g) for g in self.glyphs)
        replaced = " ".join(asFea(g) for g in self.replacements)
        return "rsub " + core + " by {};".format(replaced)
class SingleSubstStatement(Statement):
    """A single substitution statement.
    Note the unusual argument order: ``prefix`` and suffix come `after`
    the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and
    ``replace`` should be lists of `glyph-containing objects`_. ``glyphs`` and
    ``replace`` should be one-item lists.
    """

    def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix, self.suffix = prefix, suffix
        self.forceChain = forceChain
        self.glyphs = glyphs
        self.replacements = replace

    def build(self, builder):
        """Calls the builder object's ``add_single_subst`` callback with an
        ordered mapping from original to replacement glyphs."""
        before = [p.glyphSet() for p in self.prefix]
        after = [s.glyphSet() for s in self.suffix]
        sources = self.glyphs[0].glyphSet()
        targets = self.replacements[0].glyphSet()
        # Broadcast a single replacement glyph across the whole source class.
        if len(targets) == 1:
            targets = targets * len(sources)
        builder.add_single_subst(
            self.location,
            before,
            after,
            OrderedDict(zip(sources, targets)),
            self.forceChain,
        )

    def asFea(self, indent=""):
        """Render as ``sub [context] <glyphs> by <replacements>;``; input
        glyphs are marked with ``'`` in the chaining form."""
        if self.prefix or self.suffix or self.forceChain:
            parts = [asFea(g) for g in self.prefix]
            parts.extend(asFea(g) + "'" for g in self.glyphs)
            parts.extend(asFea(g) for g in self.suffix)
            core = " ".join(parts)
        else:
            core = " ".join(asFea(g) for g in self.glyphs)
        replaced = " ".join(asFea(g) for g in self.replacements)
        return "sub " + core + " by {};".format(replaced)
class ScriptStatement(Statement):
    """A ``script`` statement."""

    def __init__(self, script, location=None):
        Statement.__init__(self, location)
        self.script = script  #: the script code

    def build(self, builder):
        """Calls the builder's ``set_script`` callback."""
        builder.set_script(self.location, self.script)

    def asFea(self, indent=""):
        # Trailing whitespace in the tag is stripped on output.
        return "script " + self.script.strip() + ";"
class SinglePosStatement(Statement):
    """A single position statement. ``prefix`` and ``suffix`` should be
    lists of `glyph-containing objects`_.

    ``pos`` should be a one-element list containing a (`glyph-containing object`_,
    :class:`ValueRecord`) tuple."""

    def __init__(self, pos, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.pos, self.prefix, self.suffix = pos, prefix, suffix
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_single_pos`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        pos = [(g.glyphSet(), value) for g, value in self.pos]
        builder.add_single_pos(self.location, prefix, suffix, pos, self.forceChain)

    def asFea(self, indent=""):
        """Return this rule as feature-file source text."""
        out = ["pos "]
        if self.prefix or self.suffix or self.forceChain:
            # Contextual form: marked glyphs carry a trailing "'".
            if self.prefix:
                out.append(" ".join(asFea(g) for g in self.prefix) + " ")
            marked = []
            for glyph, value in self.pos:
                piece = asFea(glyph) + "'"
                if value is not None:
                    piece += " " + value.asFea()
                marked.append(piece)
            out.append(" ".join(marked))
            if self.suffix:
                out.append(" " + " ".join(asFea(g) for g in self.suffix))
        else:
            out.append(
                " ".join(
                    asFea(glyph) + " " + (value.asFea() if value is not None else "")
                    for glyph, value in self.pos
                )
            )
        out.append(";")
        return "".join(out)
class SubtableStatement(Statement):
    """Represents a subtable break."""

    def __init__(self, location=None):
        Statement.__init__(self, location)

    def build(self, builder):
        """Calls the builder objects's ``add_subtable_break`` callback."""
        builder.add_subtable_break(self.location)

    def asFea(self, indent=""):
        # A subtable break has a fixed spelling in feature-file syntax.
        return "subtable;"
class ValueRecord(Expression):
    """Represents a GPOS value record: optional x/y placement and advance
    adjustments, each optionally backed by a device table."""

    def __init__(
        self,
        xPlacement=None,
        yPlacement=None,
        xAdvance=None,
        yAdvance=None,
        xPlaDevice=None,
        yPlaDevice=None,
        xAdvDevice=None,
        yAdvDevice=None,
        vertical=False,
        location=None,
    ):
        Expression.__init__(self, location)
        self.xPlacement, self.yPlacement = (xPlacement, yPlacement)
        self.xAdvance, self.yAdvance = (xAdvance, yAdvance)
        self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice)
        self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice)
        self.vertical = vertical

    def __eq__(self, other):
        # Fix: also compare yPlaDevice and yAdvDevice. The original omitted
        # them, so records differing only in those fields compared equal yet
        # hashed differently (see __hash__), violating the __eq__/__hash__
        # contract and corrupting dict/set behavior.
        return (
            self.xPlacement == other.xPlacement
            and self.yPlacement == other.yPlacement
            and self.xAdvance == other.xAdvance
            and self.yAdvance == other.yAdvance
            and self.xPlaDevice == other.xPlaDevice
            and self.yPlaDevice == other.yPlaDevice
            and self.xAdvDevice == other.xAdvDevice
            and self.yAdvDevice == other.yAdvDevice
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hashes all eight value fields; __eq__ above matches this set.
        return (
            hash(self.xPlacement)
            ^ hash(self.yPlacement)
            ^ hash(self.xAdvance)
            ^ hash(self.yAdvance)
            ^ hash(self.xPlaDevice)
            ^ hash(self.yPlaDevice)
            ^ hash(self.xAdvDevice)
            ^ hash(self.yAdvDevice)
        )

    def asFea(self, indent=""):
        """Return the record in feature-file syntax, using the shortest
        format (A: bare number; B: <x y xadv yadv>; C: with device tables)
        that can represent it. Returns "" for an empty record."""
        if not self:
            return ""
        x, y = self.xPlacement, self.yPlacement
        xAdvance, yAdvance = self.xAdvance, self.yAdvance
        xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice
        xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice
        vertical = self.vertical
        # Try format A, if possible.
        if x is None and y is None:
            if xAdvance is None and vertical:
                return str(yAdvance)
            elif yAdvance is None and not vertical:
                return str(xAdvance)
        # Make any remaining None value 0 to avoid generating invalid records.
        x = x or 0
        y = y or 0
        xAdvance = xAdvance or 0
        yAdvance = yAdvance or 0
        # Try format B, if possible.
        if (
            xPlaDevice is None
            and yPlaDevice is None
            and xAdvDevice is None
            and yAdvDevice is None
        ):
            return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance)
        # Last resort is format C.
        return "<%s %s %s %s %s %s %s %s>" % (
            x,
            y,
            xAdvance,
            yAdvance,
            deviceToString(xPlaDevice),
            deviceToString(yPlaDevice),
            deviceToString(xAdvDevice),
            deviceToString(yAdvDevice),
        )

    def __bool__(self):
        # The record is "present" iff at least one field was specified.
        return any(
            getattr(self, v) is not None
            for v in [
                "xPlacement",
                "yPlacement",
                "xAdvance",
                "yAdvance",
                "xPlaDevice",
                "yPlaDevice",
                "xAdvDevice",
                "yAdvDevice",
            ]
        )

    __nonzero__ = __bool__
class ValueRecordDefinition(Statement):
    """Represents a named value record definition."""

    def __init__(self, name, value, location=None):
        Statement.__init__(self, location)
        self.name = name  #: Value record name as string
        self.value = value  #: :class:`ValueRecord` object

    def asFea(self, indent=""):
        return "valueRecordDef " + self.value.asFea() + " " + self.name + ";"
def simplify_name_attributes(pid, eid, lid):
    """Return the shortest feature-file spelling of a name record's
    (platformID, platEncID, langID) triple.

    Windows/Unicode/US-English (3 1 1033) is the default and is omitted;
    the Macintosh default (1 0 0) is abbreviated to "1"; anything else is
    spelled out in full.
    """
    triple = (pid, eid, lid)
    if triple == (3, 1, 1033):
        return ""
    if triple == (1, 0, 0):
        return "1"
    return "{} {} {}".format(pid, eid, lid)
class NameRecord(Statement):
    """Represents a name record. (`Section 9.e. `_)"""

    def __init__(self, nameID, platformID, platEncID, langID, string, location=None):
        Statement.__init__(self, location)
        self.nameID = nameID  # Name ID as integer (e.g. 9 for designer's name)
        self.platformID = platformID  # Platform ID as integer
        self.platEncID = platEncID  # Platform encoding ID as integer
        self.langID = langID  # Language ID as integer
        self.string = string  # Name record value

    def build(self, builder):
        """Calls the builder object's ``add_name_record`` callback."""
        builder.add_name_record(
            self.location,
            self.nameID,
            self.platformID,
            self.platEncID,
            self.langID,
            self.string,
        )

    def asFea(self, indent=""):
        """Return the record as a ``nameid`` statement, escaping characters
        that are not plain printable ASCII."""

        def escape(c, escape_pattern):
            # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS
            if 0x20 <= c <= 0x7E and c not in (0x22, 0x5C):
                return chr(c)
            return escape_pattern % c

        encoding = getEncoding(self.platformID, self.platEncID, self.langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", self.location)
        s = tobytes(self.string, encoding=encoding)
        if encoding == "utf_16_be":
            # Re-assemble big-endian UTF-16 code units from byte pairs.
            escaped_string = "".join(
                escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x")
                for i in range(0, len(s), 2)
            )
        else:
            escaped_string = "".join(escape(byteord(b), r"\%02x") for b in s)
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat != "":
            plat += " "
        return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string)
class FeatureNameStatement(NameRecord):
    """Represents a ``sizemenuname`` or ``name`` statement."""

    def build(self, builder):
        """Calls the builder object's ``add_featureName`` callback."""
        NameRecord.build(self, builder)
        builder.add_featureName(self.nameID)

    def asFea(self, indent=""):
        # "size" is a pseudo name ID identifying a sizemenuname statement.
        tag = "sizemenuname" if self.nameID == "size" else "name"
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return '{} {}"{}";'.format(tag, plat, self.string)
class STATNameStatement(NameRecord):
    """Represents a STAT table ``name`` statement."""

    def asFea(self, indent=""):
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return 'name {}"{}";'.format(plat, self.string)
class SizeParameters(Statement):
    """A ``parameters`` statement."""

    def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None):
        Statement.__init__(self, location)
        self.DesignSize = DesignSize
        self.SubfamilyID = SubfamilyID
        self.RangeStart = RangeStart
        self.RangeEnd = RangeEnd

    def build(self, builder):
        """Calls the builder object's ``set_size_parameters`` callback."""
        builder.set_size_parameters(
            self.location,
            self.DesignSize,
            self.SubfamilyID,
            self.RangeStart,
            self.RangeEnd,
        )

    def asFea(self, indent=""):
        res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID)
        if self.RangeStart != 0 or self.RangeEnd != 0:
            # The range bounds are scaled by 10 and truncated on output.
            res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10))
        return res + ";"
class CVParametersNameStatement(NameRecord):
    """Represent a name statement inside a ``cvParameters`` block."""

    def __init__(
        self, nameID, platformID, platEncID, langID, string, block_name, location=None
    ):
        NameRecord.__init__(
            self, nameID, platformID, platEncID, langID, string, location=location
        )
        self.block_name = block_name

    def build(self, builder):
        """Calls the builder object's ``add_cv_parameter`` callback."""
        item = ""
        if self.block_name == "ParamUILabelNameID":
            # Suffix repeated ParamUILabel entries with a running index.
            item = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0))
        builder.add_cv_parameter(self.nameID)
        # From here on, nameID is the (feature tag, block name) pair used
        # later as a key when the name table is built.
        self.nameID = (self.nameID, self.block_name + item)
        NameRecord.build(self, builder)

    def asFea(self, indent=""):
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return 'name {}"{}";'.format(plat, self.string)
class CharacterStatement(Statement):
    """
    Statement used in cvParameters blocks of Character Variant features (cvXX).
    The Unicode value may be written with either decimal or hexadecimal
    notation. The value must be preceded by '0x' if it is a hexadecimal value.
    The largest Unicode value allowed is 0xFFFFFF.
    """

    def __init__(self, character, tag, location=None):
        Statement.__init__(self, location)
        self.character = character  # code point as an int
        self.tag = tag

    def build(self, builder):
        """Calls the builder object's ``add_cv_character`` callback."""
        builder.add_cv_character(self.character, self.tag)

    def asFea(self, indent=""):
        # "{:#x}" emits the value with a "0x" prefix.
        return "Character {:#x};".format(self.character)
class BaseAxis(Statement):
    """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList``
    pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair."""

    def __init__(self, bases, scripts, vertical, minmax=None, location=None):
        Statement.__init__(self, location)
        self.bases = bases  #: A list of baseline tag names as strings
        self.scripts = scripts  #: A list of script record tuplets (script tag, default baseline tag, base coordinate)
        self.vertical = vertical  #: Boolean; VertAxis if True, HorizAxis if False
        # Bug fix: the original unconditionally stored [] here, silently
        # discarding the caller-supplied ``minmax`` argument that both
        # build() and asFea() consume.
        self.minmax = [] if minmax is None else minmax  #: A list of MinMax records

    def build(self, builder):
        """Calls the builder object's ``set_base_axis`` callback."""
        builder.set_base_axis(self.bases, self.scripts, self.vertical, self.minmax)

    def asFea(self, indent=""):
        direction = "Vert" if self.vertical else "Horiz"
        scripts = [
            "{} {} {}".format(a[0], a[1], " ".join(map(str, a[2])))
            for a in self.scripts
        ]
        minmaxes = [
            "\n{}Axis.MinMax {} {} {}, {};".format(direction, a[0], a[1], a[2], a[3])
            for a in self.minmax
        ]
        return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format(
            direction, " ".join(self.bases), indent, direction, ", ".join(scripts)
        ) + "\n".join(minmaxes)
class OS2Field(Statement):
    """An entry in the ``OS/2`` table. Most ``values`` should be numbers or
    strings, apart from when the key is ``UnicodeRange``, ``CodePageRange``
    or ``Panose``, in which case it should be an array of integers."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_os2_field`` callback."""
        builder.add_os2_field(self.key, self.value)

    def asFea(self, indent=""):
        def intarr2str(x):
            return " ".join(map(str, x))

        numbers = (
            "FSType",
            "TypoAscender",
            "TypoDescender",
            "TypoLineGap",
            "winAscent",
            "winDescent",
            "XHeight",
            "CapHeight",
            "WeightClass",
            "WidthClass",
            "LowerOpSize",
            "UpperOpSize",
        )
        ranges = ("UnicodeRange", "CodePageRange")
        # Map the lowercased key to (canonical spelling, value formatter).
        keywords = {x.lower(): [x, str] for x in numbers}
        keywords.update({x.lower(): [x, intarr2str] for x in ranges})
        keywords["panose"] = ["Panose", intarr2str]
        keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)]
        if self.key in keywords:
            keyword, formatter = keywords[self.key]
            return "{} {};".format(keyword, formatter(self.value))
        return ""  # should raise exception
class HheaField(Statement):
    """An entry in the ``hhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key  # lowercased field keyword
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_hhea_field`` callback."""
        builder.add_hhea_field(self.key, self.value)

    def asFea(self, indent=""):
        # Map the lowercased key back to its canonical spelling.
        keywords = {
            x.lower(): x for x in ("CaretOffset", "Ascender", "Descender", "LineGap")
        }
        return "{} {};".format(keywords[self.key], self.value)
class VheaField(Statement):
    """An entry in the ``vhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key  # lowercased field keyword
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_vhea_field`` callback."""
        builder.add_vhea_field(self.key, self.value)

    def asFea(self, indent=""):
        # Map the lowercased key back to its canonical spelling.
        keywords = {
            x.lower(): x
            for x in ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
        }
        return "{} {};".format(keywords[self.key], self.value)
class STATDesignAxisStatement(Statement):
    """A STAT table Design Axis

    Args:
        tag (str): a 4 letter axis tag
        axisOrder (int): an int
        names (list): a list of :class:`STATNameStatement` objects
    """

    def __init__(self, tag, axisOrder, names, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.axisOrder = axisOrder
        self.names = names
        self.location = location

    def build(self, builder):
        builder.addDesignAxis(self, self.location)

    def asFea(self, indent=""):
        indent += SHIFT
        body = ("\n" + indent).join(s.asFea(indent=indent) for s in self.names)
        return f"DesignAxis {self.tag} {self.axisOrder} {{ \n" + body + "\n};"
class ElidedFallbackName(Statement):
    """STAT table ElidedFallbackName

    Args:
        names: a list of :class:`STATNameStatement` objects
    """

    def __init__(self, names, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.location = location

    def build(self, builder):
        builder.setElidedFallbackName(self.names, self.location)

    def asFea(self, indent=""):
        indent += SHIFT
        body = ("\n" + indent).join(s.asFea(indent=indent) for s in self.names)
        return "ElidedFallbackName { \n" + body + "\n};"
class ElidedFallbackNameID(Statement):
    """STAT table ElidedFallbackNameID

    Args:
        value: an int pointing to an existing name table name ID
    """

    def __init__(self, value, location=None):
        Statement.__init__(self, location)
        self.value = value
        self.location = location

    def build(self, builder):
        builder.setElidedFallbackName(self.value, self.location)

    def asFea(self, indent=""):
        return "ElidedFallbackNameID {};".format(self.value)
class STATAxisValueStatement(Statement):
    """A STAT table Axis Value Record

    Args:
        names (list): a list of :class:`STATNameStatement` objects
        locations (list): a list of :class:`AxisValueLocationStatement` objects
        flags (int): an int
    """

    def __init__(self, names, locations, flags, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.locations = locations
        self.flags = flags

    def build(self, builder):
        builder.addAxisValueRecord(self, self.location)

    def asFea(self, indent=""):
        res = "AxisValue {\n"
        for loc in self.locations:
            res += loc.asFea()
        for nameRecord in self.names:
            res += nameRecord.asFea() + "\n"
        if self.flags:
            # Known flag names, in bit order (bit 0, bit 1).
            known = ("OlderSiblingFontAttribute", "ElidableAxisValueName")
            flagStrings = [
                name for bit, name in enumerate(known) if self.flags & (1 << bit)
            ]
            res += f"flag {' '.join(flagStrings)};\n"
        res += "};"
        return res
class AxisValueLocationStatement(Statement):
    """
    A STAT table Axis Value Location

    Args:
        tag (str): a 4 letter axis tag
        values (list): a list of ints and/or floats
    """

    def __init__(self, tag, values, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.values = values

    def asFea(self, res=""):
        values = " ".join(str(i) for i in self.values)
        return res + f"location {self.tag} " + f"{values};\n"
class ConditionsetStatement(Statement):
    """
    A variable layout conditionset

    Args:
        name (str): the name of this conditionset
        conditions (dict): a dictionary mapping axis tags to a
        tuple of (min,max) userspace coordinates.
    """

    def __init__(self, name, conditions, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.conditions = conditions

    def asFea(self, res="", indent=""):
        lines = [indent + f"conditionset {self.name} " + "{"]
        for tag, (minvalue, maxvalue) in self.conditions.items():
            lines.append(indent + SHIFT + f"{tag} {minvalue} {maxvalue};")
        lines.append(indent + "}" + f" {self.name};")
        return res + "\n".join(lines) + "\n"

    def build(self, builder):
        builder.add_conditionset(self.location, self.name, self.conditions)
class VariationBlock(Block):
    """A variation feature block, applicable in a given set of conditions."""

    def __init__(self, name, conditionset, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name, self.conditionset, self.use_extension = (
            name,
            conditionset,
            use_extension,
        )

    def build(self, builder):
        """Call the ``start_feature`` callback on the builder object, visit
        all the statements in this feature, and then call ``end_feature``."""
        builder.start_feature(self.location, self.name, self.use_extension)
        # "NULL" is the only conditionset name that need not be pre-defined.
        if (
            self.conditionset != "NULL"
            and self.conditionset not in builder.conditionsets_
        ):
            raise FeatureLibError(
                f"variation block used undefined conditionset {self.conditionset}",
                self.location,
            )
        # language exclude_dflt statements modify builder.features_
        # limit them to this block with temporary builder.features_
        features = builder.features_
        builder.features_ = {}
        Block.build(self, builder)
        # Everything collected during this block goes into
        # feature_variations_, keyed by this block's conditionset.
        for key, value in builder.features_.items():
            items = builder.feature_variations_.setdefault(key, {}).setdefault(
                self.conditionset, []
            )
            items.extend(value)
            if key not in features:
                features[key] = []  # Ensure we make a feature record
        # Restore the saved features_ before closing the feature.
        builder.features_ = features
        builder.end_feature()

    def asFea(self, indent=""):
        res = indent + "variation %s " % self.name.strip()
        res += self.conditionset + " "
        if self.use_extension:
            res += "useExtension "
        res += "{\n"
        res += Block.asFea(self, indent=indent)
        res += indent + "} %s;\n" % self.name.strip()
        return res
from fontTools.misc import sstruct
from fontTools.misc.textTools import Tag, tostr, binary2num, safeEval
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lookupDebugInfo import (
LookupDebugInfo,
LOOKUP_DEBUG_INFO_KEY,
LOOKUP_DEBUG_ENV_VAR,
)
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.ast import FeatureFile
from fontTools.feaLib.variableScalar import VariableScalar
from fontTools.otlLib import builder as otl
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.ttLib import newTable, getTableModule
from fontTools.ttLib.tables import otBase, otTables
from fontTools.otlLib.builder import (
AlternateSubstBuilder,
ChainContextPosBuilder,
ChainContextSubstBuilder,
LigatureSubstBuilder,
MultipleSubstBuilder,
CursivePosBuilder,
MarkBasePosBuilder,
MarkLigPosBuilder,
MarkMarkPosBuilder,
ReverseChainSingleSubstBuilder,
SingleSubstBuilder,
ClassPairPosSubtableBuilder,
PairPosBuilder,
SinglePosBuilder,
ChainContextualRule,
AnySubstBuilder,
)
from fontTools.otlLib.error import OpenTypeLibError
from fontTools.varLib.varStore import OnlineVarStoreBuilder
from fontTools.varLib.builder import buildVarDevTable
from fontTools.varLib.featureVars import addFeatureVariationsRaw
from fontTools.varLib.models import normalizeValue, piecewiseLinearMap
from collections import defaultdict
import copy
import itertools
from io import StringIO
import logging
import warnings
import os
log = logging.getLogger(__name__)
def addOpenTypeFeatures(font, featurefile, tables=None, debug=False):
    """Add features from a file to a font. Note that this replaces any features
    currently present.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        featurefile: Either a path or file object (in which case we
            parse it into an AST), or a pre-parsed AST instance.
        tables: If passed, restrict the set of affected tables to those in the
            list.
        debug: Whether to add source debugging information to the font in the
            ``Debg`` table
    """
    Builder(font, featurefile).build(tables=tables, debug=debug)
def addOpenTypeFeaturesFromString(
    font, features, filename=None, tables=None, debug=False
):
    """Add features from a string to a font. Note that this replaces any
    features currently present.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        features: A string containing feature code.
        filename: The directory containing ``filename`` is used as the root of
            relative ``include()`` paths; if ``None`` is provided, the current
            directory is assumed.
        tables: If passed, restrict the set of affected tables to those in the
            list.
        debug: Whether to add source debugging information to the font in the
            ``Debg`` table
    """
    buf = StringIO(tostr(features))
    if filename:
        # The parser resolves relative include() paths against the
        # directory part of the stream's .name attribute.
        buf.name = filename
    addOpenTypeFeatures(font, buf, tables=tables, debug=debug)
class Builder(object):
    """Compiles a parsed feature file into OpenType tables on a font."""

    # The set of OpenType tables this builder knows how to produce.
    supportedTables = frozenset(
        Tag(tag)
        for tag in [
            "BASE",
            "GDEF",
            "GPOS",
            "GSUB",
            "OS/2",
            "head",
            "hhea",
            "name",
            "vhea",
            "STAT",
        ]
    )
    def __init__(self, font, featurefile):
        """Initialize all builder state for compiling *featurefile* into *font*."""
        self.font = font
        # 'featurefile' can be either a path or file object (in which case we
        # parse it into an AST), or a pre-parsed AST instance
        if isinstance(featurefile, FeatureFile):
            self.parseTree, self.file = featurefile, None
        else:
            self.parseTree, self.file = None, featurefile
        self.glyphMap = font.getReverseGlyphMap()
        self.varstorebuilder = None
        # A variable store builder is only needed for variable fonts.
        if "fvar" in font:
            self.axes = font["fvar"].axes
            self.varstorebuilder = OnlineVarStoreBuilder(
                [ax.axisTag for ax in self.axes]
            )
        self.default_language_systems_ = set()
        self.script_ = None
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
        self.use_extension_ = False
        self.language_systems = set()
        self.seen_non_DFLT_script_ = False
        self.named_lookups_ = {}
        self.cur_lookup_ = None
        self.cur_lookup_name_ = None
        self.cur_feature_name_ = None
        self.lookups_ = []
        self.lookup_locations = {"GSUB": {}, "GPOS": {}}
        self.features_ = {}  # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*]
        self.required_features_ = {}  # ('latn', 'DEU ') --> 'scmp'
        self.feature_variations_ = {}
        # for feature 'aalt'
        self.aalt_features_ = []  # [(location, featureName)*], for 'aalt'
        self.aalt_location_ = None
        self.aalt_alternates_ = {}
        self.aalt_use_extension_ = False
        # for 'featureNames'
        self.featureNames_ = set()
        self.featureNames_ids_ = {}
        # for 'cvParameters'
        self.cv_parameters_ = set()
        self.cv_parameters_ids_ = {}
        self.cv_num_named_params_ = {}
        self.cv_characters_ = defaultdict(list)
        # for feature 'size'
        self.size_parameters_ = None
        # for table 'head'
        self.fontRevision_ = None  # 2.71
        # for table 'name'
        self.names_ = []
        # for table 'BASE'
        self.base_horiz_axis_ = None
        self.base_vert_axis_ = None
        # for table 'GDEF'
        self.attachPoints_ = {}  # "a" --> {3, 7}
        self.ligCaretCoords_ = {}  # "f_f_i" --> {300, 600}
        self.ligCaretPoints_ = {}  # "f_f_i" --> {3, 7}
        self.glyphClassDefs_ = {}  # "fi" --> (2, (file, line, column))
        self.markAttach_ = {}  # "acute" --> (4, (file, line, column))
        self.markAttachClassID_ = {}  # frozenset({"acute", "grave"}) --> 4
        self.markFilterSets_ = {}  # frozenset({"acute", "grave"}) --> 4
        # for table 'OS/2'
        self.os2_ = {}
        # for table 'hhea'
        self.hhea_ = {}
        # for table 'vhea'
        self.vhea_ = {}
        # for table 'STAT'
        self.stat_ = {}
        # for conditionsets
        self.conditionsets_ = {}
        # We will often use exactly the same locations (i.e. the font's masters)
        # for a large number of variable scalars. Instead of creating a model
        # for each, let's share the models.
        self.model_cache = {}
    def build(self, tables=None, debug=False):
        """Parse the feature file (if not already pre-parsed), apply it to
        this builder, and install the requested tables on ``self.font``.

        Args:
            tables: optional iterable restricting which tables are built;
                must be a subset of ``supportedTables``.
            debug: if true (or the debug env var is set), also build the
                ``Debg`` table with lookup debug info.

        Raises:
            NotImplementedError: if an unsupported table was requested.
        """
        if self.parseTree is None:
            self.parseTree = Parser(self.file, self.glyphMap).parse()
        self.parseTree.build(self)
        # by default, build all the supported tables
        if tables is None:
            tables = self.supportedTables
        else:
            tables = frozenset(tables)
            unsupported = tables - self.supportedTables
            if unsupported:
                unsupported_string = ", ".join(sorted(unsupported))
                raise NotImplementedError(
                    "The following tables were requested but are unsupported: "
                    f"{unsupported_string}."
                )
        # 'aalt' must be synthesized before GSUB is assembled.
        if "GSUB" in tables:
            self.build_feature_aalt_()
        if "head" in tables:
            self.build_head()
        if "hhea" in tables:
            self.build_hhea()
        if "vhea" in tables:
            self.build_vhea()
        if "name" in tables:
            self.build_name()
        if "OS/2" in tables:
            self.build_OS_2()
        if "STAT" in tables:
            self.build_STAT()
        for tag in ("GPOS", "GSUB"):
            if tag not in tables:
                continue
            table = self.makeTable(tag)
            if self.feature_variations_:
                self.makeFeatureVariations(table, tag)
            # Only install non-empty tables; otherwise drop a stale one.
            if (
                table.ScriptList.ScriptCount > 0
                or table.FeatureList.FeatureCount > 0
                or table.LookupList.LookupCount > 0
            ):
                fontTable = self.font[tag] = newTable(tag)
                fontTable.table = table
            elif tag in self.font:
                del self.font[tag]
        if any(tag in self.font for tag in ("GPOS", "GSUB")) and "OS/2" in self.font:
            self.font["OS/2"].usMaxContext = maxCtxFont(self.font)
        if "GDEF" in tables:
            gdef = self.buildGDEF()
            if gdef:
                self.font["GDEF"] = gdef
            elif "GDEF" in self.font:
                del self.font["GDEF"]
        if "BASE" in tables:
            base = self.buildBASE()
            if base:
                self.font["BASE"] = base
            elif "BASE" in self.font:
                del self.font["BASE"]
        if debug or os.environ.get(LOOKUP_DEBUG_ENV_VAR):
            self.buildDebg()
def get_chained_lookup_(self, location, builder_class):
result = builder_class(self.font, location)
result.lookupflag = self.lookupflag_
result.markFilterSet = self.lookupflag_markFilterSet_
result.extension = self.use_extension_
self.lookups_.append(result)
return result
def add_lookup_to_feature_(self, lookup, feature_name):
for script, lang in self.language_systems:
key = (script, lang, feature_name)
self.features_.setdefault(key, []).append(lookup)
    def get_lookup_(self, location, builder_class, mapping=None):
        """Return a lookup builder of *builder_class*, reusing the current
        one when its type, flags, and capacity match; otherwise open a new
        lookup.

        Raises:
            FeatureLibError: if a named lookup block mixes lookup
                types or flags.
        """
        # Reuse the in-progress lookup only if nothing relevant changed and
        # it can still accept this rule's mapping.
        if (
            self.cur_lookup_
            and type(self.cur_lookup_) == builder_class
            and self.cur_lookup_.lookupflag == self.lookupflag_
            and self.cur_lookup_.markFilterSet == self.lookupflag_markFilterSet_
            and self.cur_lookup_.can_add_mapping(mapping)
        ):
            return self.cur_lookup_
        if self.cur_lookup_name_ and self.cur_lookup_:
            raise FeatureLibError(
                "Within a named lookup block, all rules must be of "
                "the same lookup type and flag",
                location,
            )
        self.cur_lookup_ = builder_class(self.font, location)
        self.cur_lookup_.lookupflag = self.lookupflag_
        self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_
        self.cur_lookup_.extension = self.use_extension_
        self.lookups_.append(self.cur_lookup_)
        if self.cur_lookup_name_:
            # We are starting a lookup rule inside a named lookup block.
            self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_
        if self.cur_feature_name_:
            # We are starting a lookup rule inside a feature. This includes
            # lookup rules inside named lookups inside features.
            self.add_lookup_to_feature_(self.cur_lookup_, self.cur_feature_name_)
        return self.cur_lookup_
    def build_feature_aalt_(self):
        """Synthesize the 'aalt' feature from explicitly listed alternates
        plus the alternates implied by the source features named in the
        aalt block."""
        if not self.aalt_features_ and not self.aalt_alternates_:
            return
        # > alternate glyphs will be sorted in the order that the source features
        # > are named in the aalt definition, not the order of the feature definitions
        # > in the file. Alternates defined explicitly ... will precede all others.
        # https://github.com/fonttools/fonttools/issues/836
        alternates = {g: list(a) for g, a in self.aalt_alternates_.items()}
        for location, name in self.aalt_features_ + [(None, "aalt")]:
            feature = [
                (script, lang, feature, lookups)
                for (script, lang, feature), lookups in self.features_.items()
                if feature == name
            ]
            # "aalt" does not have to specify its own lookups, but it might.
            if not feature and name != "aalt":
                warnings.warn("%s: Feature %s has not been defined" % (location, name))
                continue
            for script, lang, feature, lookups in feature:
                for lookuplist in lookups:
                    if not isinstance(lookuplist, list):
                        lookuplist = [lookuplist]
                    for lookup in lookuplist:
                        for glyph, alts in lookup.getAlternateGlyphs().items():
                            alts_for_glyph = alternates.setdefault(glyph, [])
                            # Preserve order, drop duplicates.
                            alts_for_glyph.extend(
                                g for g in alts if g not in alts_for_glyph
                            )
        # Single alternates become a SingleSubst; multiples an AlternateSubst.
        single = {
            glyph: repl[0] for glyph, repl in alternates.items() if len(repl) == 1
        }
        multi = {glyph: repl for glyph, repl in alternates.items() if len(repl) > 1}
        if not single and not multi:
            return
        # Drop any previous aalt entries before emitting the synthesized ones.
        self.features_ = {
            (script, lang, feature): lookups
            for (script, lang, feature), lookups in self.features_.items()
            if feature != "aalt"
        }
        old_lookups = self.lookups_
        self.lookups_ = []
        self.start_feature(self.aalt_location_, "aalt", self.aalt_use_extension_)
        # NOTE(review): `location` below is the leftover loop variable from
        # the for-loop above (None, from the final ("aalt") entry) —
        # presumably intentional; confirm.
        if single:
            single_lookup = self.get_lookup_(location, SingleSubstBuilder)
            single_lookup.mapping = single
        if multi:
            multi_lookup = self.get_lookup_(location, AlternateSubstBuilder)
            multi_lookup.alternates = multi
        self.end_feature()
        self.lookups_.extend(old_lookups)
def build_head(self):
if not self.fontRevision_:
return
table = self.font.get("head")
if not table: # this only happens for unit tests
table = self.font["head"] = newTable("head")
table.decompile(b"\0" * 54, self.font)
table.tableVersion = 1.0
table.magicNumber = 0x5F0F3CF5
table.created = table.modified = 3406620153 # 2011-12-13 11:22:33
table.fontRevision = self.fontRevision_
def build_hhea(self):
if not self.hhea_:
return
table = self.font.get("hhea")
if not table: # this only happens for unit tests
table = self.font["hhea"] = newTable("hhea")
table.decompile(b"\0" * 36, self.font)
table.tableVersion = 0x00010000
if "caretoffset" in self.hhea_:
table.caretOffset = self.hhea_["caretoffset"]
if "ascender" in self.hhea_:
table.ascent = self.hhea_["ascender"]
if "descender" in self.hhea_:
table.descent = self.hhea_["descender"]
if "linegap" in self.hhea_:
table.lineGap = self.hhea_["linegap"]
def build_vhea(self):
if not self.vhea_:
return
table = self.font.get("vhea")
if not table: # this only happens for unit tests
table = self.font["vhea"] = newTable("vhea")
table.decompile(b"\0" * 36, self.font)
table.tableVersion = 0x00011000
if "verttypoascender" in self.vhea_:
table.ascent = self.vhea_["verttypoascender"]
if "verttypodescender" in self.vhea_:
table.descent = self.vhea_["verttypodescender"]
if "verttypolinegap" in self.vhea_:
table.lineGap = self.vhea_["verttypolinegap"]
def get_user_name_id(self, table):
# Try to find first unused font-specific name id
nameIDs = [name.nameID for name in table.names]
for user_name_id in range(256, 32767):
if user_name_id not in nameIDs:
return user_name_id
    def buildFeatureParams(self, tag):
        """Build and return the FeatureParams subtable for feature *tag*
        ('size', a stylistic set with featureNames, or a character variant
        with cvParameters), or ``None`` if the feature has no parameters."""
        params = None
        if tag == "size":
            params = otTables.FeatureParamsSize()
            (
                params.DesignSize,
                params.SubfamilyID,
                params.RangeStart,
                params.RangeEnd,
            ) = self.size_parameters_
            if tag in self.featureNames_ids_:
                params.SubfamilyNameID = self.featureNames_ids_[tag]
            else:
                params.SubfamilyNameID = 0
        elif tag in self.featureNames_:
            if not self.featureNames_ids_:
                # name table wasn't selected among the tables to build; skip
                pass
            else:
                assert tag in self.featureNames_ids_
                params = otTables.FeatureParamsStylisticSet()
                params.Version = 0
                params.UINameID = self.featureNames_ids_[tag]
        elif tag in self.cv_parameters_:
            params = otTables.FeatureParamsCharacterVariants()
            params.Format = 0
            # Each name ID defaults to 0 (unset) when the block omitted it.
            params.FeatUILabelNameID = self.cv_parameters_ids_.get(
                (tag, "FeatUILabelNameID"), 0
            )
            params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get(
                (tag, "FeatUITooltipTextNameID"), 0
            )
            params.SampleTextNameID = self.cv_parameters_ids_.get(
                (tag, "SampleTextNameID"), 0
            )
            params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0)
            params.FirstParamUILabelNameID = self.cv_parameters_ids_.get(
                (tag, "ParamUILabelNameID_0"), 0
            )
            params.CharCount = len(self.cv_characters_[tag])
            params.Character = self.cv_characters_[tag]
        return params
    def build_name(self):
        """Write collected name records into the ``name`` table, resolving
        feature-tag pseudo name IDs to real font-specific name IDs."""
        if not self.names_:
            return
        table = self.font.get("name")
        if not table:  # this only happens for unit tests
            table = self.font["name"] = newTable("name")
            table.names = []
        for name in self.names_:
            nameID, platformID, platEncID, langID, string = name
            # For featureNames block, nameID is 'feature tag'
            # For cvParameters blocks, nameID is ('feature tag', 'block name')
            if not isinstance(nameID, int):
                tag = nameID
                if tag in self.featureNames_:
                    if tag not in self.featureNames_ids_:
                        self.featureNames_ids_[tag] = self.get_user_name_id(table)
                        assert self.featureNames_ids_[tag] is not None
                    nameID = self.featureNames_ids_[tag]
                elif tag[0] in self.cv_parameters_:
                    if tag not in self.cv_parameters_ids_:
                        self.cv_parameters_ids_[tag] = self.get_user_name_id(table)
                        assert self.cv_parameters_ids_[tag] is not None
                    nameID = self.cv_parameters_ids_[tag]
            table.setName(string, nameID, platformID, platEncID, langID)
        table.names.sort()
    def build_OS_2(self):
        """Build or patch the ``OS/2`` table from ``table OS/2 { ... }`` values.

        Only fields present in ``self.os2_`` are written; the table version is
        raised to the minimum required by the fields that were set.
        """
        if not self.os2_:
            return
        table = self.font.get("OS/2")
        if not table:  # this only happens for unit tests
            table = self.font["OS/2"] = newTable("OS/2")
            # Start from an all-zero version-0 OS/2 record.
            data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0)
            table.decompile(data, self.font)
        version = 0  # minimum OS/2 version demanded by the fields below
        if "fstype" in self.os2_:
            table.fsType = self.os2_["fstype"]
        if "panose" in self.os2_:
            panose = getTableModule("OS/2").Panose()
            (
                panose.bFamilyType,
                panose.bSerifStyle,
                panose.bWeight,
                panose.bProportion,
                panose.bContrast,
                panose.bStrokeVariation,
                panose.bArmStyle,
                panose.bLetterForm,
                panose.bMidline,
                panose.bXHeight,
            ) = self.os2_["panose"]
            table.panose = panose
        if "typoascender" in self.os2_:
            table.sTypoAscender = self.os2_["typoascender"]
        if "typodescender" in self.os2_:
            table.sTypoDescender = self.os2_["typodescender"]
        if "typolinegap" in self.os2_:
            table.sTypoLineGap = self.os2_["typolinegap"]
        if "winascent" in self.os2_:
            table.usWinAscent = self.os2_["winascent"]
        if "windescent" in self.os2_:
            table.usWinDescent = self.os2_["windescent"]
        if "vendor" in self.os2_:
            # safeEval keeps escape sequences in the vendor string intact.
            table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''")
        if "weightclass" in self.os2_:
            table.usWeightClass = self.os2_["weightclass"]
        if "widthclass" in self.os2_:
            table.usWidthClass = self.os2_["widthclass"]
        if "unicoderange" in self.os2_:
            table.setUnicodeRanges(self.os2_["unicoderange"])
        if "codepagerange" in self.os2_:
            # Codepage ranges only exist from OS/2 version 1 on.
            pages = self.build_codepages_(self.os2_["codepagerange"])
            table.ulCodePageRange1, table.ulCodePageRange2 = pages
            version = 1
        if "xheight" in self.os2_:
            table.sxHeight = self.os2_["xheight"]
            version = 2
        if "capheight" in self.os2_:
            table.sCapHeight = self.os2_["capheight"]
            version = 2
        if "loweropsize" in self.os2_:
            table.usLowerOpticalPointSize = self.os2_["loweropsize"]
            version = 5
        if "upperopsize" in self.os2_:
            table.usUpperOpticalPointSize = self.os2_["upperopsize"]
            version = 5
        def checkattr(table, attrs):
            # Make sure version-dependent fields exist, defaulting to 0.
            for attr in attrs:
                if not hasattr(table, attr):
                    setattr(table, attr, 0)
        table.version = max(version, table.version)
        # this only happens for unit tests
        if version >= 1:
            checkattr(table, ("ulCodePageRange1", "ulCodePageRange2"))
        if version >= 2:
            checkattr(
                table,
                (
                    "sxHeight",
                    "sCapHeight",
                    "usDefaultChar",
                    "usBreakChar",
                    "usMaxContext",
                ),
            )
        if version >= 5:
            checkattr(table, ("usLowerOpticalPointSize", "usUpperOpticalPointSize"))
def setElidedFallbackName(self, value, location):
# ElidedFallbackName is a convenience method for setting
# ElidedFallbackNameID so only one can be allowed
for token in ("ElidedFallbackName", "ElidedFallbackNameID"):
if token in self.stat_:
raise FeatureLibError(
f"{token} is already set.",
location,
)
if isinstance(value, int):
self.stat_["ElidedFallbackNameID"] = value
elif isinstance(value, list):
self.stat_["ElidedFallbackName"] = value
else:
raise AssertionError(value)
def addDesignAxis(self, designAxis, location):
if "DesignAxes" not in self.stat_:
self.stat_["DesignAxes"] = []
if designAxis.tag in (r.tag for r in self.stat_["DesignAxes"]):
raise FeatureLibError(
f'DesignAxis already defined for tag "{designAxis.tag}".',
location,
)
if designAxis.axisOrder in (r.axisOrder for r in self.stat_["DesignAxes"]):
raise FeatureLibError(
f"DesignAxis already defined for axis number {designAxis.axisOrder}.",
location,
)
self.stat_["DesignAxes"].append(designAxis)
def addAxisValueRecord(self, axisValueRecord, location):
if "AxisValueRecords" not in self.stat_:
self.stat_["AxisValueRecords"] = []
# Check for duplicate AxisValueRecords
for record_ in self.stat_["AxisValueRecords"]:
if (
{n.asFea() for n in record_.names}
== {n.asFea() for n in axisValueRecord.names}
and {n.asFea() for n in record_.locations}
== {n.asFea() for n in axisValueRecord.locations}
and record_.flags == axisValueRecord.flags
):
raise FeatureLibError(
"An AxisValueRecord with these values is already defined.",
location,
)
self.stat_["AxisValueRecords"].append(axisValueRecord)
    def build_STAT(self):
        """Build the ``STAT`` table from collected design axes and axis values.

        AxisValue records with a single location map to formats 1/2/3
        depending on how many values the location carries; records spanning
        several axes become format-4 locations. Delegates the actual table
        construction to ``otl.buildStatTable``.
        """
        if not self.stat_:
            return
        axes = self.stat_.get("DesignAxes")
        if not axes:
            raise FeatureLibError("DesignAxes not defined", None)
        axisValueRecords = self.stat_.get("AxisValueRecords")
        axisValues = {}
        format4_locations = []
        for tag in axes:
            axisValues[tag.tag] = []
        if axisValueRecords is not None:
            for avr in axisValueRecords:
                valuesDict = {}
                if avr.flags > 0:
                    valuesDict["flags"] = avr.flags
                if len(avr.locations) == 1:
                    # Single-axis record: format depends on the value count.
                    location = avr.locations[0]
                    values = location.values
                    if len(values) == 1:  # format1
                        valuesDict.update({"value": values[0], "name": avr.names})
                    if len(values) == 2:  # format3
                        # Second value is the style-linked value.
                        valuesDict.update(
                            {
                                "value": values[0],
                                "linkedValue": values[1],
                                "name": avr.names,
                            }
                        )
                    if len(values) == 3:  # format2
                        # Nominal value plus the min/max of its range.
                        nominal, minVal, maxVal = values
                        valuesDict.update(
                            {
                                "nominalValue": nominal,
                                "rangeMinValue": minVal,
                                "rangeMaxValue": maxVal,
                                "name": avr.names,
                            }
                        )
                    axisValues[location.tag].append(valuesDict)
                else:
                    # Multi-axis record: becomes a format-4 location.
                    valuesDict.update(
                        {
                            "location": {i.tag: i.values[0] for i in avr.locations},
                            "name": avr.names,
                        }
                    )
                    format4_locations.append(valuesDict)
        designAxes = [
            {
                "ordering": a.axisOrder,
                "tag": a.tag,
                "name": a.names,
                "values": axisValues[a.tag],
            }
            for a in axes
        ]
        nameTable = self.font.get("name")
        if not nameTable:  # this only happens for unit tests
            nameTable = self.font["name"] = newTable("name")
            nameTable.names = []
        if "ElidedFallbackNameID" in self.stat_:
            # A numeric ID must reference an existing name record.
            nameID = self.stat_["ElidedFallbackNameID"]
            name = nameTable.getDebugName(nameID)
            if not name:
                raise FeatureLibError(
                    f"ElidedFallbackNameID {nameID} points "
                    "to a nameID that does not exist in the "
                    '"name" table',
                    None,
                )
        elif "ElidedFallbackName" in self.stat_:
            # NOTE: here `nameID` holds a list of name records, not an
            # integer; otl.buildStatTable accepts both forms.
            nameID = self.stat_["ElidedFallbackName"]
        otl.buildStatTable(
            self.font,
            designAxes,
            locations=format4_locations,
            elidedFallbackName=nameID,
        )
def build_codepages_(self, pages):
pages2bits = {
1252: 0,
1250: 1,
1251: 2,
1253: 3,
1254: 4,
1255: 5,
1256: 6,
1257: 7,
1258: 8,
874: 16,
932: 17,
936: 18,
949: 19,
950: 20,
1361: 21,
869: 48,
866: 49,
865: 50,
864: 51,
863: 52,
862: 53,
861: 54,
860: 55,
857: 56,
855: 57,
852: 58,
775: 59,
737: 60,
708: 61,
850: 62,
437: 63,
}
bits = [pages2bits[p] for p in pages if p in pages2bits]
pages = []
for i in range(2):
pages.append("")
for j in range(i * 32, (i + 1) * 32):
if j in bits:
pages[i] += "1"
else:
pages[i] += "0"
return [binary2num(p[::-1]) for p in pages]
def buildBASE(self):
if not self.base_horiz_axis_ and not self.base_vert_axis_:
return None
base = otTables.BASE()
base.Version = 0x00010000
base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_)
base.VertAxis = self.buildBASEAxis(self.base_vert_axis_)
result = newTable("BASE")
result.table = base
return result
def buildBASECoord(self, c):
coord = otTables.BaseCoord()
coord.Format = 1
coord.Coordinate = c
return coord
    def buildBASEAxis(self, axis):
        """Build one BASE Axis (horizontal or vertical) subtable.

        *axis* is a (bases, scripts, minmax) triple as stored by
        ``set_base_axis``; returns None when the axis was not defined.
        """
        if not axis:
            return
        bases, scripts, minmax = axis
        axis = otTables.Axis()
        axis.BaseTagList = otTables.BaseTagList()
        axis.BaseTagList.BaselineTag = bases
        axis.BaseTagList.BaseTagCount = len(bases)
        axis.BaseScriptList = otTables.BaseScriptList()
        axis.BaseScriptList.BaseScriptRecord = []
        axis.BaseScriptList.BaseScriptCount = len(scripts)
        for script in sorted(scripts):
            # MinMax records whose first element matches this script tag.
            minmax_for_script = [
                record[1:] for record in minmax if record[0] == script[0]
            ]
            record = otTables.BaseScriptRecord()
            record.BaseScriptTag = script[0]
            record.BaseScript = otTables.BaseScript()
            record.BaseScript.BaseValues = otTables.BaseValues()
            # DefaultIndex points at this script's default baseline tag.
            record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1])
            record.BaseScript.BaseValues.BaseCoord = []
            record.BaseScript.BaseValues.BaseCoordCount = len(script[2])
            record.BaseScript.BaseLangSysRecord = []
            for c in script[2]:
                record.BaseScript.BaseValues.BaseCoord.append(self.buildBASECoord(c))
            for language, min_coord, max_coord in minmax_for_script:
                minmax_record = otTables.MinMax()
                minmax_record.MinCoord = self.buildBASECoord(min_coord)
                minmax_record.MaxCoord = self.buildBASECoord(max_coord)
                minmax_record.FeatMinMaxCount = 0
                if language == "dflt":
                    # The default MinMax lives on the script, not a LangSys.
                    record.BaseScript.DefaultMinMax = minmax_record
                else:
                    lang_record = otTables.BaseLangSysRecord()
                    lang_record.BaseLangSysTag = language
                    lang_record.MinMax = minmax_record
                    record.BaseScript.BaseLangSysRecord.append(lang_record)
            record.BaseScript.BaseLangSysCount = len(
                record.BaseScript.BaseLangSysRecord
            )
            axis.BaseScriptList.BaseScriptRecord.append(record)
        return axis
    def buildGDEF(self):
        """Assemble the ``GDEF`` table, or return None if it would be empty.

        The table version is 1.0 by default, 1.2 when mark glyph sets are
        present, and 1.3 when a variation store is attached.
        """
        gdef = otTables.GDEF()
        gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_()
        gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap)
        gdef.LigCaretList = otl.buildLigCaretList(
            self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap
        )
        gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_()
        gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_()
        gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000
        if self.varstorebuilder:
            store = self.varstorebuilder.finish()
            if store:
                gdef.Version = 0x00010003
                gdef.VarStore = store
                # Optimizing the store renumbers variation indices, so every
                # device table referencing them must be remapped.
                varidx_map = store.optimize()
                gdef.remap_device_varidxes(varidx_map)
                if "GPOS" in self.font:
                    self.font["GPOS"].table.remap_device_varidxes(varidx_map)
        self.model_cache.clear()
        if any(
            (
                gdef.GlyphClassDef,
                gdef.AttachList,
                gdef.LigCaretList,
                gdef.MarkAttachClassDef,
                gdef.MarkGlyphSetsDef,
            )
        ) or hasattr(gdef, "VarStore"):
            result = newTable("GDEF")
            result.table = gdef
            return result
        else:
            return None
    def buildGDEFGlyphClassDef_(self):
        """Build the GDEF GlyphClassDef subtable, or None if empty.

        Explicit ``GlyphClassDef`` statements take precedence; otherwise
        classes are inferred from the lookups, with members of mark classes
        forced to GDEF class 3.
        """
        if self.glyphClassDefs_:
            classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()}
        else:
            classes = {}
            for lookup in self.lookups_:
                classes.update(lookup.inferGlyphClasses())
            for markClass in self.parseTree.markClasses.values():
                for markClassDef in markClass.definitions:
                    for glyph in markClassDef.glyphSet():
                        classes[glyph] = 3  # GDEF class 3: mark glyph
        if classes:
            result = otTables.GlyphClassDef()
            result.classDefs = classes
            return result
        else:
            return None
def buildGDEFMarkAttachClassDef_(self):
classDefs = {g: c for g, (c, _) in self.markAttach_.items()}
if not classDefs:
return None
result = otTables.MarkAttachClassDef()
result.classDefs = classDefs
return result
def buildGDEFMarkGlyphSetsDef_(self):
sets = []
for glyphs, id_ in sorted(
self.markFilterSets_.items(), key=lambda item: item[1]
):
sets.append(glyphs)
return otl.buildMarkGlyphSetsDef(sets, self.glyphMap)
def buildDebg(self):
if "Debg" not in self.font:
self.font["Debg"] = newTable("Debg")
self.font["Debg"].data = {}
self.font["Debg"].data[LOOKUP_DEBUG_INFO_KEY] = self.lookup_locations
    def buildLookups_(self, tag):
        """Build the otTables lookups for *tag* ("GPOS" or "GSUB").

        Assigns ``lookup_index`` to each builder that contributes to this
        table (others keep None) and records debug info per index.
        """
        assert tag in ("GPOS", "GSUB"), tag
        # Reset indices so lookups belonging to the *other* table stay None.
        for lookup in self.lookups_:
            lookup.lookup_index = None
        lookups = []
        for lookup in self.lookups_:
            if lookup.table != tag:
                continue
            name = self.get_lookup_name_(lookup)
            resolved = lookup.promote_lookup_type(is_named_lookup=name is not None)
            if resolved is None:
                raise FeatureLibError(
                    "Within a named lookup block, all rules must be of "
                    "the same lookup type and flag",
                    lookup.location,
                )
            for l in resolved:
                # NOTE(review): when `resolved` has several entries, the
                # builder's lookup_index ends up as the index of the *last*
                # one appended — presumably intentional; confirm upstream.
                lookup.lookup_index = len(lookups)
                self.lookup_locations[tag][str(lookup.lookup_index)] = LookupDebugInfo(
                    location=str(lookup.location),
                    name=name,
                    feature=None,
                )
                lookups.append(l)
        otLookups = []
        for l in lookups:
            try:
                otLookups.append(l.build())
            except OpenTypeLibError as e:
                raise FeatureLibError(str(e), e.location) from e
            except Exception as e:
                location = self.lookup_locations[tag][str(l.lookup_index)].location
                raise FeatureLibError(str(e), location) from e
        return otLookups
    def makeTable(self, tag):
        """Build a complete ``GSUB`` or ``GPOS`` otTables object for *tag*:
        LookupList, FeatureList and ScriptList, with deduplicated feature
        records shared across language systems."""
        table = getattr(otTables, tag, None)()
        table.Version = 0x00010000
        table.ScriptList = otTables.ScriptList()
        table.ScriptList.ScriptRecord = []
        table.FeatureList = otTables.FeatureList()
        table.FeatureList.FeatureRecord = []
        table.LookupList = otTables.LookupList()
        table.LookupList.Lookup = self.buildLookups_(tag)
        # Build a table for mapping (tag, lookup_indices) to feature_index.
        # For example, ('liga', (2,3,7)) --> 23.
        feature_indices = {}
        required_feature_indices = {}  # ('latn', 'DEU') --> 23
        scripts = {}  # 'latn' --> {'DEU': [23, 24]} for feature #23,24
        # Sort the feature table by feature tag:
        # https://github.com/fonttools/fonttools/issues/568
        sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1])
        for key, lookups in sorted(self.features_.items(), key=sortFeatureTag):
            script, lang, feature_tag = key
            # l.lookup_index will be None when a lookup is not needed
            # for the table under construction. For example, substitution
            # rules will have no lookup_index while building GPOS tables.
            # We also deduplicate lookup indices, as they only get applied once
            # within a given feature:
            # https://github.com/fonttools/fonttools/issues/2946
            lookup_indices = tuple(
                dict.fromkeys(
                    l.lookup_index for l in lookups if l.lookup_index is not None
                )
            )
            # 'size' in GPOS and features with variations are kept even when
            # they reference no lookups.
            size_feature = tag == "GPOS" and feature_tag == "size"
            force_feature = self.any_feature_variations(feature_tag, tag)
            if len(lookup_indices) == 0 and not size_feature and not force_feature:
                continue
            for ix in lookup_indices:
                try:
                    # Attach the owning feature to the lookup's debug record.
                    self.lookup_locations[tag][str(ix)] = self.lookup_locations[tag][
                        str(ix)
                    ]._replace(feature=key)
                except KeyError:
                    warnings.warn(
                        "feaLib.Builder subclass needs upgrading to "
                        "stash debug information. See fonttools#2065."
                    )
            feature_key = (feature_tag, lookup_indices)
            feature_index = feature_indices.get(feature_key)
            if feature_index is None:
                # First time we see this (tag, lookups) combination: create
                # a new FeatureRecord and remember its index for reuse.
                feature_index = len(table.FeatureList.FeatureRecord)
                frec = otTables.FeatureRecord()
                frec.FeatureTag = feature_tag
                frec.Feature = otTables.Feature()
                frec.Feature.FeatureParams = self.buildFeatureParams(feature_tag)
                frec.Feature.LookupListIndex = list(lookup_indices)
                frec.Feature.LookupCount = len(lookup_indices)
                table.FeatureList.FeatureRecord.append(frec)
                feature_indices[feature_key] = feature_index
            scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index)
            if self.required_features_.get((script, lang)) == feature_tag:
                required_feature_indices[(script, lang)] = feature_index
        # Build ScriptList.
        for script, lang_features in sorted(scripts.items()):
            srec = otTables.ScriptRecord()
            srec.ScriptTag = script
            srec.Script = otTables.Script()
            srec.Script.DefaultLangSys = None
            srec.Script.LangSysRecord = []
            for lang, feature_indices in sorted(lang_features.items()):
                langrec = otTables.LangSysRecord()
                langrec.LangSys = otTables.LangSys()
                langrec.LangSys.LookupOrder = None
                req_feature_index = required_feature_indices.get((script, lang))
                if req_feature_index is None:
                    # 0xFFFF marks "no required feature" per the OT spec.
                    langrec.LangSys.ReqFeatureIndex = 0xFFFF
                else:
                    langrec.LangSys.ReqFeatureIndex = req_feature_index
                # The required feature is excluded from the ordinary list.
                langrec.LangSys.FeatureIndex = [
                    i for i in feature_indices if i != req_feature_index
                ]
                langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex)
                if lang == "dflt":
                    srec.Script.DefaultLangSys = langrec.LangSys
                else:
                    langrec.LangSysTag = lang
                    srec.Script.LangSysRecord.append(langrec)
            srec.Script.LangSysCount = len(srec.Script.LangSysRecord)
            table.ScriptList.ScriptRecord.append(srec)
        table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord)
        table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
        table.LookupList.LookupCount = len(table.LookupList.Lookup)
        return table
    def makeFeatureVariations(self, table, table_tag):
        """Attach FeatureVariations to *table* from collected conditionsets.

        Gathers, per feature tag, the (raw conditionset, lookup indices)
        pairs belonging to *table_tag* and hands them to
        ``addFeatureVariationsRaw``.
        """
        feature_vars = {}
        has_any_variations = False
        # Sort out which lookups to build, gather their indices
        for (_, _, feature_tag), variations in self.feature_variations_.items():
            feature_vars[feature_tag] = []
            for conditionset, builders in variations.items():
                raw_conditionset = self.conditionsets_[conditionset]
                indices = []
                for b in builders:
                    if b.table != table_tag:
                        continue
                    assert b.lookup_index is not None
                    indices.append(b.lookup_index)
                    has_any_variations = True
                feature_vars[feature_tag].append((raw_conditionset, indices))
        if has_any_variations:
            for feature_tag, conditions_and_lookups in feature_vars.items():
                addFeatureVariationsRaw(
                    self.font, table, conditions_and_lookups, feature_tag
                )
def any_feature_variations(self, feature_tag, table_tag):
for (_, _, feature), variations in self.feature_variations_.items():
if feature != feature_tag:
continue
for conditionset, builders in variations.items():
if any(b.table == table_tag for b in builders):
return True
return False
def get_lookup_name_(self, lookup):
rev = {v: k for k, v in self.named_lookups_.items()}
if lookup in rev:
return rev[lookup]
return None
def add_language_system(self, location, script, language):
# OpenType Feature File Specification, section 4.b.i
if script == "DFLT" and language == "dflt" and self.default_language_systems_:
raise FeatureLibError(
'If "languagesystem DFLT dflt" is present, it must be '
"the first of the languagesystem statements",
location,
)
if script == "DFLT":
if self.seen_non_DFLT_script_:
raise FeatureLibError(
'languagesystems using the "DFLT" script tag must '
"precede all other languagesystems",
location,
)
else:
self.seen_non_DFLT_script_ = True
if (script, language) in self.default_language_systems_:
raise FeatureLibError(
'"languagesystem %s %s" has already been specified'
% (script.strip(), language.strip()),
location,
)
self.default_language_systems_.add((script, language))
def get_default_language_systems_(self):
# OpenType Feature File specification, 4.b.i. languagesystem:
# If no "languagesystem" statement is present, then the
# implementation must behave exactly as though the following
# statement were present at the beginning of the feature file:
# languagesystem DFLT dflt;
if self.default_language_systems_:
return frozenset(self.default_language_systems_)
else:
return frozenset({("DFLT", "dflt")})
def start_feature(self, location, name, use_extension=False):
if use_extension and name != "aalt":
raise FeatureLibError(
"'useExtension' keyword for feature blocks is allowed only for 'aalt' feature",
location,
)
self.language_systems = self.get_default_language_systems_()
self.script_ = "DFLT"
self.cur_lookup_ = None
self.cur_feature_name_ = name
self.lookupflag_ = 0
self.lookupflag_markFilterSet_ = None
self.use_extension_ = use_extension
if name == "aalt":
self.aalt_location_ = location
self.aalt_use_extension_ = use_extension
def end_feature(self):
assert self.cur_feature_name_ is not None
self.cur_feature_name_ = None
self.language_systems = None
self.cur_lookup_ = None
self.lookupflag_ = 0
self.lookupflag_markFilterSet_ = None
self.use_extension_ = False
def start_lookup_block(self, location, name, use_extension=False):
if name in self.named_lookups_:
raise FeatureLibError(
'Lookup "%s" has already been defined' % name, location
)
if self.cur_feature_name_ == "aalt":
raise FeatureLibError(
"Lookup blocks cannot be placed inside 'aalt' features; "
"move it out, and then refer to it with a lookup statement",
location,
)
self.cur_lookup_name_ = name
self.named_lookups_[name] = None
self.cur_lookup_ = None
self.use_extension_ = use_extension
if self.cur_feature_name_ is None:
self.lookupflag_ = 0
self.lookupflag_markFilterSet_ = None
def end_lookup_block(self):
assert self.cur_lookup_name_ is not None
self.cur_lookup_name_ = None
self.cur_lookup_ = None
self.use_extension_ = False
if self.cur_feature_name_ is None:
self.lookupflag_ = 0
self.lookupflag_markFilterSet_ = None
def add_lookup_call(self, lookup_name):
assert lookup_name in self.named_lookups_, lookup_name
self.cur_lookup_ = None
lookup = self.named_lookups_[lookup_name]
if lookup is not None: # skip empty named lookup
self.add_lookup_to_feature_(lookup, self.cur_feature_name_)
    def set_font_revision(self, location, revision):
        # Record the FontRevision value; consumed later when building `head`.
        self.fontRevision_ = revision
    def set_language(self, location, language, include_default, required):
        """Handle a ``language`` statement inside a feature block.

        Switches the active language system to (current script, *language*),
        optionally inheriting the lookups registered for the script's "dflt"
        language, and optionally marking this feature as required.
        """
        assert len(language) == 4
        if self.cur_feature_name_ in ("aalt", "size"):
            raise FeatureLibError(
                "Language statements are not allowed "
                'within "feature %s"' % self.cur_feature_name_,
                location,
            )
        if self.cur_feature_name_ is None:
            raise FeatureLibError(
                "Language statements are not allowed "
                "within standalone lookup blocks",
                location,
            )
        self.cur_lookup_ = None
        key = (self.script_, language, self.cur_feature_name_)
        lookups = self.features_.get((key[0], "dflt", key[2]))
        if (language == "dflt" or include_default) and lookups:
            # Inherit (copy) the default language's lookups.
            self.features_[key] = lookups[:]
        else:
            # if we aren't including default we need to manually remove the
            # default lookups, which were added to all declared langsystems
            # as they were encountered (we don't remove all lookups because
            # we want to allow duplicate script/lang statements;
            # see https://github.com/fonttools/fonttools/issues/3748
            cur_lookups = self.features_.get(key, [])
            self.features_[key] = [x for x in cur_lookups if x not in lookups]
        self.language_systems = frozenset([(self.script_, language)])
        if required:
            key = (self.script_, language)
            if key in self.required_features_:
                raise FeatureLibError(
                    "Language %s (script %s) has already "
                    "specified feature %s as its required feature"
                    % (
                        language.strip(),
                        self.script_.strip(),
                        self.required_features_[key].strip(),
                    ),
                    location,
                )
            self.required_features_[key] = self.cur_feature_name_
def getMarkAttachClass_(self, location, glyphs):
glyphs = frozenset(glyphs)
id_ = self.markAttachClassID_.get(glyphs)
if id_ is not None:
return id_
id_ = len(self.markAttachClassID_) + 1
self.markAttachClassID_[glyphs] = id_
for glyph in glyphs:
if glyph in self.markAttach_:
_, loc = self.markAttach_[glyph]
raise FeatureLibError(
"Glyph %s already has been assigned "
"a MarkAttachmentType at %s" % (glyph, loc),
location,
)
self.markAttach_[glyph] = (id_, location)
return id_
def getMarkFilterSet_(self, location, glyphs):
glyphs = frozenset(glyphs)
id_ = self.markFilterSets_.get(glyphs)
if id_ is not None:
return id_
id_ = len(self.markFilterSets_)
self.markFilterSets_[glyphs] = id_
return id_
def set_lookup_flag(self, location, value, markAttach, markFilter):
value = value & 0xFF
if markAttach is not None:
markAttachClass = self.getMarkAttachClass_(location, markAttach)
value = value | (markAttachClass << 8)
if markFilter is not None:
markFilterSet = self.getMarkFilterSet_(location, markFilter)
value = value | 0x10
self.lookupflag_markFilterSet_ = markFilterSet
else:
self.lookupflag_markFilterSet_ = None
self.lookupflag_ = value
    def set_script(self, location, script):
        """Handle a ``script`` statement inside a feature block.

        Resets per-script state and implicitly selects the "dflt" language
        for the new script (via ``set_language``).
        """
        if self.cur_feature_name_ in ("aalt", "size"):
            raise FeatureLibError(
                "Script statements are not allowed "
                'within "feature %s"' % self.cur_feature_name_,
                location,
            )
        if self.cur_feature_name_ is None:
            raise FeatureLibError(
                "Script statements are not allowed " "within standalone lookup blocks",
                location,
            )
        if self.language_systems == {(script, "dflt")}:
            # Nothing to do.
            return
        self.cur_lookup_ = None
        self.script_ = script
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
        # A script statement implies `language dflt;` with default inclusion.
        self.set_language(location, "dflt", include_default=True, required=False)
def find_lookup_builders_(self, lookups):
"""Helper for building chain contextual substitutions
Given a list of lookup names, finds the LookupBuilder for each name.
If an input name is None, it gets mapped to a None LookupBuilder.
"""
lookup_builders = []
for lookuplist in lookups:
if lookuplist is not None:
lookup_builders.append(
[self.named_lookups_.get(l.name) for l in lookuplist]
)
else:
lookup_builders.append(None)
return lookup_builders
def add_attach_points(self, location, glyphs, contourPoints):
for glyph in glyphs:
self.attachPoints_.setdefault(glyph, set()).update(contourPoints)
    def add_feature_reference(self, location, featureName):
        # `feature xxxx;` references are only valid inside `feature aalt`;
        # they are resolved after parsing, when aalt alternates are gathered.
        if self.cur_feature_name_ != "aalt":
            raise FeatureLibError(
                'Feature references are only allowed inside "feature aalt"', location
            )
        self.aalt_features_.append((location, featureName))
    def add_featureName(self, tag):
        # Record that feature `tag` carries a featureNames block.
        self.featureNames_.add(tag)
    def add_cv_parameter(self, tag):
        # Record that feature `tag` carries a cvParameters block.
        self.cv_parameters_.add(tag)
def add_to_cv_num_named_params(self, tag):
"""Adds new items to ``self.cv_num_named_params_``
or increments the count of existing items."""
if tag in self.cv_num_named_params_:
self.cv_num_named_params_[tag] += 1
else:
self.cv_num_named_params_[tag] = 1
    def add_cv_character(self, character, tag):
        # Append a cvParameters `Character` entry for feature `tag`.
        self.cv_characters_[tag].append(character)
def set_base_axis(self, bases, scripts, vertical, minmax=[]):
if vertical:
self.base_vert_axis_ = (bases, scripts, minmax)
else:
self.base_horiz_axis_ = (bases, scripts, minmax)
def set_size_parameters(
self, location, DesignSize, SubfamilyID, RangeStart, RangeEnd
):
if self.cur_feature_name_ != "size":
raise FeatureLibError(
"Parameters statements are not allowed "
'within "feature %s"' % self.cur_feature_name_,
location,
)
self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd]
for script, lang in self.language_systems:
key = (script, lang, self.cur_feature_name_)
self.features_.setdefault(key, [])
# GSUB rules
def add_any_subst_(self, location, mapping):
lookup = self.get_lookup_(location, AnySubstBuilder, mapping=mapping)
for key, value in mapping.items():
if key in lookup.mapping:
if value == lookup.mapping[key]:
log.info(
'Removing duplicate substitution from "%s" to "%s" at %s',
", ".join(key),
", ".join(value),
location,
)
else:
raise FeatureLibError(
'Already defined substitution for "%s"' % ", ".join(key),
location,
)
lookup.mapping[key] = value
# GSUB 1
def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
if self.cur_feature_name_ == "aalt":
for from_glyph, to_glyph in mapping.items():
alts = self.aalt_alternates_.setdefault(from_glyph, [])
if to_glyph not in alts:
alts.append(to_glyph)
return
if prefix or suffix or forceChain:
self.add_single_subst_chained_(location, prefix, suffix, mapping)
return
self.add_any_subst_(
location,
{(key,): (value,) for key, value in mapping.items()},
)
# GSUB 2
def add_multiple_subst(
self, location, prefix, glyph, suffix, replacements, forceChain=False
):
if prefix or suffix or forceChain:
self.add_multi_subst_chained_(location, prefix, glyph, suffix, replacements)
return
self.add_any_subst_(
location,
{(glyph,): tuple(replacements)},
)
# GSUB 3
def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
if self.cur_feature_name_ == "aalt":
alts = self.aalt_alternates_.setdefault(glyph, [])
alts.extend(g for g in replacement if g not in alts)
return
if prefix or suffix:
chain = self.get_lookup_(location, ChainContextSubstBuilder)
lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup]))
else:
lookup = self.get_lookup_(location, AlternateSubstBuilder)
if glyph in lookup.alternates:
raise FeatureLibError(
'Already defined alternates for glyph "%s"' % glyph, location
)
# We allow empty replacement glyphs here.
lookup.alternates[glyph] = replacement
# GSUB 4
def add_ligature_subst(
self, location, prefix, glyphs, suffix, replacement, forceChain
):
if prefix or suffix or forceChain:
self.add_ligature_subst_chained_(
location, prefix, glyphs, suffix, replacement
)
return
if not all(glyphs):
raise FeatureLibError("Empty glyph class in substitution", location)
# OpenType feature file syntax, section 5.d, "Ligature substitution":
# "Since the OpenType specification does not allow ligature
# substitutions to be specified on target sequences that contain
# glyph classes, the implementation software will enumerate
# all specific glyph sequences if glyph classes are detected"
self.add_any_subst_(
location,
{g: (replacement,) for g in itertools.product(*glyphs)},
)
# GSUB 5/6
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
if not all(glyphs) or not all(prefix) or not all(suffix):
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
lookup = self.get_lookup_(location, ChainContextSubstBuilder)
lookup.rules.append(
ChainContextualRule(
prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
)
)
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
if not mapping or not all(prefix) or not all(suffix):
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
# https://github.com/fonttools/fonttools/issues/512
# https://github.com/fonttools/fonttools/issues/2150
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_subst(mapping, SingleSubstBuilder)
if sub is None:
sub = self.get_chained_lookup_(location, SingleSubstBuilder)
sub.mapping.update(mapping)
chain.rules.append(
ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub])
)
def add_multi_subst_chained_(self, location, prefix, glyph, suffix, replacements):
if not all(prefix) or not all(suffix):
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
# https://github.com/fonttools/fonttools/issues/3551
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_subst({glyph: replacements}, MultipleSubstBuilder)
if sub is None:
sub = self.get_chained_lookup_(location, MultipleSubstBuilder)
sub.mapping[glyph] = replacements
chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [sub]))
def add_ligature_subst_chained_(
self, location, prefix, glyphs, suffix, replacement
):
# https://github.com/fonttools/fonttools/issues/3701
if not all(prefix) or not all(suffix):
raise FeatureLibError(
"Empty glyph class in contextual substitution", location
)
chain = self.get_lookup_(location, ChainContextSubstBuilder)
sub = chain.find_chainable_ligature_subst(glyphs, replacement)
if sub is None:
sub = self.get_chained_lookup_(location, LigatureSubstBuilder)
for g in itertools.product(*glyphs):
existing = sub.ligatures.get(g, replacement)
if existing != replacement:
raise FeatureLibError(
f"Conflicting ligature sub rules: '{g}' maps to '{existing}' and '{replacement}'",
location,
)
sub.ligatures[g] = replacement
chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [sub]))
# GSUB 8
    def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping):
        # GSUB type 8: reverse chaining contextual single substitution.
        if not mapping:
            raise FeatureLibError("Empty glyph class in substitution", location)
        lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder)
        lookup.rules.append((old_prefix, old_suffix, mapping))
# GPOS rules
# GPOS 1
def add_single_pos(self, location, prefix, suffix, pos, forceChain):
if prefix or suffix or forceChain:
self.add_single_pos_chained_(location, prefix, suffix, pos)
else:
lookup = self.get_lookup_(location, SinglePosBuilder)
for glyphs, value in pos:
if not glyphs:
raise FeatureLibError(
"Empty glyph class in positioning rule", location
)
otValueRecord = self.makeOpenTypeValueRecord(
location, value, pairPosContext=False
)
for glyph in glyphs:
try:
lookup.add_pos(location, glyph, otValueRecord)
except OpenTypeLibError as e:
raise FeatureLibError(str(e), e.location) from e
# GPOS 2
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
if not glyphclass1 or not glyphclass2:
raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
cls1 = tuple(sorted(set(glyphclass1)))
cls2 = tuple(sorted(set(glyphclass2)))
lookup.addClassPair(location, cls1, v1, cls2, v2)
def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2):
if not glyph1 or not glyph2:
raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, PairPosBuilder)
v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True)
v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True)
lookup.addGlyphPair(location, glyph1, v1, glyph2, v2)
# GPOS 3
def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor):
if not glyphclass:
raise FeatureLibError("Empty glyph class in positioning rule", location)
lookup = self.get_lookup_(location, CursivePosBuilder)
lookup.add_attachment(
location,
glyphclass,
self.makeOpenTypeAnchor(location, entryAnchor),
self.makeOpenTypeAnchor(location, exitAnchor),
)
# GPOS 4
def add_mark_base_pos(self, location, bases, marks):
builder = self.get_lookup_(location, MarkBasePosBuilder)
self.add_marks_(location, builder, marks)
if not bases:
raise FeatureLibError("Empty glyph class in positioning rule", location)
for baseAnchor, markClass in marks:
otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor)
for base in bases:
builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor
# GPOS 5
def add_mark_lig_pos(self, location, ligatures, components):
    """Add a mark-to-ligature attachment rule (GPOS LookupType 5)."""
    builder = self.get_lookup_(location, MarkLigPosBuilder)
    if not ligatures:
        raise FeatureLibError("Empty glyph class in positioning rule", location)
    # One anchor dict per ligature component, keyed by mark class name.
    componentAnchors = []
    for marks in components:
        self.add_marks_(location, builder, marks)
        componentAnchors.append(
            {
                markClass.name: self.makeOpenTypeAnchor(location, ligAnchor)
                for ligAnchor, markClass in marks
            }
        )
    for glyph in ligatures:
        builder.ligatures[glyph] = componentAnchors
# GPOS 6
def add_mark_mark_pos(self, location, baseMarks, marks):
    """Add a mark-to-mark attachment rule (GPOS LookupType 6)."""
    builder = self.get_lookup_(location, MarkMarkPosBuilder)
    self.add_marks_(location, builder, marks)
    if not baseMarks:
        raise FeatureLibError("Empty glyph class in positioning rule", location)
    # Attach each (anchor, mark class) pair to every base mark glyph.
    for baseAnchor, markClass in marks:
        anchor = self.makeOpenTypeAnchor(location, baseAnchor)
        className = markClass.name
        for baseMark in baseMarks:
            perMark = builder.baseMarks.setdefault(baseMark, {})
            perMark[className] = anchor
# GPOS 7/8
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
    """Add a chained contextual positioning rule (GPOS LookupType 7/8)."""
    if not (all(prefix) and all(glyphs) and all(suffix)):
        raise FeatureLibError(
            "Empty glyph class in contextual positioning rule", location
        )
    builder = self.get_lookup_(location, ChainContextPosBuilder)
    rule = ChainContextualRule(
        prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
    )
    builder.rules.append(rule)
def add_single_pos_chained_(self, location, prefix, suffix, pos):
    # Adds a chained contextual rule that applies single positioning to the
    # marked glyphs.  `pos` is a list of (glyphs, value) pairs; a value of
    # None means "no positioning" for that slot.
    if not pos or not all(prefix) or not all(suffix):
        raise FeatureLibError(
            "Empty glyph class in contextual positioning rule", location
        )
    # https://github.com/fonttools/fonttools/issues/514
    chain = self.get_lookup_(location, ChainContextPosBuilder)
    # Collect the single-pos lookups already referenced by earlier rules of
    # this chain, so compatible ones can be reused instead of duplicated.
    targets = []
    for _, _, _, lookups in chain.rules:
        targets.extend(lookups)
    subs = []
    for glyphs, value in pos:
        if value is None:
            subs.append(None)
            continue
        otValue = self.makeOpenTypeValueRecord(
            location, value, pairPosContext=False
        )
        # Reuse an existing chained single-pos lookup when it can accept this
        # glyphs/value combination; otherwise start a fresh chained lookup.
        sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
        if sub is None:
            sub = self.get_chained_lookup_(location, SinglePosBuilder)
            targets.append(sub)
        for glyph in glyphs:
            sub.add_pos(location, glyph, otValue)
        subs.append(sub)
    assert len(pos) == len(subs), (pos, subs)
    chain.rules.append(
        ChainContextualRule(prefix, [g for g, v in pos], suffix, subs)
    )
def add_marks_(self, location, lookupBuilder, marks):
    """Helper for add_mark_{base,liga,mark}_pos.

    Registers every glyph of every referenced mark class on the lookup
    builder, rejecting glyphs that appear in two different mark classes.
    """
    for _, markClass in marks:
        for markClassDef in markClass.definitions:
            for mark in markClassDef.glyphs.glyphSet():
                if mark not in lookupBuilder.marks:
                    # deepcopy the anchor before conversion — NOTE(review):
                    # presumably so conversion cannot mutate the anchor shared
                    # by other lookups referencing the same mark class; confirm.
                    otMarkAnchor = self.makeOpenTypeAnchor(
                        location, copy.deepcopy(markClassDef.anchor)
                    )
                    lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
                else:
                    # A glyph may only belong to one mark class per lookup.
                    existingMarkClass = lookupBuilder.marks[mark][0]
                    if markClass.name != existingMarkClass:
                        raise FeatureLibError(
                            "Glyph %s cannot be in both @%s and @%s"
                            % (mark, existingMarkClass, markClass.name),
                            location,
                        )
def add_subtable_break(self, location):
    """Record an explicit subtable break in the currently open lookup."""
    self.cur_lookup_.add_subtable_break(location)
def setGlyphClass_(self, location, glyph, glyphClass):
    """Record the GDEF glyph class of `glyph`, rejecting conflicting reassignments."""
    previous = self.glyphClassDefs_.get(glyph, (None, None))
    oldClass, oldLocation = previous
    if oldClass and oldClass != glyphClass:
        raise FeatureLibError(
            "Glyph %s was assigned to a different class at %s"
            % (glyph, oldLocation),
            location,
        )
    # Re-assignments to the same class are allowed; the location is updated.
    self.glyphClassDefs_[glyph] = (glyphClass, location)
def add_glyphClassDef(
    self, location, baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs
):
    """Assign GDEF glyph classes: 1=base, 2=ligature, 3=mark, 4=component."""
    assignments = (
        (baseGlyphs, 1),
        (ligatureGlyphs, 2),
        (markGlyphs, 3),
        (componentGlyphs, 4),
    )
    for glyphs, classNumber in assignments:
        for glyph in glyphs:
            self.setGlyphClass_(location, glyph, classNumber)
def add_ligatureCaretByIndex_(self, location, glyphs, carets):
    """Record LigatureCaretByIndex carets; the first definition per glyph wins."""
    for glyph in glyphs:
        self.ligCaretPoints_.setdefault(glyph, carets)
def makeLigCaret(self, location, caret):
    # Convert one ligature caret value for compilation.  Plain numbers pass
    # through unchanged; a VariableScalar is resolved to its default value,
    # paired with a variation device table when the scalar actually varies.
    if not isinstance(caret, VariableScalar):
        return caret
    default, device = self.makeVariablePos(location, caret)
    if device is not None:
        return (default, device)
    return default
def add_ligatureCaretByPos_(self, location, glyphs, carets):
    """Record LigatureCaretByPos carets; the first definition per glyph wins."""
    resolved = [self.makeLigCaret(location, caret) for caret in carets]
    for glyph in glyphs:
        self.ligCaretCoords_.setdefault(glyph, resolved)
def add_name_record(self, location, nameID, platformID, platEncID, langID, string):
    """Queue one `name` table record for later compilation."""
    record = [nameID, platformID, platEncID, langID, string]
    self.names_.append(record)
def add_os2_field(self, key, value):
    """Record an OS/2 table field override."""
    self.os2_[key] = value
def add_hhea_field(self, key, value):
    """Record an hhea table field override."""
    self.hhea_[key] = value
def add_vhea_field(self, key, value):
    """Record a vhea table field override."""
    self.vhea_[key] = value
def add_conditionset(self, location, key, value):
    # Register a named condition set for feature variations.  `value` maps
    # axis tags to (minimum, maximum) ranges given in user-space coordinates;
    # they are stored normalized (and avar-mapped when an avar table exists).
    if "fvar" not in self.font:
        raise FeatureLibError(
            "Cannot add feature variations to a font without an 'fvar' table",
            location,
        )

    # Normalize
    axisMap = {
        axis.axisTag: (axis.minValue, axis.defaultValue, axis.maxValue)
        for axis in self.axes
    }

    value = {
        tag: (
            normalizeValue(bottom, axisMap[tag]),
            normalizeValue(top, axisMap[tag]),
        )
        for tag, (bottom, top) in value.items()
    }

    # NOTE: This might result in rounding errors (off-by-ones) compared to
    # rules in Designspace files, since we're working with what's in the
    # `avar` table rather than the original values.
    if "avar" in self.font:
        mapping = self.font["avar"].segments
        value = {
            axis: tuple(
                piecewiseLinearMap(v, mapping[axis]) if axis in mapping else v
                for v in condition_range
            )
            for axis, condition_range in value.items()
        }

    self.conditionsets_[key] = value
def makeVariablePos(self, location, varscalar):
    # Resolve a VariableScalar to (default value, variation device table).
    # The device is None when the scalar does not actually vary.
    if not self.varstorebuilder:
        raise FeatureLibError(
            "Can't define a variable scalar in a non-variable font", location
        )

    varscalar.axes = self.axes
    if not varscalar.does_vary:
        return varscalar.default, None

    default, index = varscalar.add_to_variation_store(
        self.varstorebuilder, self.model_cache, self.font.get("avar")
    )

    device = None
    # 0xFFFFFFFF marks "no variation index"; no device table is built then.
    if index is not None and index != 0xFFFFFFFF:
        device = buildVarDevTable(index)

    return default, device
def makeAnchorPos(self, varscalar, deviceTable, location):
    # Resolve one anchor coordinate to (value, device table).  A plain number
    # keeps its explicit device table (if any); a VariableScalar is resolved
    # through the variation store and must not also carry a device table.
    device = None
    if not isinstance(varscalar, VariableScalar):
        if deviceTable is not None:
            device = otl.buildDevice(dict(deviceTable))
        return varscalar, device

    default, device = self.makeVariablePos(location, varscalar)
    if device is not None and deviceTable is not None:
        raise FeatureLibError(
            "Can't define a device coordinate and variable scalar", location
        )
    return default, device
def makeOpenTypeAnchor(self, location, anchor):
    """ast.Anchor --> otTables.Anchor

    Resolves the x and y coordinates (plain numbers or variable scalars)
    together with their device tables, then builds the binary anchor table.
    Returns None for an <anchor NULL>.
    """
    if anchor is None:
        return None
    # makeAnchorPos both resolves the coordinate and builds the matching
    # device (or variation device) table, so there is no need to pre-build
    # deviceX/deviceY from anchor.{x,y}DeviceTable here: the previous code
    # did so and then unconditionally overwrote both results (dead work,
    # each device table was built twice).
    x, deviceX = self.makeAnchorPos(anchor.x, anchor.xDeviceTable, location)
    y, deviceY = self.makeAnchorPos(anchor.y, anchor.yDeviceTable, location)
    return otl.buildAnchor(x, y, anchor.contourpoint, deviceX, deviceY)
# Map from feature-file value record attribute names (leading letter
# lowercased, e.g. "xAdvance") to the corresponding OpenType ValueRecord
# field name and a flag telling whether that field holds a device table.
_VALUEREC_ATTRS = {
    name[0].lower() + name[1:]: (name, isDevice)
    for _, name, isDevice, _ in otBase.valueRecordFormat
    if not name.startswith("Reserved")
}
def makeOpenTypeValueRecord(self, location, v, pairPosContext):
    """ast.ValueRecord --> otBase.ValueRecord"""
    if not v:
        return None

    vr = {}
    for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items():
        val = getattr(v, astName, None)
        if not val:
            continue
        if isDevice:
            vr[otName] = otl.buildDevice(dict(val))
        elif isinstance(val, VariableScalar):
            # A variable scalar becomes a default value plus a variation
            # device stored in the matching ...Device field (the field name
            # is derived from the first four characters, e.g.
            # "XAdvance" -> "XAdvDevice").
            otDeviceName = otName[0:4] + "Device"
            feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:]
            if getattr(v, feaDeviceName):
                raise FeatureLibError(
                    "Can't define a device coordinate and variable scalar", location
                )
            vr[otName], device = self.makeVariablePos(location, val)
            if device is not None:
                vr[otDeviceName] = device
        else:
            vr[otName] = val
    if pairPosContext and not vr:
        # In pair positioning an all-zero record still needs an advance
        # field so the record does not come out empty.
        vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
    valRec = otl.buildValue(vr)
    return valRec
# ===== venv\Lib\site-packages\fontTools\feaLib\error.py =====
class FeatureLibError(Exception):
    """Error raised while parsing or compiling a feature file.

    The optional ``location`` is prepended to the message when the error
    is rendered as a string.
    """

    def __init__(self, message, location=None):
        Exception.__init__(self, message)
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        return f"{self.location}: {message}" if self.location else message
class IncludedFeaNotFound(FeatureLibError):
    """Raised when an include() statement names a file that cannot be found."""

    def __str__(self):
        assert self.location is not None
        missing_path = Exception.__str__(self)
        message = (
            "The following feature file should be included but cannot be found: "
            f"{missing_path}"
        )
        return f"{self.location}: {message}"
# ===== venv\Lib\site-packages\fontTools\feaLib\lexer.py =====
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os
try:
import cython
except ImportError:
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
class Lexer(object):
    """Tokenizer for OpenType feature files.

    Iterating over a Lexer yields ``(token_type, token, location)`` triples.
    NEWLINE tokens are produced internally (to track line numbers) but are
    filtered out before being handed to callers.
    """

    # Token type tags.
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING = "STRING"
    NAME = "NAME"
    FILENAME = "FILENAME"
    GLYPHCLASS = "GLYPHCLASS"
    CID = "CID"
    SYMBOL = "SYMBOL"
    COMMENT = "COMMENT"
    NEWLINE = "NEWLINE"
    ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"

    # Character classes used by the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
    CHAR_DIGIT_ = "0123456789"
    CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
    CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
    CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"

    RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")

    # After an "include" keyword the lexer switches to filename mode so the
    # parenthesized path is scanned as a single FILENAME token.
    MODE_NORMAL_ = "NORMAL"
    MODE_FILENAME_ = "FILENAME"

    def __init__(self, text, filename):
        self.filename_ = filename
        self.line_ = 1          # 1-based current line number
        self.pos_ = 0           # scan position within text_
        self.line_start_ = 0    # position where the current line begins
        self.text_ = text
        self.text_length_ = len(text)
        self.mode_ = Lexer.MODE_NORMAL_

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens; they only exist to keep line numbers accurate.
        while True:
            token_type, token, location = self.next_()
            if token_type != Lexer.NEWLINE:
                return (token_type, token, location)

    def location_(self):
        # Current position as a (file, line, column) triple, 1-based column.
        column = self.pos_ - self.line_start_ + 1
        return FeatureLibLocation(self.filename_ or "", self.line_, column)

    def next_(self):
        # Scan and return the next token, including NEWLINE tokens.
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None

        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # Treat "\r\n" as a single newline.
            self.pos_ += 2 if next_char == "\n" else 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "#":
            # Comments run to the end of the line.
            self.scan_until_(Lexer.CHAR_NEWLINE_)
            return (Lexer.COMMENT, text[start : self.pos_], location)

        if self.mode_ is Lexer.MODE_FILENAME_:
            # Immediately after "include": the next token must be "(path)".
            if cur_char != "(":
                raise FeatureLibError("Expected '(' before file name", location)
            self.scan_until_(")")
            cur_char = text[self.pos_] if self.pos_ < limit else None
            if cur_char != ")":
                raise FeatureLibError("Expected ')' after file name", location)
            self.pos_ += 1
            self.mode_ = Lexer.MODE_NORMAL_
            return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)

        if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
            # "\123" is a CID reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
        if cur_char == "@":
            # "@Name" is a glyph class reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            glyphclass = text[start + 1 : self.pos_]
            if len(glyphclass) < 1:
                raise FeatureLibError("Expected glyph class name", location)
            if not Lexer.RE_GLYPHCLASS.match(glyphclass):
                raise FeatureLibError(
                    "Glyph class names must consist of letters, digits, "
                    "underscore, period or hyphen",
                    location,
                )
            return (Lexer.GLYPHCLASS, glyphclass, location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start : self.pos_]
            if token == "include":
                # Switch modes so the following "(path)" is lexed as FILENAME.
                self.mode_ = Lexer.MODE_FILENAME_
            return (Lexer.NAME, token, location)
        if cur_char == "0" and next_char in "xX":
            self.pos_ += 2
            self.scan_over_(Lexer.CHAR_HEXDIGIT_)
            return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
        if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
            # A leading zero marks an octal number.
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            # Negative number; mirrors the positive-number case above.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char in Lexer.CHAR_SYMBOL_:
            self.pos_ += 1
            return (Lexer.SYMBOL, cur_char, location)
        if cur_char == '"':
            self.pos_ += 1
            self.scan_until_('"')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # strip newlines embedded within a string
                string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
                return (Lexer.STRING, string, location)
            else:
                raise FeatureLibError("Expected '\"' to terminate string", location)
        raise FeatureLibError("Unexpected character: %r" % cur_char, location)

    def scan_over_(self, valid):
        # Advance pos_ over a run of characters from `valid`.
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        # Advance pos_ up to (not past) the first character in `stop_at`.
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

    def scan_anonymous_block(self, tag):
        # Return everything up to the terminating "} tag;" as a single
        # ANONYMOUS_BLOCK token, leaving pos_ at the closing brace.
        location = self.location_()
        tag = tag.strip()
        self.scan_until_(Lexer.CHAR_NEWLINE_)
        self.scan_over_(Lexer.CHAR_NEWLINE_)
        regexp = r"}\s*" + tag + r"\s*;"
        split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
        if len(split) != 2:
            raise FeatureLibError(
                "Expected '} %s;' to terminate anonymous block" % tag, location
            )
        self.pos_ += len(split[0])
        return (Lexer.ANONYMOUS_BLOCK, split[0], location)
class IncludingLexer(object):
    """A Lexer that follows include statements.

    The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
    order:

    1. If the source font is UFO format, then relative to the UFO's
       font directory
    2. relative to the top-level include file
    3. relative to the parent include file

    We only support 1 (via includeDir) and 2.
    """

    def __init__(self, featurefile, *, includeDir=None):
        """Initializes an IncludingLexer.

        Behavior:
            If includeDir is passed, it will be used to determine the top-level
            include directory to use for all encountered include statements. If it is
            not passed, ``os.path.dirname(featurefile)`` will be considered the
            include directory.
        """
        # Stack of active lexers; the innermost included file is on top.
        self.lexers_ = [self.make_lexer_(featurefile)]
        self.featurefilepath = self.lexers_[0].filename_
        self.includeDir = includeDir

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        while self.lexers_:
            lexer = self.lexers_[-1]
            try:
                token_type, token, location = next(lexer)
            except StopIteration:
                # Current (possibly included) file is exhausted; resume parent.
                self.lexers_.pop()
                continue
            if token_type is Lexer.NAME and token == "include":
                fname_type, fname_token, fname_location = lexer.next()
                if fname_type is not Lexer.FILENAME:
                    raise FeatureLibError("Expected file name", fname_location)
                # semi_type, semi_token, semi_location = lexer.next()
                # if semi_type is not Lexer.SYMBOL or semi_token != ";":
                #     raise FeatureLibError("Expected ';'", semi_location)
                if os.path.isabs(fname_token):
                    path = fname_token
                else:
                    if self.includeDir is not None:
                        curpath = self.includeDir
                    elif self.featurefilepath is not None:
                        curpath = os.path.dirname(self.featurefilepath)
                    else:
                        # if the IncludingLexer was initialized from an in-memory
                        # file-like stream, it doesn't have a 'name' pointing to
                        # its filesystem path, therefore we fall back to using the
                        # current working directory to resolve relative includes
                        curpath = os.getcwd()
                    path = os.path.join(curpath, fname_token)
                # Cap the include depth to guard against include cycles.
                if len(self.lexers_) >= 5:
                    raise FeatureLibError("Too many recursive includes", fname_location)
                try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        # Accepts either a filesystem path or an open file-like object.
        if hasattr(file_or_path, "read"):
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8-sig")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        # Delegate raw block scanning to the innermost active lexer.
        return self.lexers_[-1].scan_anonymous_block(tag)
class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        # Reads only from the top-level file, so "include" comes through
        # as ordinary NAME/FILENAME tokens instead of being expanded.
        return next(self.lexers_[0])
from typing import NamedTuple
class FeatureLibLocation(NamedTuple):
    """A location in a feature file: source file, line, and column."""

    file: str
    line: int
    column: int

    def __str__(self):
        parts = (self.file, str(self.line), str(self.column))
        return ":".join(parts)
from typing import NamedTuple
# Key under which LookupDebugInfo records (see below) are stored in a font.
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
# Environment variable that toggles lookup debug-info embedding.
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
class LookupDebugInfo(NamedTuple):
    """Information about where a lookup came from, to be embedded in a font."""

    location: str  # feature-file location the lookup was defined at
    name: str      # name of the lookup
    feature: list  # features referencing the lookup
# ===== venv\Lib\site-packages\fontTools\feaLib\parser.py =====
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
from fontTools.feaLib.variableScalar import VariableScalar
from fontTools.misc.encodingTools import getEncoding
from fontTools.misc.textTools import bytechr, tobytes, tostr
import fontTools.feaLib.ast as ast
import logging
import os
import re
log = logging.getLogger(__name__)
class Parser(object):
    """Initializes a Parser object.

    Example:

    .. code:: python

        from fontTools.feaLib.parser import Parser
        parser = Parser(file, font.getReverseGlyphMap())
        parsetree = parser.parse()

    Note: the ``glyphNames`` iterable serves a double role to help distinguish
    glyph names from ranges in the presence of hyphens and to ensure that glyph
    names referenced in a feature file are actually part of a font's glyph set.
    If the iterable is left empty, no glyph name in glyph set checking takes
    place, and all glyph tokens containing hyphens are treated as literal glyph
    names, not as ranges. (Adding a space around the hyphen can, in any case,
    help to disambiguate ranges from glyph names containing hyphens.)

    By default, the parser will follow ``include()`` statements in the feature
    file. To turn this off, pass ``followIncludes=False``. Pass a directory string as
    ``includeDir`` to explicitly declare a directory to search included feature files
    in.
    """

    # Plugin hook: maps a keyword to a callable that takes the parser and
    # returns a statement node; consulted from parse().
    extensions = {}
    # AST module used to construct nodes; subclasses may substitute their own.
    ast = ast
    # Valid stylistic-set (ss01..ss20) and character-variant (cv01..cv99) tags.
    SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)}
    CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)}
def __init__(
    self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs
):
    # Accept the deprecated 'glyphMap' keyword as an alias for glyphNames.
    if "glyphMap" in kwargs:
        from fontTools.misc.loggingTools import deprecateArgument

        deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
        if glyphNames:
            raise TypeError(
                "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive"
            )
        glyphNames = kwargs.pop("glyphMap")
    if kwargs:
        raise TypeError(
            "unsupported keyword argument%s: %s"
            % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs))
        )

    self.glyphNames_ = set(glyphNames)
    self.doc_ = self.ast.FeatureFile()
    # Symbol tables for named anchors, glyph classes, lookups and value
    # records, each scoped by feature/lookup block.
    self.anchors_ = SymbolTable()
    self.glyphclasses_ = SymbolTable()
    self.lookups_ = SymbolTable()
    self.valuerecords_ = SymbolTable()
    self.symbol_tables_ = {self.anchors_, self.valuerecords_}
    # One-token lookahead over the lexer stream.
    self.next_token_type_, self.next_token_ = (None, None)
    self.cur_comments_ = []
    self.next_token_location_ = None
    lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
    self.lexer_ = lexerClass(featurefile, includeDir=includeDir)
    # Glyph names referenced but absent from the glyph set; reported
    # collectively at the end of parse().
    self.missing = {}
    self.advance_lexer_(comments=True)
def parse(self):
    """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile`
    object representing the root of the abstract syntax tree containing the
    parsed contents of the file."""
    statements = self.doc_.statements
    # Dispatch on the current top-level token until the stream is exhausted.
    while self.next_token_type_ is not None or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("include"):
            statements.append(self.parse_include_())
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_(("anon", "anonymous")):
            statements.append(self.parse_anonymous_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_("languagesystem"):
            statements.append(self.parse_languagesystem_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical=False))
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_block_())
        elif self.is_cur_keyword_("conditionset"):
            statements.append(self.parse_conditionset_())
        elif self.is_cur_keyword_("variation"):
            statements.append(self.parse_feature_block_(variation=True))
        elif self.is_cur_keyword_("table"):
            statements.append(self.parse_table_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical=False))
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            # Registered extension keyword: delegate to the plugin callable.
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
            # Stray semicolons between statements are tolerated.
            continue
        else:
            raise FeatureLibError(
                "Expected feature, languagesystem, lookup, markClass, "
                'table, or glyph class definition, got {} "{}"'.format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    # Report any missing glyphs at the end of parsing
    if self.missing:
        error = [
            " %s (first found at %s)" % (name, loc)
            for name, loc in self.missing.items()
        ]
        raise FeatureLibError(
            "The following glyph names are referenced but are missing from the "
            "glyph set:\n" + ("\n".join(error)),
            None,
        )
    return self.doc_
def parse_anchor_(self):
    # Parses an anchor in any of the four formats given in the feature
    # file specification (2.e.vii).  Returns None for <anchor NULL>.
    self.expect_symbol_("<")
    self.expect_keyword_("anchor")
    location = self.cur_token_location_

    if self.next_token_ == "NULL":  # Format D
        self.expect_keyword_("NULL")
        self.expect_symbol_(">")
        return None

    if self.next_token_type_ == Lexer.NAME:  # Format E
        # Reference to an anchor defined earlier with anchorDef.
        name = self.expect_name_()
        anchordef = self.anchors_.resolve(name)
        if anchordef is None:
            raise FeatureLibError(
                'Unknown anchor "%s"' % name, self.cur_token_location_
            )
        self.expect_symbol_(">")
        return self.ast.Anchor(
            anchordef.x,
            anchordef.y,
            name=name,
            contourpoint=anchordef.contourpoint,
            xDeviceTable=None,
            yDeviceTable=None,
            location=location,
        )

    # Formats A-C all start with explicit x/y coordinates.
    x, y = self.expect_number_(variable=True), self.expect_number_(variable=True)

    contourpoint = None
    if self.next_token_ == "contourpoint":  # Format B
        self.expect_keyword_("contourpoint")
        contourpoint = self.expect_number_()

    if self.next_token_ == "<":  # Format C
        xDeviceTable = self.parse_device_()
        yDeviceTable = self.parse_device_()
    else:
        xDeviceTable, yDeviceTable = None, None

    self.expect_symbol_(">")
    return self.ast.Anchor(
        x,
        y,
        name=None,
        contourpoint=contourpoint,
        xDeviceTable=xDeviceTable,
        yDeviceTable=yDeviceTable,
        location=location,
    )
def parse_anchor_marks_(self):
    # Parses a sequence of ``[ mark @MARKCLASS]*.``
    anchorMarks = []  # [(self.ast.Anchor, markClassName)*]
    while self.next_token_ == "<":
        anchor = self.parse_anchor_()
        if anchor is None and self.next_token_ != "mark":
            continue  # without mark, eg. in GPOS type 5
        self.expect_keyword_("mark")
        markClass = self.expect_markClass_reference_()
        anchorMarks.append((anchor, markClass))
    return anchorMarks
def parse_anchordef_(self):
    # Parses a named anchor definition (section 2.e.viii of the feature
    # file specification).
    assert self.is_cur_keyword_("anchorDef")
    location = self.cur_token_location_
    x, y = self.expect_number_(), self.expect_number_()
    contourpoint = None
    if self.next_token_ == "contourpoint":
        self.expect_keyword_("contourpoint")
        contourpoint = self.expect_number_()
    name = self.expect_name_()
    self.expect_symbol_(";")
    anchordef = self.ast.AnchorDefinition(
        name, x, y, contourpoint=contourpoint, location=location
    )
    # Register the definition so later <anchor NAME> references resolve.
    self.anchors_.define(name, anchordef)
    return anchordef
def parse_anonymous_(self):
    # Parses an anonymous data block (section 10 of the feature file spec).
    assert self.is_cur_keyword_(("anon", "anonymous"))
    tag = self.expect_tag_()
    # The block content is scanned raw by the lexer up to "} tag;".
    _, content, location = self.lexer_.scan_anonymous_block(tag)
    self.advance_lexer_()
    self.expect_symbol_("}")
    end_tag = self.expect_tag_()
    assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()"
    self.expect_symbol_(";")
    return self.ast.AnonymousBlock(tag, content, location=location)
def parse_attach_(self):
    # Parses a GDEF Attach statement (section 9.b of the feature file spec).
    assert self.is_cur_keyword_("Attach")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    # One or more contour point indices, terminated by ";".
    contourPoints = {self.expect_number_()}
    while self.next_token_ != ";":
        contourPoints.add(self.expect_number_())
    self.expect_symbol_(";")
    return self.ast.AttachStatement(glyphs, contourPoints, location=location)
def parse_enumerate_(self, vertical):
    # Parse an enumerated pair positioning rule (section 6.b.ii of the
    # feature file specification).
    assert self.cur_token_ in {"enumerate", "enum"}
    self.advance_lexer_()
    return self.parse_position_(enumerated=True, vertical=vertical)
def parse_GlyphClassDef_(self):
    """Parse 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'."""
    assert self.is_cur_keyword_("GlyphClassDef")
    location = self.cur_token_location_
    # The four glyph classes appear in fixed order; each may be omitted.
    # The first three are closed by a comma, the last by a semicolon.
    groups = []
    for terminator in (",", ",", ",", ";"):
        if self.next_token_ != terminator:
            groups.append(self.parse_glyphclass_(accept_glyphname=False))
        else:
            groups.append(None)
        self.expect_symbol_(terminator)
    baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs = groups
    # Note: the AST constructor takes marks before ligatures.
    return self.ast.GlyphClassDefStatement(
        baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location
    )
def parse_glyphclass_definition_(self):
    # Parses glyph class definitions such as '@UPPERCASE = [A-Z];'
    location, name = self.cur_token_location_, self.cur_token_
    self.expect_symbol_("=")
    glyphs = self.parse_glyphclass_(accept_glyphname=False)
    self.expect_symbol_(";")
    glyphclass = self.ast.GlyphClassDefinition(name, glyphs, location=location)
    # Register the class so later @name references resolve.
    self.glyphclasses_.define(name, glyphclass)
    return glyphclass
def split_glyph_range_(self, name, location):
    """Resolve a dashed token like "a-b-c-d" into a (start, limit) range.

    Since v1.20 the OpenType Feature File specification allows dashes in
    glyph names, so such a token is ambiguous: it could be one glyph, or a
    range split at any of its dashes.  Rather than guessing with heuristics
    (which the spec does not require), every possible split is checked
    against the font's glyph set; the token is accepted only when exactly
    one split yields two known glyphs.  Otherwise a compiler error is
    raised — deliberately not a warning, since warnings tend to get
    ignored — telling the designer to add spaces around the intended
    split point.
    """
    parts = name.split("-")
    candidates = []
    for i in range(len(parts)):
        start = "-".join(parts[:i])
        limit = "-".join(parts[i:])
        if start in self.glyphNames_ and limit in self.glyphNames_:
            candidates.append((start, limit))
    if len(candidates) == 1:
        return candidates[0]
    if not candidates:
        raise FeatureLibError(
            '"%s" is not a glyph in the font, and it can not be split '
            "into a range of known glyphs" % name,
            location,
        )
    ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in candidates])
    raise FeatureLibError(
        'Ambiguous glyph range "%s"; '
        "please use %s to clarify what you mean" % (name, ranges),
        location,
    )
def parse_glyphclass_(self, accept_glyphname, accept_null=False):
    # Parses a glyph class, either named or anonymous, or (if
    # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then
    # also accept the special NULL glyph.
    if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID):
        if accept_null and self.next_token_ == "NULL":
            # If you want a glyph called NULL, you should escape it.
            self.advance_lexer_()
            return self.ast.NullGlyph(location=self.cur_token_location_)
        glyph = self.expect_glyph_()
        self.check_glyph_name_in_glyph_set(glyph)
        return self.ast.GlyphName(glyph, location=self.cur_token_location_)
    if self.next_token_type_ is Lexer.GLYPHCLASS:
        # Named class reference: @Name (may be a mark class).
        self.advance_lexer_()
        gc = self.glyphclasses_.resolve(self.cur_token_)
        if gc is None:
            raise FeatureLibError(
                "Unknown glyph class @%s" % self.cur_token_,
                self.cur_token_location_,
            )
        if isinstance(gc, self.ast.MarkClass):
            return self.ast.MarkClassName(gc, location=self.cur_token_location_)
        else:
            return self.ast.GlyphClassName(gc, location=self.cur_token_location_)

    # Anonymous class: "[ ... ]" containing names, ranges, CIDs and
    # nested class references.
    self.expect_symbol_("[")
    location = self.cur_token_location_
    glyphs = self.ast.GlyphClass(location=location)
    while self.next_token_ != "]":
        if self.next_token_type_ is Lexer.NAME:
            glyph = self.expect_glyph_()
            location = self.cur_token_location_
            if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_:
                # Dashed token that is not itself a glyph: try a range split.
                start, limit = self.split_glyph_range_(glyph, location)
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            elif self.next_token_ == "-":
                # Explicit range: "start - limit".
                start = glyph
                self.expect_symbol_("-")
                limit = self.expect_glyph_()
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            else:
                if "-" in glyph and not self.glyphNames_:
                    # Without a glyph set we cannot tell a dashed glyph
                    # name apart from a range, so warn and treat it as a name.
                    log.warning(
                        str(
                            FeatureLibError(
                                f"Ambiguous glyph name that looks like a range: {glyph!r}",
                                location,
                            )
                        )
                    )
                self.check_glyph_name_in_glyph_set(glyph)
                glyphs.append(glyph)
        elif self.next_token_type_ is Lexer.CID:
            glyph = self.expect_glyph_()
            if self.next_token_ == "-":
                # CID range: "\start - \end".
                range_location = self.cur_token_location_
                range_start = self.cur_token_
                self.expect_symbol_("-")
                range_end = self.expect_cid_()
                self.check_glyph_name_in_glyph_set(
                    f"cid{range_start:05d}",
                    f"cid{range_end:05d}",
                )
                glyphs.add_cid_range(
                    range_start,
                    range_end,
                    self.make_cid_range_(range_location, range_start, range_end),
                )
            else:
                glyph_name = f"cid{self.cur_token_:05d}"
                self.check_glyph_name_in_glyph_set(glyph_name)
                glyphs.append(glyph_name)
        elif self.next_token_type_ is Lexer.GLYPHCLASS:
            # Nested named class reference inside the brackets.
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_,
                )
            if isinstance(gc, self.ast.MarkClass):
                gc = self.ast.MarkClassName(gc, location=self.cur_token_location_)
            else:
                gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_)
            glyphs.add_class(gc)
        else:
            raise FeatureLibError(
                "Expected glyph name, glyph range, "
                f"or glyph class reference, found {self.next_token_!r}",
                self.next_token_location_,
            )
    self.expect_symbol_("]")
    return glyphs
def parse_glyph_pattern_(self, vertical):
    """Parse a glyph pattern, including lookups and context, e.g.::

        a b
        a b c' d e
        a b c' lookup ChangeC d e

    Returns ``(prefix, glyphs, lookups, values, suffix, hasMarks)``, where
    ``glyphs`` is the marked (input) run, ``prefix``/``suffix`` the context,
    ``lookups`` the per-glyph lookup lists, and ``values`` the value records.
    Fix: corrected the misspelled "bactrack" in the error message below.
    """
    prefix, glyphs, lookups, values, suffix = ([], [], [], [], [])
    hasMarks = False
    while self.next_token_ not in {"by", "from", ";", ","}:
        gc = self.parse_glyphclass_(accept_glyphname=True)
        marked = False
        if self.next_token_ == "'":
            self.expect_symbol_("'")
            hasMarks = marked = True
        if marked:
            if suffix:
                # makeotf also reports this as an error, while FontForge
                # silently inserts ' in all the intervening glyphs.
                # https://github.com/fonttools/fonttools/pull/1096
                raise FeatureLibError(
                    "Unsupported contextual target sequence: at most "
                    "one run of marked (') glyph/class names allowed",
                    self.cur_token_location_,
                )
            glyphs.append(gc)
        elif glyphs:
            suffix.append(gc)
        else:
            prefix.append(gc)

        if self.is_next_value_():
            values.append(self.parse_valuerecord_(vertical))
        else:
            values.append(None)

        # Explicit lookup references may only follow a marked glyph.
        lookuplist = None
        while self.next_token_ == "lookup":
            if lookuplist is None:
                lookuplist = []
            self.expect_keyword_("lookup")
            if not marked:
                raise FeatureLibError(
                    "Lookups can only follow marked glyphs",
                    self.cur_token_location_,
                )
            lookup_name = self.expect_name_()
            lookup = self.lookups_.resolve(lookup_name)
            if lookup is None:
                raise FeatureLibError(
                    'Unknown lookup "%s"' % lookup_name, self.cur_token_location_
                )
            lookuplist.append(lookup)
        if marked:
            lookups.append(lookuplist)

    if not glyphs and not suffix:  # eg., "sub f f i by"
        assert lookups == []
        return ([], prefix, [None] * len(prefix), values, [], hasMarks)
    else:
        if any(values[: len(prefix)]):
            raise FeatureLibError(
                "Positioning cannot be applied in the backtrack glyph sequence, "
                "before the marked glyph sequence.",
                self.cur_token_location_,
            )
        marked_values = values[len(prefix) : len(prefix) + len(glyphs)]
        if any(marked_values):
            if any(values[len(prefix) + len(glyphs) :]):
                raise FeatureLibError(
                    "Positioning values are allowed only in the marked glyph "
                    "sequence, or after the final glyph node when only one glyph "
                    "node is marked.",
                    self.cur_token_location_,
                )
            values = marked_values
        elif values and values[-1]:
            if len(glyphs) > 1 or any(values[:-1]):
                raise FeatureLibError(
                    "Positioning values are allowed only in the marked glyph "
                    "sequence, or after the final glyph node when only one glyph "
                    "node is marked.",
                    self.cur_token_location_,
                )
            values = values[-1:]
        elif any(values):
            raise FeatureLibError(
                "Positioning values are allowed only in the marked glyph "
                "sequence, or after the final glyph node when only one glyph "
                "node is marked.",
                self.cur_token_location_,
            )
        return (prefix, glyphs, lookups, values, suffix, hasMarks)
def parse_ignore_glyph_pattern_(self, sub):
    """Parse one glyph pattern of an ``ignore sub``/``ignore pos`` rule and
    reduce it to a ``(prefix, glyphs, suffix)`` chaining context.

    Raises FeatureLibError if the pattern names explicit lookups. If no
    glyph is marked, warns and treats the first glyph as the implicit input.
    Fix: corrected the warning text (was missing "at" in "at least one").
    """
    location = self.cur_token_location_
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical=False
    )
    if any(lookups):
        raise FeatureLibError(
            f'No lookups can be specified for "ignore {sub}"', location
        )
    if not hasMarks:
        error = FeatureLibError(
            f'Ambiguous "ignore {sub}", there should be at least one marked glyph',
            location,
        )
        log.warning(str(error))
        # Guess: first glyph is the input, the remainder is lookahead.
        suffix, glyphs = glyphs[1:], glyphs[0:1]
    chainContext = (prefix, glyphs, suffix)
    return chainContext
def parse_ignore_context_(self, sub):
    # Parse a comma-separated list of ignore patterns, terminated by ";".
    contexts = [self.parse_ignore_glyph_pattern_(sub)]
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        contexts.append(self.parse_ignore_glyph_pattern_(sub))
    self.expect_symbol_(";")
    return contexts
def parse_ignore_(self):
    # Parse an "ignore sub"/"ignore pos" rule and build the matching
    # Ignore*Statement from its chaining contexts.
    assert self.is_cur_keyword_("ignore")
    location = self.cur_token_location_
    self.advance_lexer_()
    keyword = self.cur_token_
    if keyword in ("substitute", "sub"):
        return self.ast.IgnoreSubstStatement(
            self.parse_ignore_context_("sub"), location=location
        )
    if keyword in ("position", "pos"):
        return self.ast.IgnorePosStatement(
            self.parse_ignore_context_("pos"), location=location
        )
    raise FeatureLibError(
        'Expected "substitute" or "position"', self.cur_token_location_
    )
def parse_include_(self):
    # Parse an "include(...)" statement; the lexer delivers the filename
    # as a single token.
    assert self.cur_token_ == "include"
    location = self.cur_token_location_
    filename = self.expect_filename_()
    # NOTE(review): the trailing ";" is deliberately not consumed here —
    # presumably it is optional after include() or handled elsewhere; confirm.
    # self.expect_symbol_(";")
    # Uses the module-level `ast` rather than self.ast — presumably
    # intentional (include resolution happens before client AST extensions);
    # verify against how IncludeStatement is consumed.
    return ast.IncludeStatement(filename, location=location)
def parse_language_(self):
    """Parse a ``language`` statement with its optional
    ``exclude_dflt``/``include_dflt`` and ``required`` modifiers."""
    assert self.is_cur_keyword_("language")
    location = self.cur_token_location_
    language = self.expect_language_tag_()
    modifier = None
    if self.next_token_ in {"exclude_dflt", "include_dflt"}:
        modifier = self.expect_name_()
    # Default rules are included unless explicitly excluded.
    include_default = modifier != "exclude_dflt"
    required = False
    if self.next_token_ == "required":
        self.expect_keyword_("required")
        required = True
    self.expect_symbol_(";")
    return self.ast.LanguageStatement(
        language, include_default, required, location=location
    )
def parse_ligatureCaretByIndex_(self):
    # Parse "LigatureCaretByIndex <glyphs> <number>+;" (GDEF): at least one
    # caret index is required.
    assert self.is_cur_keyword_("LigatureCaretByIndex")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    carets = []
    while True:
        carets.append(self.expect_number_())
        if self.next_token_ == ";":
            break
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByIndexStatement(glyphs, carets, location=location)
def parse_ligatureCaretByPos_(self):
    # Parse "LigatureCaretByPos <glyphs> <number>+;" (GDEF): at least one
    # caret coordinate is required; values may be variable.
    assert self.is_cur_keyword_("LigatureCaretByPos")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    carets = []
    while True:
        carets.append(self.expect_number_(variable=True))
        if self.next_token_ == ";":
            break
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location)
def parse_lookup_(self, vertical):
    """Parse a ``lookup`` statement: either a reference to a previously
    defined lookup (``lookup NAME;``) or a full lookup block definition."""
    assert self.is_cur_keyword_("lookup")
    location = self.cur_token_location_
    name = self.expect_name_()
    # A bare "lookup NAME;" refers to an already-defined lookup.
    if self.next_token_ == ";":
        referenced = self.lookups_.resolve(name)
        if referenced is None:
            raise FeatureLibError(
                'Unknown lookup "%s"' % name, self.cur_token_location_
            )
        self.expect_symbol_(";")
        return self.ast.LookupReferenceStatement(referenced, location=location)
    # Otherwise this opens a lookup block, optionally with useExtension.
    use_extension = self.next_token_ == "useExtension"
    if use_extension:
        self.expect_keyword_("useExtension")
    block = self.ast.LookupBlock(name, use_extension, location=location)
    self.parse_block_(block, vertical)
    self.lookups_.define(name, block)
    return block
def parse_lookupflag_(self):
    """Parse a ``lookupflag`` statement, given either as a single number
    (format B) or as a list of named flags (format A)."""
    assert self.is_cur_keyword_("lookupflag")
    location = self.cur_token_location_
    # format B: "lookupflag 6;"
    if self.next_token_type_ == Lexer.NUMBER:
        value = self.expect_number_()
        self.expect_symbol_(";")
        return self.ast.LookupFlagStatement(value, location=location)
    # format A: "lookupflag RightToLeft MarkAttachmentType @M;"
    named_flags = {
        "RightToLeft": 1,
        "IgnoreBaseGlyphs": 2,
        "IgnoreLigatures": 4,
        "IgnoreMarks": 8,
    }
    value = 0
    value_seen = False
    markAttachment = None
    markFilteringSet = None
    seen = set()
    while self.next_token_ != ";":
        token = self.next_token_
        # Each flag keyword may appear at most once.
        if token in seen:
            raise FeatureLibError(
                "%s can be specified only once" % token,
                self.next_token_location_,
            )
        seen.add(token)
        if token == "MarkAttachmentType":
            self.expect_keyword_("MarkAttachmentType")
            markAttachment = self.parse_glyphclass_(accept_glyphname=False)
        elif token == "UseMarkFilteringSet":
            self.expect_keyword_("UseMarkFilteringSet")
            markFilteringSet = self.parse_glyphclass_(accept_glyphname=False)
        elif token in named_flags:
            value_seen = True
            value |= named_flags[self.expect_name_()]
        else:
            raise FeatureLibError(
                '"%s" is not a recognized lookupflag' % token,
                self.next_token_location_,
            )
    self.expect_symbol_(";")
    if not (value_seen or markAttachment or markFilteringSet):
        raise FeatureLibError(
            "lookupflag must have a value", self.next_token_location_
        )
    return self.ast.LookupFlagStatement(
        value,
        markAttachment=markAttachment,
        markFilteringSet=markFilteringSet,
        location=location,
    )
def parse_markClass_(self):
    # Parse "markClass <glyphs> <anchor> @NAME;" and attach the definition
    # to the (possibly already existing) mark class of that name.
    assert self.is_cur_keyword_("markClass")
    location = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    if not glyphs.glyphSet():
        raise FeatureLibError(
            "Empty glyph class in mark class definition", location
        )
    anchor = self.parse_anchor_()
    name = self.expect_class_name_()
    self.expect_symbol_(";")
    markClass = self.doc_.markClasses.get(name)
    if markClass is None:
        # First definition of this class: create it and make it resolvable
        # as a glyph class.
        markClass = self.ast.MarkClass(name)
        self.doc_.markClasses[name] = markClass
        self.glyphclasses_.define(name, markClass)
    definition = self.ast.MarkClassDefinition(
        markClass, anchor, glyphs, location=location
    )
    markClass.addDefinition(definition)
    return definition
def parse_position_(self, enumerated, vertical):
    """Parse a GPOS positioning rule: dispatch to the cursive/base/
    ligature/mark sub-parsers, or handle single, pair, and chaining
    contextual positioning inline."""
    assert self.cur_token_ in {"position", "pos"}
    subparsers = {
        "cursive": self.parse_position_cursive_,  # GPOS type 3
        "base": self.parse_position_base_,  # GPOS type 4
        "ligature": self.parse_position_ligature_,  # GPOS type 5
        "mark": self.parse_position_mark_,  # GPOS type 6
    }
    handler = subparsers.get(self.next_token_)
    if handler is not None:
        return handler(enumerated, vertical)
    location = self.cur_token_location_
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical
    )
    self.expect_symbol_(";")
    if any(lookups):
        # GPOS type 8: Chaining contextual positioning; explicit lookups
        if any(values):
            raise FeatureLibError(
                'If "lookup" is present, no values must be specified', location
            )
        return self.ast.ChainContextPosStatement(
            prefix, glyphs, suffix, lookups, location=location
        )
    # Pair positioning, format A: "pos V 10 A -10;"
    # Pair positioning, format B: "pos V A -20;"
    if not prefix and not suffix and len(glyphs) == 2 and not hasMarks:
        if values[0] is None:  # Format B: "pos V A -20;"
            values.reverse()
        return self.ast.PairPosStatement(
            glyphs[0],
            values[0],
            glyphs[1],
            values[1],
            enumerated=enumerated,
            location=location,
        )
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is only allowed with pair positionings', location
        )
    return self.ast.SinglePosStatement(
        list(zip(glyphs, values)),
        prefix,
        suffix,
        forceChain=hasMarks,
        location=location,
    )
def parse_position_cursive_(self, enumerated, vertical):
    # GPOS type 3: cursive attachment positioning — one glyph class with
    # an entry anchor and an exit anchor.
    location = self.cur_token_location_
    self.expect_keyword_("cursive")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with ' "cursive attachment positioning",
            location,
        )
    target = self.parse_glyphclass_(accept_glyphname=True)
    entry = self.parse_anchor_()
    exit_ = self.parse_anchor_()
    self.expect_symbol_(";")
    return self.ast.CursivePosStatement(target, entry, exit_, location=location)
def parse_position_base_(self, enumerated, vertical):
    # GPOS type 4: mark-to-base attachment positioning.
    location = self.cur_token_location_
    self.expect_keyword_("base")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-base attachment positioning",
            location,
        )
    base_glyphs = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkBasePosStatement(
        base_glyphs, anchor_marks, location=location
    )
def parse_position_ligature_(self, enumerated, vertical):
    # GPOS type 5: mark-to-ligature attachment positioning; one anchor/mark
    # group per ligature component, separated by "ligComponent".
    location = self.cur_token_location_
    self.expect_keyword_("ligature")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-ligature attachment positioning",
            location,
        )
    ligatures = self.parse_glyphclass_(accept_glyphname=True)
    component_marks = [self.parse_anchor_marks_()]
    while self.next_token_ == "ligComponent":
        self.expect_keyword_("ligComponent")
        component_marks.append(self.parse_anchor_marks_())
    self.expect_symbol_(";")
    return self.ast.MarkLigPosStatement(
        ligatures, component_marks, location=location
    )
def parse_position_mark_(self, enumerated, vertical):
    # GPOS type 6: mark-to-mark attachment positioning.
    location = self.cur_token_location_
    self.expect_keyword_("mark")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-mark attachment positioning",
            location,
        )
    base_marks = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkMarkPosStatement(
        base_marks, anchor_marks, location=location
    )
def parse_script_(self):
    # Parse "script <tag>;".
    assert self.is_cur_keyword_("script")
    location = self.cur_token_location_
    script = self.expect_script_tag_()
    self.expect_symbol_(";")
    return self.ast.ScriptStatement(script, location=location)
def parse_substitute_(self):
    """Parse a GSUB rule ("substitute"/"sub"/"reversesub"/"rsub") and return
    the AST statement for whichever GSUB lookup type the rule maps to.

    Dispatch order below: alternate (type 3, "from"), single (type 1),
    glyph deletion (type 2, empty replacement), multiple (type 2),
    ligature (type 4), reverse chaining (type 8), and finally chaining
    contextual (type 6) when explicit lookups are present.
    """
    assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
    location = self.cur_token_location_
    reverse = self.cur_token_ in {"reversesub", "rsub"}
    (
        old_prefix,
        old,
        lookups,
        values,
        old_suffix,
        hasMarks,
    ) = self.parse_glyph_pattern_(vertical=False)
    if any(values):
        # Value records belong to GPOS; "sub" rules must not carry them.
        raise FeatureLibError(
            "Substitution statements cannot contain values", location
        )
    new = []
    if self.next_token_ == "by":
        keyword = self.expect_keyword_("by")
        while self.next_token_ != ";":
            # accept_null allows the NULL glyph, used for glyph deletion.
            gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True)
            new.append(gc)
    elif self.next_token_ == "from":
        keyword = self.expect_keyword_("from")
        new = [self.parse_glyphclass_(accept_glyphname=False)]
    else:
        keyword = None
    self.expect_symbol_(";")
    if len(new) == 0 and not any(lookups):
        raise FeatureLibError(
            'Expected "by", "from" or explicit lookup references',
            self.cur_token_location_,
        )
    # GSUB lookup type 3: Alternate substitution.
    # Format: "substitute a from [a.1 a.2 a.3];"
    if keyword == "from":
        if reverse:
            raise FeatureLibError(
                'Reverse chaining substitutions do not support "from"', location
            )
        if len(old) != 1 or len(old[0].glyphSet()) != 1:
            raise FeatureLibError('Expected a single glyph before "from"', location)
        if len(new) != 1:
            raise FeatureLibError(
                'Expected a single glyphclass after "from"', location
            )
        return self.ast.AlternateSubstStatement(
            old_prefix, old[0], old_suffix, new[0], location=location
        )
    num_lookups = len([l for l in lookups if l is not None])
    # A single NULL replacement marks the rule as a glyph deletion.
    is_deletion = False
    if len(new) == 1 and isinstance(new[0], ast.NullGlyph):
        new = []  # Deletion
        is_deletion = True
    # GSUB lookup type 1: Single substitution.
    # Format A: "substitute a by a.sc;"
    # Format B: "substitute [one.fitted one.oldstyle] by one;"
    # Format C: "substitute [a-d] by [A.sc-D.sc];"
    if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0:
        glyphs = list(old[0].glyphSet())
        replacements = list(new[0].glyphSet())
        # A single replacement glyph fans out to every input glyph (Format B).
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.SingleSubstStatement(
            old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location
        )
    # Glyph deletion, built as GSUB lookup type 2: Multiple substitution
    # with empty replacement.
    if is_deletion and len(old) == 1 and num_lookups == 0:
        return self.ast.MultipleSubstStatement(
            old_prefix,
            old[0],
            old_suffix,
            (),
            forceChain=hasMarks,
            location=location,
        )
    # GSUB lookup type 2: Multiple substitution.
    # Format: "substitute f_f_i by f f i;"
    #
    # GlyphsApp introduces two additional formats:
    # Format 1: "substitute [f_i f_l] by [f f] [i l];"
    # Format 2: "substitute [f_i f_l] by f [i l];"
    # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/
    if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0:
        count = len(old[0].glyphSet())
        for n in new:
            if not list(n.glyphSet()):
                raise FeatureLibError("Empty class in replacement", location)
            # Each replacement class must be a singleton or match the
            # input class size (GlyphsApp class formats above).
            if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count:
                raise FeatureLibError(
                    f'Expected a glyph class with 1 or {count} elements after "by", '
                    f"but found a glyph class with {len(n.glyphSet())} elements",
                    location,
                )
        return self.ast.MultipleSubstStatement(
            old_prefix,
            old[0],
            old_suffix,
            new,
            forceChain=hasMarks,
            location=location,
        )
    # GSUB lookup type 4: Ligature substitution.
    # Format: "substitute f f i by f_f_i;"
    if (
        not reverse
        and len(old) > 1
        and len(new) == 1
        and len(new[0].glyphSet()) == 1
        and num_lookups == 0
    ):
        return self.ast.LigatureSubstStatement(
            old_prefix,
            old,
            old_suffix,
            list(new[0].glyphSet())[0],
            forceChain=hasMarks,
            location=location,
        )
    # GSUB lookup type 8: Reverse chaining substitution.
    if reverse:
        if len(old) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                "only a single glyph or glyph class can be replaced",
                location,
            )
        if len(new) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                'the replacement (after "by") must be a single glyph '
                "or glyph class",
                location,
            )
        if num_lookups != 0:
            raise FeatureLibError(
                "Reverse chaining substitutions cannot call named lookups", location
            )
        # Input and replacement are paired up in sorted order.
        glyphs = sorted(list(old[0].glyphSet()))
        replacements = sorted(list(new[0].glyphSet()))
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.ReverseChainSingleSubstStatement(
            old_prefix, old_suffix, old, new, location=location
        )
    if len(old) > 1 and len(new) > 1:
        raise FeatureLibError(
            "Direct substitution of multiple glyphs by multiple glyphs "
            "is not supported",
            location,
        )
    # If there are remaining glyphs to parse, this is an invalid GSUB statement
    if len(new) != 0 or is_deletion:
        raise FeatureLibError("Invalid substitution statement", location)
    # GSUB lookup type 6: Chaining contextual substitution.
    rule = self.ast.ChainContextSubstStatement(
        old_prefix, old, old_suffix, lookups, location=location
    )
    return rule
def parse_subtable_(self):
    # Parse "subtable;", which requests a subtable break in the compiled
    # lookup.
    assert self.is_cur_keyword_("subtable")
    break_location = self.cur_token_location_
    self.expect_symbol_(";")
    return self.ast.SubtableStatement(location=break_location)
def parse_size_parameters_(self):
    """Parse a ``parameters`` statement used in ``size`` features (see the
    OpenType Feature File specification, section 8.b)."""
    assert self.is_cur_keyword_("parameters")
    location = self.cur_token_location_
    design_size = self.expect_decipoint_()
    subfamily_id = self.expect_number_()
    range_start = 0.0
    range_end = 0.0
    # The size range is read when another number follows, or whenever the
    # subfamily identifier is non-zero.
    if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or subfamily_id != 0:
        range_start = self.expect_decipoint_()
        range_end = self.expect_decipoint_()
    self.expect_symbol_(";")
    return self.ast.SizeParameters(
        design_size, subfamily_id, range_start, range_end, location=location
    )
def parse_size_menuname_(self):
    # Parse "sizemenuname [<ids>] <string>;" inside a size feature; the
    # name record itself is read by parse_name_().
    assert self.is_cur_keyword_("sizemenuname")
    location = self.cur_token_location_
    record = self.parse_name_()
    return self.ast.FeatureNameStatement("size", *record, location=location)
def parse_table_(self):
    """Parse a ``table TAG { ... } TAG;`` block, dispatching the body to the
    tag-specific parser and checking that the closing tag matches."""
    assert self.is_cur_keyword_("table")
    location = self.cur_token_location_
    name = self.expect_tag_()
    table = self.ast.TableBlock(name, location=location)
    self.expect_symbol_("{")
    handler = {
        "GDEF": self.parse_table_GDEF_,
        "head": self.parse_table_head_,
        "hhea": self.parse_table_hhea_,
        "vhea": self.parse_table_vhea_,
        "name": self.parse_table_name_,
        "BASE": self.parse_table_BASE_,
        "OS/2": self.parse_table_OS_2_,
        "STAT": self.parse_table_STAT_,
    }.get(name)
    if handler is None:
        raise FeatureLibError(
            '"table %s" is not supported' % name.strip(), location
        )
    handler(table)
    self.expect_symbol_("}")
    end_tag = self.expect_tag_()
    if end_tag != name:
        raise FeatureLibError(
            'Expected "%s"' % name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
    return table
def parse_table_GDEF_(self, table):
    """Parse the body of a ``table GDEF { ... }`` block, appending the
    parsed statements (and comments) to ``table.statements``.

    Fix: the fallback error message previously omitted GlyphClassDef even
    though the branch above accepts it.
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("Attach"):
            statements.append(self.parse_attach_())
        elif self.is_cur_keyword_("GlyphClassDef"):
            statements.append(self.parse_GlyphClassDef_())
        elif self.is_cur_keyword_("LigatureCaretByIndex"):
            statements.append(self.parse_ligatureCaretByIndex_())
        elif self.is_cur_keyword_("LigatureCaretByPos"):
            statements.append(self.parse_ligatureCaretByPos_())
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected Attach, GlyphClassDef, LigatureCaretByIndex, "
                "or LigatureCaretByPos",
                self.cur_token_location_,
            )
def parse_table_head_(self, table):
    # Parse "table head { ... }": only FontRevision statements (plus
    # comments and stray semicolons) are allowed.
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
            continue
        if self.is_cur_keyword_("FontRevision"):
            statements.append(self.parse_FontRevision_())
            continue
        if self.cur_token_ == ";":
            continue
        raise FeatureLibError("Expected FontRevision", self.cur_token_location_)
def parse_table_hhea_(self, table):
    # Parse "table hhea { ... }": numeric CaretOffset/Ascender/Descender/
    # LineGap fields, plus comments and stray semicolons.
    statements = table.statements
    fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
            continue
        if self.cur_token_ == ";":
            continue
        if self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
            key = self.cur_token_.lower()
            value = self.expect_number_()
            statements.append(
                self.ast.HheaField(key, value, location=self.cur_token_location_)
            )
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
            continue
        raise FeatureLibError(
            "Expected CaretOffset, Ascender, " "Descender or LineGap",
            self.cur_token_location_,
        )
def parse_table_vhea_(self, table):
    # Parse "table vhea { ... }": numeric VertTypo* fields, plus comments
    # and stray semicolons.
    statements = table.statements
    fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
            continue
        if self.cur_token_ == ";":
            continue
        if self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
            key = self.cur_token_.lower()
            value = self.expect_number_()
            statements.append(
                self.ast.VheaField(key, value, location=self.cur_token_location_)
            )
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
            continue
        raise FeatureLibError(
            "Expected VertTypoAscender, "
            "VertTypoDescender or VertTypoLineGap",
            self.cur_token_location_,
        )
def parse_table_name_(self, table):
    # Parse "table name { ... }": only nameid statements (plus comments and
    # stray semicolons) are allowed. parse_nameid_ may return None for
    # records that are dropped; those are not appended.
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
            continue
        if self.is_cur_keyword_("nameid"):
            record = self.parse_nameid_()
            if record:
                statements.append(record)
            continue
        if self.cur_token_ == ";":
            continue
        raise FeatureLibError("Expected nameid", self.cur_token_location_)
def parse_name_(self):
    """Parse a name record, consume the trailing ";", and return
    ``(platformID, platEncID, langID, string)`` with the string unescaped.

    See section 9.e of the OpenType Feature File specification. The ids are
    optional; only platforms 1 (Macintosh) and 3 (Windows) are accepted, and
    missing encoding/language ids fall back to platform defaults below.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        # Explicit platform id, optionally followed by encoding+language ids.
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_
    # Note: "or" (not an is-None check) — an explicit 0 on platform 3 is
    # also replaced by the default.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English
    string = self.expect_string_()
    self.expect_symbol_(";")
    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
def parse_stat_name_(self):
    """Parse a name record for STAT sub-blocks and return
    ``(platformID, platEncID, langID, string)``.

    Mirrors parse_name_() but does NOT consume a trailing ";" — STAT
    statements terminate the whole enclosing statement instead.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        # Explicit platform id, optionally followed by encoding+language ids.
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_
    # Note: "or" (not an is-None check) — an explicit 0 on platform 3 is
    # also replaced by the default.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English
    string = self.expect_string_()
    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
def parse_nameid_(self):
    # Parse 'nameid <id> [<ids>] "<string>";' inside "table name".
    assert self.cur_token_ == "nameid", self.cur_token_
    location = self.cur_token_location_
    nameID = self.expect_any_number_()
    # Name ids are unsigned 15-bit values.
    if nameID > 32767:
        raise FeatureLibError(
            "Name id value cannot be greater than 32767", self.cur_token_location_
        )
    platformID, platEncID, langID, string = self.parse_name_()
    return self.ast.NameRecord(
        nameID, platformID, platEncID, langID, string, location=location
    )
def unescape_string_(self, string, encoding):
    # Expand backslash hex escapes in a name-record string: four hex digits
    # per escape for UTF-16BE records, two for single-byte encodings.
    if encoding == "utf_16_be":
        expanded = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string)
    else:
        expanded = re.sub(
            r"\\[0-9a-fA-F]{2}",
            lambda m: self.unescape_byte_(m, encoding),
            string,
        )
    # The result may still contain surrogate pairs; round-trip through
    # Python's UTF-16 codec with "surrogatepass" to fold them into real
    # code points.
    utf16 = tobytes(expanded, "utf_16_be", "surrogatepass")
    return tostr(utf16, "utf_16_be")
@staticmethod
def unescape_unichr_(match):
n = match.group(0)[1:]
return chr(int(n, 16))
@staticmethod
def unescape_byte_(match, encoding):
    # Convert a matched "\XX" escape (two hex digits) into a character by
    # decoding that single byte with the record's encoding.
    byte_value = int(match.group(0)[1:], 16)
    return bytes([byte_value]).decode(encoding)
def find_previous(self, statements, class_):
    """Return the most recent statement of type *class_*, scanning
    *statements* backwards and skipping comments; return None if the
    nearest non-comment statement has a different type, or if there is
    no non-comment statement at all."""
    for candidate in reversed(statements):
        if isinstance(candidate, self.ast.Comment):
            continue
        # The first non-comment statement decides the outcome.
        return candidate if isinstance(candidate, class_) else None
    return None
def parse_table_BASE_(self, table):
    """Parse the body of a ``table BASE { ... }`` block, appending BaseAxis
    statements (and comments) to ``table.statements``.

    The BaseTagList branches only record the tag list in a local; the
    BaseAxis statement is emitted when the matching BaseScriptList arrives.
    NOTE(review): a BaseScriptList without a preceding BaseTagList would hit
    an unbound local (``horiz_bases``/``vert_bases``) — presumably valid
    input always orders them; confirm. Unknown keywords fall through this
    if/elif chain silently (there is no final ``else``).
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("HorizAxis.BaseTagList"):
            horiz_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("HorizAxis.BaseScriptList"):
            horiz_scripts = self.parse_base_script_list_(len(horiz_bases))
            statements.append(
                self.ast.BaseAxis(
                    horiz_bases,
                    horiz_scripts,
                    False,
                    location=self.cur_token_location_,
                )
            )
        elif self.is_cur_keyword_("HorizAxis.MinMax"):
            # MinMax records attach to the nearest preceding BaseAxis.
            base_script_list = self.find_previous(statements, ast.BaseAxis)
            if base_script_list is None:
                raise FeatureLibError(
                    "MinMax must be preceded by BaseScriptList",
                    self.cur_token_location_,
                )
            if base_script_list.vertical:
                raise FeatureLibError(
                    "HorizAxis.MinMax must be preceded by HorizAxis statements",
                    self.cur_token_location_,
                )
            base_script_list.minmax.append(self.parse_base_minmax_())
        elif self.is_cur_keyword_("VertAxis.BaseTagList"):
            vert_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("VertAxis.BaseScriptList"):
            vert_scripts = self.parse_base_script_list_(len(vert_bases))
            statements.append(
                self.ast.BaseAxis(
                    vert_bases,
                    vert_scripts,
                    True,
                    location=self.cur_token_location_,
                )
            )
        elif self.is_cur_keyword_("VertAxis.MinMax"):
            base_script_list = self.find_previous(statements, ast.BaseAxis)
            if base_script_list is None:
                raise FeatureLibError(
                    "MinMax must be preceded by BaseScriptList",
                    self.cur_token_location_,
                )
            if not base_script_list.vertical:
                raise FeatureLibError(
                    "VertAxis.MinMax must be preceded by VertAxis statements",
                    self.cur_token_location_,
                )
            base_script_list.minmax.append(self.parse_base_minmax_())
        elif self.cur_token_ == ";":
            continue
def parse_table_OS_2_(self, table):
    """Parse the body of a ``table OS/2 { ... }`` block, appending OS2Field
    statements (and comments) to ``table.statements``.

    NOTE(review): any NAME token that matches none of the known keywords
    still produces an ``OS2Field(key, None)`` — presumably tolerated
    downstream; confirm before tightening.
    """
    statements = table.statements
    # Keywords taking a single number value.
    numbers = (
        "FSType",
        "TypoAscender",
        "TypoDescender",
        "TypoLineGap",
        "winAscent",
        "winDescent",
        "XHeight",
        "CapHeight",
        "WeightClass",
        "WidthClass",
        "LowerOpSize",
        "UpperOpSize",
    )
    # Keywords taking a variable-length list of numbers.
    ranges = ("UnicodeRange", "CodePageRange")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            # Field names are stored lowercased; matching is case-sensitive
            # against the original token.
            key = self.cur_token_.lower()
            value = None
            if self.cur_token_ in numbers:
                value = self.expect_number_()
            elif self.is_cur_keyword_("Panose"):
                # Panose is always exactly ten numbers.
                value = []
                for i in range(10):
                    value.append(self.expect_number_())
            elif self.cur_token_ in ranges:
                value = []
                while self.next_token_ != ";":
                    value.append(self.expect_number_())
            elif self.is_cur_keyword_("Vendor"):
                value = self.expect_string_()
            statements.append(
                self.ast.OS2Field(key, value, location=self.cur_token_location_)
            )
        elif self.cur_token_ == ";":
            continue
def parse_STAT_ElidedFallbackName(self):
    # Parse "ElidedFallbackName { name ...; ... }" and return the collected
    # STAT name records; at least one is required.
    assert self.is_cur_keyword_("ElidedFallbackName")
    self.expect_symbol_("{")
    names = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.is_cur_keyword_("name"):
            platformID, platEncID, langID, string = self.parse_stat_name_()
            names.append(
                self.ast.STATNameStatement(
                    "stat",
                    platformID,
                    platEncID,
                    langID,
                    string,
                    location=self.cur_token_location_,
                )
            )
        elif self.cur_token_ != ";":
            raise FeatureLibError(
                f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName",
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    if not names:
        raise FeatureLibError('Expected "name"', self.cur_token_location_)
    return names
def parse_STAT_design_axis(self):
    # Parse "DesignAxis <tag> <order> { name ...; ... }".
    assert self.is_cur_keyword_("DesignAxis")
    names = []
    axisTag = self.expect_tag_()
    registered = ("ital", "opsz", "slnt", "wdth", "wght")
    # Custom (unregistered) axis tags are conventionally uppercase.
    if axisTag not in registered and not axisTag.isupper():
        log.warning(f"Unregistered axis tag {axisTag} should be uppercase.")
    axisOrder = self.expect_number_()
    self.expect_symbol_("{")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.cur_token_type_ is Lexer.COMMENT:
            continue
        if self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_stat_name_()
            names.append(
                self.ast.STATNameStatement(
                    "stat", platformID, platEncID, langID, string, location=location
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                f'Expected "name", got {self.cur_token_}', self.cur_token_location_
            )
    self.expect_symbol_("}")
    return self.ast.STATDesignAxisStatement(
        axisTag, axisOrder, names, self.cur_token_location_
    )
def parse_STAT_axis_value_(self):
assert self.is_cur_keyword_("AxisValue")
self.expect_symbol_("{")
locations = []
names = []
flags = 0
while self.next_token_ != "}" or self.cur_comments_:
self.advance_lexer_(comments=True)
if self.cur_token_type_ is Lexer.COMMENT:
continue
elif self.is_cur_keyword_("name"):
location = self.cur_token_location_
platformID, platEncID, langID, string = self.parse_stat_name_()
name = self.ast.STATNameStatement(
"stat", platformID, platEncID, langID, string, location=location
)
names.append(name)
elif self.is_cur_keyword_("location"):
location = self.parse_STAT_location()
locations.append(location)
elif self.is_cur_keyword_("flag"):
flags = self.expect_stat_flags()
elif self.cur_token_ == ";":
continue
else:
raise FeatureLibError(
f"Unexpected token {self.cur_token_} " f"in AxisValue",
self.cur_token_location_,
)
self.expect_symbol_("}")
if not names:
raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_)
if not locations:
raise FeatureLibError('Expected "Axis location"', self.cur_token_location_)
if len(locations) > 1:
for location in locations:
if len(location.values) > 1:
raise FeatureLibError(
"Only one value is allowed in a "
"Format 4 Axis Value Record, but "
f"{len(location.values)} were found.",
self.cur_token_location_,
)
format4_tags = []
for location in locations:
tag = location.tag
if tag in format4_tags:
raise FeatureLibError(
f"Axis tag {tag} already " "defined.", self.cur_token_location_
)
format4_tags.append(tag)
return self.ast.STATAxisValueStatement(
names, locations, flags, self.cur_token_location_
)
def parse_STAT_location(self):
    """Parse a STAT ``location`` statement: a 4-character axis tag
    followed by one to three numeric values, terminated by ``;``.

    With exactly three values they are interpreted as
    (nominal, min, max) and the nominal value must lie inside the
    range.  Returns an ``ast.AxisValueLocationStatement``.
    """
    values = []
    tag = self.expect_tag_()
    if len(tag.strip()) != 4:
        raise FeatureLibError(
            f"Axis tag {self.cur_token_} must be 4 " "characters",
            self.cur_token_location_,
        )
    # Gather numeric values (ints or floats) until the semicolon.
    while self.next_token_ != ";":
        if self.next_token_type_ is Lexer.FLOAT:
            value = self.expect_float_()
            values.append(value)
        elif self.next_token_type_ is Lexer.NUMBER:
            value = self.expect_number_()
            values.append(value)
        else:
            raise FeatureLibError(
                f'Unexpected value "{self.next_token_}". '
                "Expected integer or float.",
                self.next_token_location_,
            )
    if len(values) == 3:
        # Three values form a (nominal, min, max) triple; validate range.
        nominal, min_val, max_val = values
        if nominal < min_val or nominal > max_val:
            raise FeatureLibError(
                f"Default value {nominal} is outside "
                f"of specified range "
                f"{min_val}-{max_val}.",
                self.next_token_location_,
            )
    return self.ast.AxisValueLocationStatement(tag, values)
def parse_table_STAT_(self, table):
    """Parse the body of a ``table STAT { ... }`` block.

    Appends ElidedFallbackName / ElidedFallbackNameID, DesignAxis and
    AxisValue statements to ``table.statements``.  An AxisValue may only
    reference axis tags already declared by a DesignAxis in this block.
    """
    statements = table.statements
    design_axes = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            if self.is_cur_keyword_("ElidedFallbackName"):
                names = self.parse_STAT_ElidedFallbackName()
                statements.append(self.ast.ElidedFallbackName(names))
            elif self.is_cur_keyword_("ElidedFallbackNameID"):
                value = self.expect_number_()
                statements.append(self.ast.ElidedFallbackNameID(value))
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("DesignAxis"):
                designAxis = self.parse_STAT_design_axis()
                design_axes.append(designAxis.tag)
                statements.append(designAxis)
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("AxisValue"):
                axisValueRecord = self.parse_STAT_axis_value_()
                for location in axisValueRecord.locations:
                    if location.tag not in design_axes:
                        # Tag must be defined in a DesignAxis before it
                        # can be referenced
                        raise FeatureLibError(
                            "DesignAxis not defined for " f"{location.tag}.",
                            self.cur_token_location_,
                        )
                statements.append(axisValueRecord)
                self.expect_symbol_(";")
            else:
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_}", self.cur_token_location_
                )
        elif self.cur_token_ == ";":
            continue
def parse_base_tag_list_(self):
    """Parse a BASE table baseline-tag list (section 9.a of the feature
    file specification): script tags up to the terminating semicolon."""
    assert self.cur_token_ in (
        "HorizAxis.BaseTagList",
        "VertAxis.BaseTagList",
    ), self.cur_token_
    collected = []
    while self.next_token_ != ";":
        collected.append(self.expect_script_tag_())
    self.expect_symbol_(";")
    return collected
def parse_base_script_list_(self, count):
    """Parse a comma-separated BASE BaseScriptList, each record carrying
    *count* baseline coordinates."""
    assert self.cur_token_ in (
        "HorizAxis.BaseScriptList",
        "VertAxis.BaseScriptList",
    ), self.cur_token_
    records = [self.parse_base_script_record_(count)]
    # Additional records are separated by commas.
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        records.append(self.parse_base_script_record_(count))
    self.expect_symbol_(";")
    return records
def parse_base_script_record_(self, count):
    """Parse one BaseScriptRecord: script tag, baseline tag, and *count*
    coordinate numbers."""
    script = self.expect_script_tag_()
    baseline = self.expect_script_tag_()
    coordinates = [self.expect_number_() for _ in range(count)]
    return script, baseline, coordinates
def parse_base_minmax_(self):
    """Parse a BASE MinMax entry: script tag, language tag, then
    ``min, max`` coordinates.  Per-feature MinMax adjustments (signalled
    by a trailing comma) are not supported and raise a FeatureLibError.
    """
    script_tag = self.expect_script_tag_()
    language = self.expect_language_tag_()
    min_coord = self.expect_number_()
    self.advance_lexer_()
    # The separator between min and max must be a comma.
    if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
        raise FeatureLibError(
            "Expected a comma between min and max coordinates",
            self.cur_token_location_,
        )
    max_coord = self.expect_number_()
    if self.next_token_ == ",":  # feature tag...
        raise FeatureLibError(
            "Feature tags are not yet supported in BASE table",
            self.cur_token_location_,
        )
    return script_tag, language, min_coord, max_coord
def parse_device_(self):
    """Parse a ``<device ...>`` record.

    Returns None for ``<device NULL>``; otherwise a tuple of
    (size, delta) integer pairs.
    """
    result = None
    self.expect_symbol_("<")
    self.expect_keyword_("device")
    if self.next_token_ == "NULL":
        self.expect_keyword_("NULL")
    else:
        result = [(self.expect_number_(), self.expect_number_())]
        while self.next_token_ == ",":
            self.expect_symbol_(",")
            result.append((self.expect_number_(), self.expect_number_()))
        result = tuple(result)  # make it hashable
    self.expect_symbol_(">")
    return result
def is_next_value_(self):
    """Return True if the lookahead token can start a value record:
    a number, a "<" (full record), or a "(" (variable scalar)."""
    if self.next_token_type_ is Lexer.NUMBER:
        return True
    return self.next_token_ in ("<", "(")
def parse_valuerecord_(self, vertical):
    """Parse a GPOS value record.

    Accepts a bare number (taken as the advance on the axis selected by
    *vertical*), ``<NULL>``, a named ``valueRecordDef`` reference, or a
    full ``<xPlacement yPlacement xAdvance yAdvance [devices]>`` form.
    Returns an ``ast.ValueRecord``.

    Fix: when all four device records are ``<device NULL>``,
    ``allDeltas`` is empty and indexing it raised IndexError; the range
    check is now skipped for an empty delta list.
    """
    if (
        self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "("
    ) or self.next_token_type_ is Lexer.NUMBER:
        # Single-number form: the value is the advance.
        number, location = (
            self.expect_number_(variable=True),
            self.cur_token_location_,
        )
        if vertical:
            val = self.ast.ValueRecord(
                yAdvance=number, vertical=vertical, location=location
            )
        else:
            val = self.ast.ValueRecord(
                xAdvance=number, vertical=vertical, location=location
            )
        return val
    self.expect_symbol_("<")
    location = self.cur_token_location_
    if self.next_token_type_ is Lexer.NAME:
        # Either <NULL> or a reference to a named valueRecordDef.
        name = self.expect_name_()
        if name == "NULL":
            self.expect_symbol_(">")
            return self.ast.ValueRecord()
        vrd = self.valuerecords_.resolve(name)
        if vrd is None:
            raise FeatureLibError(
                'Unknown valueRecordDef "%s"' % name, self.cur_token_location_
            )
        value = vrd.value
        xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
        xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
    else:
        # Four-number form.
        xPlacement, yPlacement, xAdvance, yAdvance = (
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
        )
    if self.next_token_ == "<":
        # Optional device records, one per value component.
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
        )
        allDeltas = sorted(
            [
                delta
                for size, delta in (xPlaDevice if xPlaDevice else ())
                + (yPlaDevice if yPlaDevice else ())
                + (xAdvDevice if xAdvDevice else ())
                + (yAdvDevice if yAdvDevice else ())
            ]
        )
        # allDeltas is empty when all four devices are NULL; nothing to check.
        if allDeltas and (allDeltas[0] < -128 or allDeltas[-1] > 127):
            raise FeatureLibError(
                "Device value out of valid range (-128..127)",
                self.cur_token_location_,
            )
    else:
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None)
    self.expect_symbol_(">")
    return self.ast.ValueRecord(
        xPlacement,
        yPlacement,
        xAdvance,
        yAdvance,
        xPlaDevice,
        yPlaDevice,
        xAdvDevice,
        yAdvDevice,
        vertical=vertical,
        location=location,
    )
def parse_valuerecord_definition_(self, vertical):
    """Parse a named value record definition (``valueRecordDef``, see
    section 2.e.v of the feature file specification) and register it."""
    assert self.is_cur_keyword_("valueRecordDef")
    where = self.cur_token_location_
    record = self.parse_valuerecord_(vertical)
    record_name = self.expect_name_()
    self.expect_symbol_(";")
    definition = self.ast.ValueRecordDefinition(record_name, record, location=where)
    self.valuerecords_.define(record_name, definition)
    return definition
def parse_languagesystem_(self):
    """Parse a ``languagesystem <script> <language>;`` statement."""
    assert self.cur_token_ == "languagesystem"
    where = self.cur_token_location_
    script_tag = self.expect_script_tag_()
    lang_tag = self.expect_language_tag_()
    self.expect_symbol_(";")
    return self.ast.LanguageSystemStatement(script_tag, lang_tag, location=where)
def parse_feature_block_(self, variation=False):
    """Parse a ``feature <tag> { ... } <tag>;`` block, or with
    variation=True a ``variation <tag> <conditionset> { ... }`` block.

    Sets up per-tag context (vertical features, stylistic sets,
    character variants, the size feature) before delegating the body to
    parse_block_().  Returns the resulting FeatureBlock/VariationBlock.
    """
    if variation:
        assert self.cur_token_ == "variation"
    else:
        assert self.cur_token_ == "feature"
    location = self.cur_token_location_
    tag = self.expect_tag_()
    # These feature tags position along the vertical axis.
    vertical = tag in {"vkrn", "vpal", "vhal", "valt"}
    stylisticset = None
    cv_feature = None
    size_feature = False
    if tag in self.SS_FEATURE_TAGS:
        stylisticset = tag
    elif tag in self.CV_FEATURE_TAGS:
        cv_feature = tag
    elif tag == "size":
        size_feature = True
    if variation:
        # A variation block names the condition set it applies under.
        conditionset = self.expect_name_()
    use_extension = False
    if self.next_token_ == "useExtension":
        self.expect_keyword_("useExtension")
        use_extension = True
    if variation:
        block = self.ast.VariationBlock(
            tag, conditionset, use_extension=use_extension, location=location
        )
    else:
        block = self.ast.FeatureBlock(
            tag, use_extension=use_extension, location=location
        )
    self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature)
    return block
def parse_feature_reference_(self):
    """Parse a ``feature <tag>;`` reference statement."""
    assert self.cur_token_ == "feature", self.cur_token_
    where = self.cur_token_location_
    referenced_tag = self.expect_tag_()
    self.expect_symbol_(";")
    return self.ast.FeatureReferenceStatement(referenced_tag, location=where)
def parse_featureNames_(self, tag):
    """Parses a ``featureNames`` statement found in stylistic set features.
    See section 8.c of the OpenType feature file specification.

    Returns a NestedBlock whose statements are FeatureNameStatement (and
    Comment) nodes.
    """
    assert self.cur_token_ == "featureNames", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # The block opens a new scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.FeatureNameStatement(
                    tag, platformID, platEncID, langID, string, location=location
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvParameters_(self, tag):
    """Parse a ``cvParameters`` block found in Character Variant features.
    See section 8.d of the OpenType feature file specification.

    Accepts the four name-ID sub-blocks (FeatUILabelNameID,
    FeatUITooltipTextNameID, SampleTextNameID, ParamUILabelNameID) and
    ``Character`` statements; returns a NestedBlock.
    """
    assert self.cur_token_ == "cvParameters", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # The block opens a new scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_(
            {
                "FeatUILabelNameID",
                "FeatUITooltipTextNameID",
                "SampleTextNameID",
                "ParamUILabelNameID",
            }
        ):
            statements.append(self.parse_cvNameIDs_(tag, self.cur_token_))
        elif self.is_cur_keyword_("Character"):
            statements.append(self.parse_cvCharacter_(tag))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvNameIDs_(self, tag, block_name):
    """Parse one name-ID sub-block of cvParameters (e.g.
    ``FeatUILabelNameID { name ...; }``); *block_name* is the keyword
    that opened it.  Returns a NestedBlock of CVParametersNameStatement
    (and Comment) nodes.
    """
    assert self.cur_token_ == block_name, self.cur_token_
    block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_)
    self.expect_symbol_("{")
    # The block opens a new scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.CVParametersNameStatement(
                    tag,
                    platformID,
                    platEncID,
                    langID,
                    string,
                    block_name,
                    location=location,
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvCharacter_(self, tag):
    """Parse a ``Character <number>;`` statement inside cvParameters and
    validate the code point against the Unicode range 0..0xFFFFFF."""
    assert self.cur_token_ == "Character", self.cur_token_
    where = self.cur_token_location_
    code_point = self.expect_any_number_()
    self.expect_symbol_(";")
    if code_point < 0 or code_point > 0xFFFFFF:
        raise FeatureLibError(
            "Character value must be between "
            "{:#x} and {:#x}".format(0, 0xFFFFFF),
            where,
        )
    return self.ast.CharacterStatement(code_point, tag, location=where)
def parse_FontRevision_(self):
    """Parse a ``FontRevision <float>;`` statement from a head table
    block (section 9.c of the feature file specification); the revision
    must be positive."""
    assert self.cur_token_ == "FontRevision", self.cur_token_
    where = self.cur_token_location_
    revision = self.expect_float_()
    self.expect_symbol_(";")
    if revision <= 0:
        raise FeatureLibError("Font revision numbers must be positive", where)
    return self.ast.FontRevisionStatement(revision, location=where)
def parse_conditionset_(self):
    """Parse a ``conditionset <name> { axis min max; ... } <name>;`` block.

    Returns an ``ast.ConditionsetStatement`` mapping each axis name to a
    (min, max) pair.  Each axis may appear only once, and the closing
    name must match the opening one.

    Fix: previously, a non-numeric token where a min or max value was
    expected left ``min_value``/``max_value`` unbound and raised a bare
    NameError; it now raises a proper FeatureLibError.
    """
    name = self.expect_name_()
    conditions = {}
    self.expect_symbol_("{")
    while self.next_token_ != "}":
        self.advance_lexer_()
        if self.cur_token_type_ is not Lexer.NAME:
            raise FeatureLibError("Expected an axis name", self.cur_token_location_)
        axis = self.cur_token_
        if axis in conditions:
            raise FeatureLibError(
                f"Repeated condition for axis {axis}", self.cur_token_location_
            )
        if self.next_token_type_ is Lexer.FLOAT:
            min_value = self.expect_float_()
        elif self.next_token_type_ is Lexer.NUMBER:
            min_value = self.expect_number_(variable=False)
        else:
            raise FeatureLibError(
                "Expected a number", self.next_token_location_
            )
        if self.next_token_type_ is Lexer.FLOAT:
            max_value = self.expect_float_()
        elif self.next_token_type_ is Lexer.NUMBER:
            max_value = self.expect_number_(variable=False)
        else:
            raise FeatureLibError(
                "Expected a number", self.next_token_location_
            )
        self.expect_symbol_(";")
        conditions[axis] = (min_value, max_value)
    self.expect_symbol_("}")
    finalname = self.expect_name_()
    if finalname != name:
        raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_)
    return self.ast.ConditionsetStatement(name, conditions)
def parse_block_(
    self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None
):
    """Parse the ``{ ... } <name>;`` body shared by feature and lookup
    blocks, dispatching each statement keyword to its sub-parser.

    Context flags enable extra keywords: ``featureNames`` in stylistic
    sets, ``cvParameters`` in character-variant features, and
    ``parameters`` / ``sizemenuname`` in the size feature.  Registered
    extension keywords are dispatched through ``self.extensions``.  The
    closing name after "}" must match ``block.name``.
    """
    self.expect_symbol_("{")
    # The block opens a new scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_({"enum", "enumerate"}):
            statements.append(self.parse_enumerate_(vertical=vertical))
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_reference_())
        elif self.is_cur_keyword_("ignore"):
            statements.append(self.parse_ignore_())
        elif self.is_cur_keyword_("language"):
            statements.append(self.parse_language_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical))
        elif self.is_cur_keyword_("lookupflag"):
            statements.append(self.parse_lookupflag_())
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_({"pos", "position"}):
            statements.append(
                self.parse_position_(enumerated=False, vertical=vertical)
            )
        elif self.is_cur_keyword_("script"):
            statements.append(self.parse_script_())
        elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}):
            statements.append(self.parse_substitute_())
        elif self.is_cur_keyword_("subtable"):
            statements.append(self.parse_subtable_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical))
        elif stylisticset and self.is_cur_keyword_("featureNames"):
            statements.append(self.parse_featureNames_(stylisticset))
        elif cv_feature and self.is_cur_keyword_("cvParameters"):
            statements.append(self.parse_cvParameters_(cv_feature))
        elif size_feature and self.is_cur_keyword_("parameters"):
            statements.append(self.parse_size_parameters_())
        elif size_feature and self.is_cur_keyword_("sizemenuname"):
            statements.append(self.parse_size_menuname_())
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected glyph class definition or statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    # The name repeated after the closing brace must match the block tag.
    name = self.expect_name_()
    if name != block.name.strip():
        raise FeatureLibError(
            'Expected "%s"' % block.name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
def is_cur_keyword_(self, k):
    """Return True if the current token is a NAME equal to *k* (a str)
    or, when *k* is a collection of strings, contained in it.

    Fix: replaces the archaic ``isinstance(k, type(""))`` spelling
    (a Python 2 basestring workaround) with ``isinstance(k, str)``.
    """
    if self.cur_token_type_ is not Lexer.NAME:
        return False
    if isinstance(k, str):
        return self.cur_token_ == k
    return self.cur_token_ in k
def expect_class_name_(self):
    """Advance and return the next token, which must be a glyph class
    name (@NAME)."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.GLYPHCLASS:
        return self.cur_token_
    raise FeatureLibError("Expected @NAME", self.cur_token_location_)
def expect_cid_(self):
    """Advance and return the next token, which must be a CID."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.CID:
        raise FeatureLibError("Expected a CID", self.cur_token_location_)
    return self.cur_token_
def expect_filename_(self):
    """Advance and return the next token, which must be a file name."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.FILENAME:
        return self.cur_token_
    raise FeatureLibError("Expected file name", self.cur_token_location_)
def expect_glyph_(self):
    """Advance and return the next glyph: a NAME with any leading
    backslashes stripped, or a CID rendered as a zero-padded cidNNNNN
    name."""
    self.advance_lexer_()
    token_kind = self.cur_token_type_
    if token_kind is Lexer.NAME:
        return self.cur_token_.lstrip("\\")
    if token_kind is Lexer.CID:
        return "cid%05d" % self.cur_token_
    raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_)
def check_glyph_name_in_glyph_set(self, *names):
    """Adds a glyph name (just `start`) or glyph names of a
    range (`start` and `end`) which are not in the glyph set
    to the "missing list" for future error reporting.

    If no glyph set is present, does nothing.
    """
    if not self.glyphNames_:
        return
    for name in names:
        # Record each unknown name once, at its first occurrence.
        if name not in self.glyphNames_ and name not in self.missing:
            self.missing[name] = self.cur_token_location_
def expect_markClass_reference_(self):
    """Read an @CLASS token and resolve it to a previously defined
    markClass; raise if undefined or not a MarkClass."""
    class_name = self.expect_class_name_()
    mark_class = self.glyphclasses_.resolve(class_name)
    if mark_class is None:
        raise FeatureLibError(
            "Unknown markClass @%s" % class_name, self.cur_token_location_
        )
    if not isinstance(mark_class, self.ast.MarkClass):
        raise FeatureLibError(
            "@%s is not a markClass" % class_name, self.cur_token_location_
        )
    return mark_class
def expect_tag_(self):
    """Advance and return the next tag, space-padded to exactly four
    characters.

    Fix: the previous padding expression appended only a single space,
    so one- and two-character tags came back shorter than four
    characters; ``str.ljust(4)`` pads correctly for any length <= 4.
    """
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a tag", self.cur_token_location_)
    if len(self.cur_token_) > 4:
        raise FeatureLibError(
            "Tags cannot be longer than 4 characters", self.cur_token_location_
        )
    return self.cur_token_.ljust(4)
def expect_script_tag_(self):
    """Read a script tag, rejecting the lowercase "dflt" spelling."""
    tag = self.expect_tag_()
    if tag != "dflt":
        return tag
    raise FeatureLibError(
        '"dflt" is not a valid script tag; use "DFLT" instead',
        self.cur_token_location_,
    )
def expect_language_tag_(self):
    """Read a language tag, rejecting the uppercase "DFLT" spelling."""
    tag = self.expect_tag_()
    if tag != "DFLT":
        return tag
    raise FeatureLibError(
        '"DFLT" is not a valid language tag; use "dflt" instead',
        self.cur_token_location_,
    )
def expect_symbol_(self, symbol):
    """Advance and consume exactly *symbol*; raise otherwise."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.SYMBOL or self.cur_token_ != symbol:
        raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_)
    return symbol
def expect_keyword_(self, keyword):
    """Advance and consume exactly the NAME *keyword*; raise otherwise."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME or self.cur_token_ != keyword:
        raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_)
    return self.cur_token_
def expect_name_(self):
    """Advance and return the next token, which must be a NAME."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a name", self.cur_token_location_)
    return self.cur_token_
def expect_number_(self, variable=False):
    """Advance and return the next number.  With variable=True, an
    opening "(" is accepted and parsed as a variable scalar instead."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.NUMBER:
        return self.cur_token_
    opens_scalar = (
        self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "("
    )
    if variable and opens_scalar:
        return self.expect_variable_scalar_()
    raise FeatureLibError("Expected a number", self.cur_token_location_)
def expect_variable_scalar_(self):
    """Parse a parenthesised variable scalar into a VariableScalar.

    Entered with "(" as the current token (see expect_number_); each
    iteration reads one master via expect_master_(), which leaves the
    lexer advanced, and the loop ends once ")" becomes current.
    """
    self.advance_lexer_()  # "("
    scalar = VariableScalar()
    while True:
        if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")":
            break
        location, value = self.expect_master_()
        scalar.add_value(location, value)
    return scalar
def expect_master_(self):
    """Parse one master of a variable scalar:
    ``axis=value[, axis=value ...]:number``.

    Returns ``(location_dict, number)``.  The lexer tokenizes the
    trailing ``:number`` as a single NAME beginning with ":", so the
    numeric part is recovered from that token with int().
    """
    location = {}
    while True:
        if self.cur_token_type_ is not Lexer.NAME:
            raise FeatureLibError("Expected an axis name", self.cur_token_location_)
        axis = self.cur_token_
        self.advance_lexer_()
        if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="):
            raise FeatureLibError(
                "Expected an equals sign", self.cur_token_location_
            )
        value = self.expect_number_()
        location[axis] = value
        if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":":
            # Lexer has just read the value as a glyph name. We'll correct it later
            break
        self.advance_lexer_()
        if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
            raise FeatureLibError(
                "Expected an comma or an equals sign", self.cur_token_location_
            )
        self.advance_lexer_()
    self.advance_lexer_()
    # Strip the leading ":" from the mis-lexed token to get the value.
    value = int(self.cur_token_[1:])
    self.advance_lexer_()
    return location, value
def expect_any_number_(self):
    """Advance and return the next numeric token of any base accepted
    by the lexer (decimal, hexadecimal or octal)."""
    self.advance_lexer_()
    if self.cur_token_type_ not in Lexer.NUMBERS:
        raise FeatureLibError(
            "Expected a decimal, hexadecimal or octal number", self.cur_token_location_
        )
    return self.cur_token_
def expect_float_(self):
    """Advance and return the next token, which must be a FLOAT."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.FLOAT:
        raise FeatureLibError(
            "Expected a floating-point number", self.cur_token_location_
        )
    return self.cur_token_
def expect_decipoint_(self):
    """Return the next number as points: floats unchanged, integers
    divided by 10 (decipoints)."""
    upcoming = self.next_token_type_
    if upcoming == Lexer.FLOAT:
        return self.expect_float_()
    if upcoming is Lexer.NUMBER:
        return self.expect_number_() / 10
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
def expect_stat_flags(self):
    """Parse STAT AxisValue flag names up to ";" and return the OR-ed
    flag bits.

    Fix: the error for an unrecognized flag previously reported
    ``cur_token_`` (the token already consumed) instead of the
    offending lookahead token; it now reports ``next_token_`` and its
    location.
    """
    value = 0
    flags = {
        "OlderSiblingFontAttribute": 1,
        "ElidableAxisValueName": 2,
    }
    while self.next_token_ != ";":
        if self.next_token_ in flags:
            name = self.expect_name_()
            value = value | flags[name]
        else:
            raise FeatureLibError(
                f"Unexpected STAT flag {self.next_token_}", self.next_token_location_
            )
    return value
def expect_stat_values_(self):
    """Return the next numeric token, accepting either a float or an
    integer."""
    upcoming = self.next_token_type_
    if upcoming == Lexer.FLOAT:
        return self.expect_float_()
    if upcoming is Lexer.NUMBER:
        return self.expect_number_()
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
def expect_string_(self):
    """Advance and return the next token, which must be a STRING."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.STRING:
        raise FeatureLibError("Expected a string", self.cur_token_location_)
    return self.cur_token_
def advance_lexer_(self, comments=False):
    """Advance the (current, lookahead) token window by one token.

    With comments=True, any queued comments in ``self.cur_comments_``
    are delivered first, one per call, as COMMENT tokens.  Otherwise
    the lookahead token becomes current and the next non-comment token
    is pulled from the lexer; comment tokens encountered while scanning
    are queued into ``self.cur_comments_`` for later delivery.
    """
    if comments and self.cur_comments_:
        self.cur_token_type_ = Lexer.COMMENT
        self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
        return
    else:
        self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
            self.next_token_type_,
            self.next_token_,
            self.next_token_location_,
        )
    while True:
        try:
            (
                self.next_token_type_,
                self.next_token_,
                self.next_token_location_,
            ) = next(self.lexer_)
        except StopIteration:
            # End of input: clear type and token (location keeps its
            # previous value for error reporting).
            self.next_token_type_, self.next_token_ = (None, None)
        if self.next_token_type_ != Lexer.COMMENT:
            break
        self.cur_comments_.append((self.next_token_, self.next_token_location_))
@staticmethod
def reverse_string_(s):
"""'abc' --> 'cba'"""
return "".join(reversed(list(s)))
def make_cid_range_(self, location, start, limit):
    """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]"""
    if start > limit:
        raise FeatureLibError(
            "Bad range: start should be less than limit", location
        )
    # Inclusive range, each CID zero-padded to five digits.
    return ["cid%05d" % cid for cid in range(start, limit + 1)]
def make_glyph_range_(self, location, start, limit):
    """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]

    Expands a glyph range by locating the part that varies between
    *start* and *limit* (after stripping their common prefix and
    suffix).  The varying part must be a single uppercase letter, a
    single lowercase letter, or a 1-3 digit number; anything else
    raises a FeatureLibError.
    """
    result = list()
    if len(start) != len(limit):
        raise FeatureLibError(
            'Bad range: "%s" and "%s" should have the same length' % (start, limit),
            location,
        )
    rev = self.reverse_string_
    # Split both names into common prefix + varying middle + common suffix.
    prefix = os.path.commonprefix([start, limit])
    suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
    if len(suffix) > 0:
        start_range = start[len(prefix) : -len(suffix)]
        limit_range = limit[len(prefix) : -len(suffix)]
    else:
        start_range = start[len(prefix) :]
        limit_range = limit[len(prefix) :]
    if start_range >= limit_range:
        raise FeatureLibError(
            "Start of range must be smaller than its end", location
        )
    uppercase = re.compile(r"^[A-Z]$")
    if uppercase.match(start_range) and uppercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result
    lowercase = re.compile(r"^[a-z]$")
    if lowercase.match(start_range) and lowercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result
    digits = re.compile(r"^[0-9]{1,3}$")
    if digits.match(start_range) and digits.match(limit_range):
        for i in range(int(start_range, 10), int(limit_range, 10) + 1):
            # Zero-pad each number to the width of the varying part.
            number = ("000" + str(i))[-len(start_range) :]
            result.append("%s%s%s" % (prefix, number, suffix))
        return result
    raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location)
class SymbolTable(object):
    """A stack of lexical scopes mapping names to items.

    Definitions always go into the innermost scope; lookups walk from
    the innermost scope outward.
    """

    def __init__(self):
        # Stack of scope dicts; the last entry is the innermost scope.
        self.scopes_ = [{}]

    def enter_scope(self):
        """Push a fresh innermost scope."""
        self.scopes_.append({})

    def exit_scope(self):
        """Discard the innermost scope and everything defined in it."""
        self.scopes_.pop()

    def define(self, name, item):
        """Bind *name* to *item* in the innermost scope."""
        self.scopes_[-1][name] = item

    def resolve(self, name):
        """Return the innermost truthy binding of *name*, or None."""
        for scope in reversed(self.scopes_):
            found = scope.get(name)
            if found:
                return found
        return None
from fontTools.varLib.models import VariationModel, normalizeValue, piecewiseLinearMap
def Location(loc):
    """Return *loc*'s (axis, value) items as a tuple sorted by axis
    name, suitable for use as a dictionary key."""
    pairs = sorted(loc.items())
    return tuple(pairs)
class VariableScalar:
    """A scalar with different values at different points in the designspace."""

    def __init__(self, location_value=None):
        # Fix: the previous signature used a mutable default argument
        # (location_value={}); None avoids the shared-default pitfall
        # while keeping the call signature backward compatible.
        # values maps Location(...) keys to scalar values.
        self.values = {}
        # axes is expected to be replaced by the caller with the font's
        # axis objects before interpolation (see axes_dict).
        self.axes = {}
        if location_value:
            for location, value in location_value.items():
                self.add_value(location, value)

    def __repr__(self):
        # NOTE(review): "%i" assumes integer axis positions and values;
        # float locations would raise here — confirm against callers.
        items = []
        for location, value in self.values.items():
            loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
            items.append("%s:%i" % (loc, value))
        return "(" + (" ".join(items)) + ")"

    @property
    def does_vary(self):
        """True if the masters do not all share the same value."""
        values = list(self.values.values())
        return any(v != values[0] for v in values[1:])

    @property
    def axes_dict(self):
        """Map axis tag -> axis object; requires .axes to be set."""
        if not self.axes:
            raise ValueError(
                ".axes must be defined on variable scalar before interpolating"
            )
        return {ax.axisTag: ax for ax in self.axes}

    def _normalized_location(self, location):
        """Return *location* filled with axis defaults and normalized to
        the -1..+1 design space, as a Location key."""
        location = self.fix_location(location)
        normalized_location = {}
        for axtag in location.keys():
            if axtag not in self.axes_dict:
                raise ValueError("Unknown axis %s in %s" % (axtag, location))
            axis = self.axes_dict[axtag]
            normalized_location[axtag] = normalizeValue(
                location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
            )
        return Location(normalized_location)

    def fix_location(self, location):
        """Return *location* extended with default values for any axes
        it does not mention."""
        location = dict(location)
        for tag, axis in self.axes_dict.items():
            if tag not in location:
                location[tag] = axis.defaultValue
        return location

    def add_value(self, location, value):
        """Record *value* at *location* (filled with axis defaults when
        .axes is already known)."""
        if self.axes:
            location = self.fix_location(location)
        self.values[Location(location)] = value

    def fix_all_locations(self):
        """Re-key all stored values with axis defaults filled in."""
        self.values = {
            Location(self.fix_location(l)): v for l, v in self.values.items()
        }

    @property
    def default(self):
        """The value at the default location of every axis."""
        self.fix_all_locations()
        key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
        if key not in self.values:
            raise ValueError("Default value could not be found")
        # I *guess* we could interpolate one, but I don't know how.
        return self.values[key]

    def value_at_location(self, location, model_cache=None, avar=None):
        """Return the (possibly interpolated) value at *location*."""
        loc = Location(location)
        if loc in self.values.keys():
            return self.values[loc]
        values = list(self.values.values())
        loc = dict(self._normalized_location(loc))
        return self.model(model_cache, avar).interpolateFromMasters(loc, values)

    def model(self, model_cache=None, avar=None):
        """Return (and optionally cache) a VariationModel over the
        masters' normalized locations, applying avar mapping if given."""
        if model_cache is not None:
            key = tuple(self.values.keys())
            if key in model_cache:
                return model_cache[key]
        locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
        if avar is not None:
            mapping = avar.segments
            locations = [
                {
                    k: piecewiseLinearMap(v, mapping[k]) if k in mapping else v
                    for k, v in location.items()
                }
                for location in locations
            ]
        m = VariationModel(locations)
        if model_cache is not None:
            model_cache[key] = m
        return m

    def get_deltas_and_supports(self, model_cache=None, avar=None):
        """Return the model's deltas and supports for the master values."""
        values = list(self.values.values())
        return self.model(model_cache, avar).getDeltasAndSupports(values)

    def add_to_variation_store(self, store_builder, model_cache=None, avar=None):
        """Store this scalar's deltas in *store_builder*; return the
        (default value, variation index) pair."""
        deltas, supports = self.get_deltas_and_supports(model_cache, avar)
        store_builder.setSupports(supports)
        index = store_builder.storeDeltas(deltas)
        return int(self.default), index
"""fontTools.feaLib -- a package for dealing with OpenType feature files."""
# The structure of OpenType feature files is defined here:
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
from fontTools.feaLib.error import FeatureLibError
from fontTools import configLogger
from fontTools.misc.cliTools import makeOutputFileName
import sys
import argparse
import logging
log = logging.getLogger("fontTools.feaLib")
def main(args=None):
    """Add features from a feature file (.fea) into an OTF font"""
    parser = argparse.ArgumentParser(
        description="Use fontTools to compile OpenType feature files (*.fea)."
    )
    parser.add_argument(
        "input_fea", metavar="FEATURES", help="Path to the feature file"
    )
    parser.add_argument(
        "input_font", metavar="INPUT_FONT", help="Path to the input font"
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output_font",
        metavar="OUTPUT_FONT",
        help="Path to the output font.",
    )
    parser.add_argument(
        "-t",
        "--tables",
        metavar="TABLE_TAG",
        choices=Builder.supportedTables,
        nargs="+",
        help="Specify the table(s) to be built.",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Add source-level debugging information to font.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Increase the logger verbosity. Multiple -v " "options are allowed.",
        action="count",
        default=0,
    )
    parser.add_argument(
        "--traceback", help="show traceback for exceptions.", action="store_true"
    )
    options = parser.parse_args(args)
    # Map repeated -v flags to log levels, capping at DEBUG.
    levels = ["WARNING", "INFO", "DEBUG"]
    configLogger(level=levels[min(len(levels) - 1, options.verbose)])
    # Default output path is derived from the input font's file name.
    output_font = options.output_font or makeOutputFileName(options.input_font)
    log.info("Compiling features to '%s'" % (output_font))
    font = TTFont(options.input_font)
    try:
        addOpenTypeFeatures(
            font, options.input_fea, tables=options.tables, debug=options.debug
        )
    except FeatureLibError as e:
        # Compilation errors are reported cleanly unless --traceback is set.
        if options.traceback:
            raise
        log.error(e)
        sys.exit(1)
    font.save(output_font)
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
venv\Lib\site-packages\fontTools\merge\base.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import logging
log = logging.getLogger("fontTools.merge")
def add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes.

    Duplicate class arguments are processed once.  Unless
    allowDefaultTable=True is passed, installing onto DefaultTable is
    rejected.  The decorator returns None, so the decorated name is not
    left bound at module level.
    """
    allowDefault = kwargs.get("allowDefaultTable", False)

    def wrapper(method):
        processed = set()
        for clazz in clazzes:
            if clazz in processed:
                continue  # Support multiple names of a clazz
            processed.add(clazz)
            assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
            assert (
                method.__name__ not in clazz.__dict__
            ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        return None

    return wrapper
def mergeObjects(lst):
    """Merge a list of same-class table objects attribute by attribute.

    NotImplemented entries are dropped first (an all-NotImplemented list
    merges to NotImplemented), then None entries (an all-None list
    merges to None).  For each attribute present on any object, the
    merge function is looked up in the class's ``mergeMap`` by key, with
    "*" as a fallback; a mergeLogic of NotImplemented drops the key, and
    a NotImplemented result omits it from the merged object.
    """
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None
    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst
    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}
    # Union of all attribute names across the objects being merged.
    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                mergeLogic = logic["*"]
            except KeyError:
                raise Exception(
                    "Don't know how to merge key %s of class %s" % (key, clazz.__name__)
                )
        if mergeLogic is NotImplemented:
            continue
        # Missing attributes are passed through as NotImplemented.
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value
    returnTable.__dict__ = returnDict
    return returnTable
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    """Default merge entry point installed on DefaultTable.

    A table without a ``mergeMap`` is logged as unmergeable and yields
    NotImplemented.  A dict mergeMap delegates to ``m.mergeObjects``;
    a callable mergeMap is applied to the table list directly.
    """
    if not hasattr(self, "mergeMap"):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented
    logic = self.mergeMap
    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)
venv\Lib\site-packages\fontTools\merge\cmap.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools.merge.unicode import is_Default_Ignorable
from fontTools.pens.recordingPen import DecomposingRecordingPen
import logging
log = logging.getLogger("fontTools.merge")
def computeMegaGlyphOrder(merger, glyphOrders):
    """Modifies passed-in glyphOrders to reflect new glyph names.
    Stores merger.glyphOrder."""
    seen = {}
    for order in glyphOrders:
        for idx, name in enumerate(order):
            if name in seen:
                # Name collision: rename with the first free ".<n>" suffix.
                suffix = seen[name]
                while (name + "." + repr(suffix)) in seen:
                    suffix += 1
                seen[name] = suffix
                name += "." + repr(suffix)
                order[idx] = name
            seen[name] = 1
    merger.glyphOrder = list(seen.keys())
def _glyphsAreSame(
    glyphSet1,
    glyphSet2,
    glyph1,
    glyph2,
    advanceTolerance=0.05,
    advanceToleranceEmpty=0.20,
):
    """Return True when the two glyphs decompose to identical outlines and
    their advances match within a relative tolerance."""
    recorder1 = DecomposingRecordingPen(glyphSet1)
    recorder2 = DecomposingRecordingPen(glyphSet2)
    one = glyphSet1[glyph1]
    other = glyphSet2[glyph2]
    one.draw(recorder1)
    other.draw(recorder2)
    if recorder1.value != recorder2.value:
        return False
    # Allow more width tolerance for glyphs with no ink
    tolerance = advanceTolerance if recorder1.value else advanceToleranceEmpty
    # TODO Warn if advances not the same but within tolerance.
    if abs(one.width - other.width) > one.width * tolerance:
        return False
    if hasattr(one, "height") and one.height is not None:
        if abs(one.height - other.height) > one.height * tolerance:
            return False
    return True
def computeMegaUvs(merger, uvsTables):
    """Returns merged UVS subtable (cmap format=14)."""
    merged = {}
    cmap = merger.cmap
    for table in uvsTables:
        for selector, mapping in table.uvsDict.items():
            perSelector = merged.setdefault(selector, {})
            for codepoint, glyphName in mapping:
                if cmap.get(codepoint) == glyphName:
                    # Agrees with the regular cmap: a "default" variation,
                    # encoded as None.
                    glyphName = None
                # prefer previous glyph id if both fonts defined UVS
                perSelector.setdefault(codepoint, glyphName)
    # Convert the inner dicts to the [(codepoint, glyphName), ...] list form.
    return {selector: list(mapping.items()) for selector, mapping in merged.items()}
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name
class _CmapUnicodePlatEncodings:
BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
UVS = {(14, 0, 5)}
def computeMegaCmap(merger, cmapTables):
    """Sets merger.cmap and merger.uvsDict."""
    # TODO Handle format=14.
    # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
    # If there is a format 12 table for a font, ignore the format 4 table of it
    chosenCmapTables = []
    chosenUvsTables = []
    for fontIdx, table in enumerate(cmapTables):
        format4 = None
        format12 = None
        format14 = None
        for subtable in table.tables:
            properties = (subtable.format, subtable.platformID, subtable.platEncID)
            if properties in _CmapUnicodePlatEncodings.BMP:
                format4 = subtable
            elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
                format12 = subtable
            elif properties in _CmapUnicodePlatEncodings.UVS:
                format14 = subtable
            else:
                log.warning(
                    "Dropped cmap subtable from font '%s':\t"
                    "format %2s, platformID %2s, platEncID %2s",
                    fontIdx,
                    subtable.format,
                    subtable.platformID,
                    subtable.platEncID,
                )
        # Prefer the full-repertoire (format 12) subtable over the BMP-only one.
        if format12 is not None:
            chosenCmapTables.append((format12, fontIdx))
        elif format4 is not None:
            chosenCmapTables.append((format4, fontIdx))
        if format14 is not None:
            chosenUvsTables.append(format14)

    # Build the unicode mapping
    merger.cmap = cmap = {}
    # For each glyph name already in the merged cmap, the font it came from.
    fontIndexForGlyph = {}
    # Glyph sets are fetched lazily, only when a duplicate forces a comparison.
    glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None
    for table, fontIdx in chosenCmapTables:
        # handle duplicates
        for uni, gid in table.cmap.items():
            oldgid = cmap.get(uni, None)
            if oldgid is None:
                cmap[uni] = gid
                fontIndexForGlyph[gid] = fontIdx
            elif is_Default_Ignorable(uni) or uni in (0x25CC,):  # U+25CC DOTTED CIRCLE
                continue
            elif oldgid != gid:
                # Char previously mapped to oldgid, now to gid.
                # Record, to fix up in GSUB 'locl' later.
                if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
                    if glyphSets is not None:
                        oldFontIdx = fontIndexForGlyph[oldgid]
                        for idx in (fontIdx, oldFontIdx):
                            if glyphSets[idx] is None:
                                glyphSets[idx] = merger.fonts[idx].getGlyphSet()
                    # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
                    # continue
                    merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
                elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
                    # Char previously mapped to oldgid but oldgid is already remapped to a different
                    # gid, because of another Unicode character.
                    # TODO: Try harder to do something about these.
                    log.warning(
                        "Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
                    )

    merger.uvsDict = computeMegaUvs(merger, chosenUvsTables)
def renameCFFCharStrings(merger, glyphOrder, cffTable):
    """Rename topDictIndex charStrings based on glyphOrder."""
    topDict = cffTable.cff.topDictIndex[0]
    # Rebuild the CharStrings dict keyed by the post-merge glyph names,
    # preserving the original charstring order.
    renamed = {
        glyphOrder[index]: charString
        for index, charString in enumerate(topDict.CharStrings.charStrings.values())
    }
    topDict.CharStrings.charStrings = renamed
    topDict.charset = list(glyphOrder)
venv\Lib\site-packages\fontTools\merge\layout.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.ttLib.tables import otTables
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.util import *
import logging
log = logging.getLogger("fontTools.merge")
def mergeLookupLists(lst):
    """Concatenate the per-font lookup index lists into one flat list."""
    # TODO Do smarter merge.
    merged = []
    for indices in lst:
        merged.extend(indices)
    return merged
def mergeFeatures(lst):
    """Merge several Feature tables into one, concatenating their lookups."""
    assert lst
    merged = otTables.Feature()
    merged.FeatureParams = None
    merged.LookupListIndex = mergeLookupLists(
        [feature.LookupListIndex for feature in lst if feature.LookupListIndex]
    )
    merged.LookupCount = len(merged.LookupListIndex)
    return merged
def mergeFeatureLists(lst):
    """Merge lists of FeatureRecords, combining records that share a tag."""
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.FeatureTag, []).append(record.Feature)
    merged = []
    for tag in sorted(byTag):
        record = otTables.FeatureRecord()
        record.FeatureTag = tag
        record.Feature = mergeFeatures(byTag[tag])
        merged.append(record)
    return merged
def mergeLangSyses(lst):
    """Merge several LangSys tables into one, combining their features."""
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(langSys.ReqFeatureIndex == 0xFFFF for langSys in lst)

    merged = otTables.LangSys()
    merged.LookupOrder = None
    merged.ReqFeatureIndex = 0xFFFF
    merged.FeatureIndex = mergeFeatureLists(
        [langSys.FeatureIndex for langSys in lst if langSys.FeatureIndex]
    )
    merged.FeatureCount = len(merged.FeatureIndex)
    return merged
def mergeScripts(lst):
    """Merge several Script tables, combining LangSys records by tag."""
    assert lst
    if len(lst) == 1:
        return lst[0]

    byTag = {}
    for script in lst:
        for record in script.LangSysRecord:
            byTag.setdefault(record.LangSysTag, []).append(record.LangSys)
    records = []
    for tag, langSysList in sorted(byTag.items()):
        record = otTables.LangSysRecord()
        record.LangSys = mergeLangSyses(langSysList)
        record.LangSysTag = tag
        records.append(record)

    merged = otTables.Script()
    merged.LangSysRecord = records
    merged.LangSysCount = len(records)
    defaults = [script.DefaultLangSys for script in lst if script.DefaultLangSys]
    if defaults:
        merged.DefaultLangSys = mergeLangSyses(defaults)
    else:
        merged.DefaultLangSys = None
    return merged
def mergeScriptRecords(lst):
    """Merge lists of ScriptRecords, combining records that share a tag."""
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.ScriptTag, []).append(record.Script)
    merged = []
    for tag in sorted(byTag):
        record = otTables.ScriptRecord()
        record.ScriptTag = tag
        record.Script = mergeScripts(byTag[tag])
        merged.append(record)
    return merged
# Per-attribute merge strategies for the OpenType layout table tree.
otTables.ScriptList.mergeMap = {
    "ScriptCount": lambda lst: None,  # TODO
    "ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    "BaseScriptCount": lambda lst: None,  # TODO
    # TODO: Merge duplicate entries
    "BaseScriptRecord": lambda lst: sorted(
        sumLists(lst), key=lambda s: s.BaseScriptTag
    ),
}
otTables.FeatureList.mergeMap = {
    "FeatureCount": sum,
    "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}
otTables.LookupList.mergeMap = {
    "LookupCount": sum,
    "Lookup": sumLists,
}
otTables.Coverage.mergeMap = {
    "Format": min,
    "glyphs": sumLists,
}
otTables.ClassDef.mergeMap = {
    "Format": min,
    "classDefs": sumDicts,
}
otTables.LigCaretList.mergeMap = {
    "Coverage": mergeObjects,
    "LigGlyphCount": sum,
    "LigGlyph": sumLists,
}
otTables.AttachList.mergeMap = {
    "Coverage": mergeObjects,
    "GlyphCount": sum,
    "AttachPoint": sumLists,
}
# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    "MarkSetTableFormat": equal,
    "MarkSetCount": sum,
    "Coverage": sumLists,
}
otTables.Axis.mergeMap = {
    "*": mergeObjects,
}
# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    "BaseTagCount": sum,
    "BaselineTag": sumLists,
}
# Top-level layout tables: merge every attribute recursively, take the
# maximum Version.
otTables.GDEF.mergeMap = otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = (
    otTables.BASE.mergeMap
) = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
    "*": mergeObjects,
    "Version": max,
}
# The ttLib wrapper classes delegate into the otTables objects above.
ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass("GSUB").mergeMap = (
    ttLib.getTableClass("GPOS").mergeMap
) = ttLib.getTableClass("BASE").mergeMap = ttLib.getTableClass(
    "JSTF"
).mergeMap = ttLib.getTableClass(
    "MATH"
).mergeMap = {
    "tableTag": onlyExisting(equal),  # XXX clean me up
    "table": mergeObjects,
}
@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables):
assert len(tables) == len(m.duplicateGlyphsPerFont)
for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
if not dups:
continue
if table is None or table is NotImplemented:
log.warning(
"Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
m.fonts[i]._merger__name,
dups,
)
continue
synthFeature = None
synthLookup = None
for script in table.table.ScriptList.ScriptRecord:
if script.ScriptTag == "DFLT":
continue # XXX
for langsys in [script.Script.DefaultLangSys] + [
l.LangSys for l in script.Script.LangSysRecord
]:
if langsys is None:
continue # XXX Create!
feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
assert len(feature) <= 1
if feature:
feature = feature[0]
else:
if not synthFeature:
synthFeature = otTables.FeatureRecord()
synthFeature.FeatureTag = "locl"
f = synthFeature.Feature = otTables.Feature()
f.FeatureParams = None
f.LookupCount = 0
f.LookupListIndex = []
table.table.FeatureList.FeatureRecord.append(synthFeature)
table.table.FeatureList.FeatureCount += 1
feature = synthFeature
langsys.FeatureIndex.append(feature)
langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)
if not synthLookup:
subtable = otTables.SingleSubst()
subtable.mapping = dups
synthLookup = otTables.Lookup()
synthLookup.LookupFlag = 0
synthLookup.LookupType = 1
synthLookup.SubTableCount = 1
synthLookup.SubTable = [subtable]
if table.table.LookupList is None:
# mtiLib uses None as default value for LookupList,
# while feaLib points to an empty array with count 0
# TODO: make them do the same
table.table.LookupList = otTables.LookupList()
table.table.LookupList.Lookup = []
table.table.LookupList.LookupCount = 0
table.table.LookupList.Lookup.append(synthLookup)
table.table.LookupList.LookupCount += 1
if feature.Feature.LookupListIndex[:1] != [synthLookup]:
feature.Feature.LookupListIndex[:0] = [synthLookup]
feature.Feature.LookupCount += 1
DefaultTable.merge(self, m, tables)
return self
@add_method(
    otTables.SingleSubst,
    otTables.MultipleSubst,
    otTables.AlternateSubst,
    otTables.LigatureSubst,
    otTables.ReverseChainSingleSubst,
    otTables.SinglePos,
    otTables.PairPos,
    otTables.CursivePos,
    otTables.MarkBasePos,
    otTables.MarkLigPos,
    otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap):
    """No-op: these leaf subtables carry no nested lookup indices."""
    pass
# Copied and trimmed down from subset.py
@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def __merge_classify_context(self):
    """Return a (per-class, per-format cached) ContextHelper holding the
    attribute names used by this contextual subtable's format, or None for
    unrecognized formats."""

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive the GSUB/GPOS-specific attribute-name prefixes from the
            # subtable class name.
            if klass.__name__.endswith("Subst"):
                Typ = "Sub"
                Type = "Subst"
            else:
                Typ = "Pos"
                Type = "Pos"
            if klass.__name__.startswith("Chain"):
                Chain = "Chain"
            else:
                Chain = ""
            ChainTyp = Chain + Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type + "LookupRecord"

            if Format == 1:
                self.Rule = ChainTyp + "Rule"
                self.RuleSet = ChainTyp + "RuleSet"
            elif Format == 2:
                self.Rule = ChainTyp + "ClassRule"
                self.RuleSet = ChainTyp + "ClassSet"

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go

    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def mapLookups(self, lookupMap):
    """Remap lookup indices inside contextual rules via *lookupMap*.

    Formats 1/2 nest lookup records inside rule sets; format 3 holds them
    directly on the subtable.
    """
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs:
                continue
            for r in getattr(rs, c.Rule):
                if not r:
                    continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll:
                        continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll:
                continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    """Remap lookup indices inside the wrapped extension subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    self.ExtSubTable.mapLookups(lookupMap)
@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    """Remap lookup indices in all non-empty subtables of this lookup."""
    for subtable in self.SubTable:
        if subtable:
            subtable.mapLookups(lookupMap)
@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    """Remap lookup indices in every non-empty lookup of the list."""
    for lookup in self.Lookup:
        if lookup:
            lookup.mapLookups(lookupMap)
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    """Remap MarkFilteringSet when the UseMarkFilteringSet flag is set."""
    useMarkFilteringSet = 0x0010
    if self.LookupFlag & useMarkFilteringSet:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]
@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    """Remap mark filtering sets in every non-empty lookup of the list."""
    for lookup in self.Lookup:
        if lookup:
            lookup.mapMarkFilteringSets(markFilteringSetMap)
@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    """Replace each lookup index with its value in *lookupMap*."""
    remapped = [lookupMap[index] for index in self.LookupListIndex]
    self.LookupListIndex = remapped
@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    """Remap lookup indices in every feature of the list."""
    for record in self.FeatureRecord:
        if record and record.Feature:
            record.Feature.mapLookups(lookupMap)
@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
    """Remap feature indices, including the required feature if present."""
    remapped = [featureMap[index] for index in self.FeatureIndex]
    self.FeatureIndex = remapped
    # 65535 (0xFFFF) means "no required feature"; leave it untouched.
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]
@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    """Remap feature indices in the default and all tagged LangSys tables."""
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for record in self.LangSysRecord:
        if record and record.LangSys:
            record.LangSys.mapFeatures(featureMap)
@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    """Remap feature indices in every script of the list."""
    for record in self.ScriptRecord:
        if record and record.Script:
            record.Script.mapFeatures(featureMap)
def layoutPreMerge(font):
    """Prepare a font's GSUB/GPOS for merging by replacing numeric lookup,
    feature and mark-filtering-set indices with direct object references
    (undone by layoutPostMerge)."""
    # Map indices to references
    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.LookupList:
            lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            if (
                GDEF
                and GDEF.table.Version >= 0x00010002
                and GDEF.table.MarkGlyphSetsDef
            ):
                markFilteringSetMap = {
                    i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
                }
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs
def layoutPostMerge(font):
    """Convert GSUB/GPOS object references back to numeric indices after
    merging, dropping features and lookups that are no longer reachable."""
    # Map references back to indices
    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.FeatureList and t.table.ScriptList:
            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(
                t.table.FeatureList.FeatureRecord
            )
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [
                f
                for i, f in enumerate(t.table.FeatureList.FeatureRecord)
                if i in usedIndices
            ]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:
            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [
                l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
            ]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(
                    GDEF.table.MarkGlyphSetsDef.Coverage
                )
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
venv\Lib\site-packages\fontTools\merge\options.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
class Options(object):
    """Container for fontTools merge command-line options.

    Attributes are option names; ``set`` updates them from keyword
    arguments, ``parse_opts`` from a ``--name[=value]`` style argv list.
    """

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):
        self.verbose = False
        self.timing = False
        self.drop_tables = []
        self.input_file = None
        self.output_file = "merged.ttf"
        self.import_file = None

        self.set(**kwargs)

    def set(self, **kwargs):
        """Set known options from keyword arguments.

        Raises:
            UnknownOptionError: for any name that is not an existing attribute.
        """
        for k, v in kwargs.items():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)

    def parse_opts(self, argv, ignore_unknown=None):
        """Parse ``--name[=value]`` options out of *argv*.

        Values are coerced to the type of the option's current value.  List
        options support ``--name+=a,b`` (extend) and ``--name-=a,b`` (remove).

        Args:
            argv: sequence of command-line arguments.
            ignore_unknown: True to pass through all unknown options, or a
                collection of option names to pass through; by default an
                unknown option raises UnknownOptionError.  (Was a mutable
                ``[]`` default, a shared-mutable-default hazard; None now
                stands in for "none ignored".)

        Returns:
            The list of arguments that were not consumed (positionals and,
            when allowed, unknown options).
        """
        if ignore_unknown is None:
            ignore_unknown = ()
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith("--"):
                # Not an option; pass through untouched.
                ret.append(a)
                continue
            a = a[2:]
            i = a.find("=")
            op = "="
            if i == -1:
                # Bare flag: "--name" sets True, "--no-name" sets False.
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1] + "="  # Op is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i + 1 :]
            ok = k
            k = k.replace("-", "_")
            if not hasattr(self, k):
                if ignore_unknown is True or ok in ignore_unknown:
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            if isinstance(ov, bool):
                # NOTE: bool("0") is True, so "--flag=0" does NOT clear a
                # flag; use "--no-flag".  Kept for backward compatibility.
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(",")
                if vv == [""]:
                    vv = []
                # Convert numeric-looking entries (any base, via int(x, 0)).
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == "=":
                    v = vv
                elif op == "+=":
                    v = ov
                    v.extend(vv)
                elif op == "-=":
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)
        return ret
venv\Lib\site-packages\fontTools\merge\tables.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib, cffLib
from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.cmap import computeMegaCmap
from fontTools.merge.util import *
import logging
log = logging.getLogger("fontTools.merge")
ttLib.getTableClass("maxp").mergeMap = {
"*": max,
"tableTag": equal,
"tableVersion": equal,
"numGlyphs": sum,
"maxStorage": first,
"maxFunctionDefs": first,
"maxInstructionDefs": first,
# TODO When we correctly merge hinting data, update these values:
# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}
headFlagsMergeBitMap = {
"size": 16,
"*": bitwise_or,
1: bitwise_and, # Baseline at y = 0
2: bitwise_and, # lsb at x = 0
3: bitwise_and, # Force ppem to integer values. FIXME?
5: bitwise_and, # Font is vertical
6: lambda bit: 0, # Always set to zero
11: bitwise_and, # Font data is 'lossless'
13: bitwise_and, # Optimized for ClearType
14: bitwise_and, # Last resort font. FIXME? equal or first may be better
15: lambda bit: 0, # Always set to zero
}
ttLib.getTableClass("head").mergeMap = {
"tableTag": equal,
"tableVersion": max,
"fontRevision": max,
"checkSumAdjustment": lambda lst: 0, # We need *something* here
"magicNumber": equal,
"flags": mergeBits(headFlagsMergeBitMap),
"unitsPerEm": equal,
"created": current_time,
"modified": current_time,
"xMin": min,
"yMin": min,
"xMax": max,
"yMax": max,
"macStyle": first,
"lowestRecPPEM": max,
"fontDirectionHint": lambda lst: 2,
"indexToLocFormat": first,
"glyphDataFormat": equal,
}
ttLib.getTableClass("hhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceWidthMax": max,
"minLeftSideBearing": min,
"minRightSideBearing": min,
"xMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfHMetrics": recalculate,
}
ttLib.getTableClass("vhea").mergeMap = {
"*": equal,
"tableTag": equal,
"tableVersion": max,
"ascent": max,
"descent": min,
"lineGap": max,
"advanceHeightMax": max,
"minTopSideBearing": min,
"minBottomSideBearing": min,
"yMaxExtent": max,
"caretSlopeRise": first,
"caretSlopeRun": first,
"caretOffset": first,
"numberOfVMetrics": recalculate,
}
os2FsTypeMergeBitMap = {
"size": 16,
"*": lambda bit: 0,
1: bitwise_or, # no embedding permitted
2: bitwise_and, # allow previewing and printing documents
3: bitwise_and, # allow editing documents
8: bitwise_or, # no subsetting permitted
9: bitwise_or, # no embedding of outlines permitted
}
def mergeOs2FsType(lst):
    """Merge OS/2 fsType embedding-permission fields: the least restrictive
    permission wins per bit, except that "no embedding" (bit 1) overrides."""
    lst = list(lst)
    if all(item == 0 for item in lst):
        # Every font is installable; nothing to reconcile.
        return 0

    # Compute least restrictive logic for each fsType value
    for i, value in enumerate(lst):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if value & 0x000C:
            lst[i] = value & ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif value & 0x0008:
            lst[i] = value | 0x0004
        # set bits 2 and 3 if everything is allowed
        elif value == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType
ttLib.getTableClass("OS/2").mergeMap = {
"*": first,
"tableTag": equal,
"version": max,
"xAvgCharWidth": first, # Will be recalculated at the end on the merged font
"fsType": mergeOs2FsType, # Will be overwritten
"panose": first, # FIXME: should really be the first Latin font
"ulUnicodeRange1": bitwise_or,
"ulUnicodeRange2": bitwise_or,
"ulUnicodeRange3": bitwise_or,
"ulUnicodeRange4": bitwise_or,
"fsFirstCharIndex": min,
"fsLastCharIndex": max,
"sTypoAscender": max,
"sTypoDescender": min,
"sTypoLineGap": max,
"usWinAscent": max,
"usWinDescent": max,
# Version 1
"ulCodePageRange1": onlyExisting(bitwise_or),
"ulCodePageRange2": onlyExisting(bitwise_or),
# Version 2, 3, 4
"sxHeight": onlyExisting(max),
"sCapHeight": onlyExisting(max),
"usDefaultChar": onlyExisting(first),
"usBreakChar": onlyExisting(first),
"usMaxContext": onlyExisting(max),
# version 5
"usLowerOpticalPointSize": onlyExisting(min),
"usUpperOpticalPointSize": onlyExisting(max),
}
@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
DefaultTable.merge(self, m, tables)
if self.version < 2:
# bits 8 and 9 are reserved and should be set to zero
self.fsType &= ~0x0300
if self.version >= 3:
# Only one of bits 1, 2, and 3 may be set. We already take
# care of bit 1 implications in mergeOs2FsType. So unset
# bit 2 if bit 3 is already set.
if self.fsType & 0x0008:
self.fsType &= ~0x0004
return self
ttLib.getTableClass("post").mergeMap = {
"*": first,
"tableTag": equal,
"formatType": max,
"isFixedPitch": min,
"minMemType42": max,
"maxMemType42": lambda lst: 0,
"minMemType1": max,
"maxMemType1": lambda lst: 0,
"mapping": onlyExisting(sumDicts),
"extraNames": lambda lst: [],
}
ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
"tableTag": equal,
"metrics": sumDicts,
}
ttLib.getTableClass("name").mergeMap = {
"tableTag": equal,
"names": first, # FIXME? Does mixing name records make sense?
}
ttLib.getTableClass("loca").mergeMap = {
"*": recalculate,
"tableTag": equal,
}
ttLib.getTableClass("glyf").mergeMap = {
"tableTag": equal,
"glyphs": sumDicts,
"glyphOrder": sumLists,
"_reverseGlyphOrder": recalculate,
"axisTags": equal,
}
@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
for i, table in enumerate(tables):
for g in table.glyphs.values():
if i:
# Drop hints for all but first font, since
# we don't map functions / CVT values.
g.removeHinting()
# Expand composite glyphs to load their
# composite glyph names.
if g.isComposite():
g.expand(table)
return DefaultTable.merge(self, m, tables)
ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
lst
) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
if any(hasattr(table.cff[0], "FDSelect") for table in tables):
raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")
for table in tables:
table.cff.desubroutinize()
newcff = tables[0]
newfont = newcff.cff[0]
private = newfont.Private
newDefaultWidthX, newNominalWidthX = private.defaultWidthX, private.nominalWidthX
storedNamesStrings = []
glyphOrderStrings = []
glyphOrder = set(newfont.getGlyphOrder())
for name in newfont.strings.strings:
if name not in glyphOrder:
storedNamesStrings.append(name)
else:
glyphOrderStrings.append(name)
chrset = list(newfont.charset)
newcs = newfont.CharStrings
log.debug("FONT 0 CharStrings: %d.", len(newcs))
for i, table in enumerate(tables[1:], start=1):
font = table.cff[0]
defaultWidthX, nominalWidthX = (
font.Private.defaultWidthX,
font.Private.nominalWidthX,
)
widthsDiffer = (
defaultWidthX != newDefaultWidthX or nominalWidthX != newNominalWidthX
)
font.Private = private
fontGlyphOrder = set(font.getGlyphOrder())
for name in font.strings.strings:
if name in fontGlyphOrder:
glyphOrderStrings.append(name)
cs = font.CharStrings
gs = table.cff.GlobalSubrs
log.debug("Font %d CharStrings: %d.", i, len(cs))
chrset.extend(font.charset)
if newcs.charStringsAreIndexed:
for i, name in enumerate(cs.charStrings, start=len(newcs)):
newcs.charStrings[name] = i
newcs.charStringsIndex.items.append(None)
for name in cs.charStrings:
if widthsDiffer:
c = cs[name]
defaultWidthXToken = object()
extractor = T2WidthExtractor([], [], nominalWidthX, defaultWidthXToken)
extractor.execute(c)
width = extractor.width
if width is not defaultWidthXToken:
# The following will be wrong if the width is added
# by a subroutine. Ouch!
c.program.pop(0)
else:
width = defaultWidthX
if width != newDefaultWidthX:
c.program.insert(0, width - newNominalWidthX)
newcs[name] = cs[name]
newfont.charset = chrset
newfont.numGlyphs = len(chrset)
newfont.strings.strings = glyphOrderStrings + storedNamesStrings
return newcff
@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
if not hasattr(m, "cmap"):
computeMegaCmap(m, tables)
cmap = m.cmap
cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
self.tables = []
module = ttLib.getTableModule("cmap")
if len(cmapBmpOnly) != len(cmap):
# format-12 required.
cmapTable = module.cmap_classes[12](12)
cmapTable.platformID = 3
cmapTable.platEncID = 10
cmapTable.language = 0
cmapTable.cmap = cmap
self.tables.append(cmapTable)
# always create format-4
cmapTable = module.cmap_classes[4](4)
cmapTable.platformID = 3
cmapTable.platEncID = 1
cmapTable.language = 0
cmapTable.cmap = cmapBmpOnly
# ordered by platform then encoding
self.tables.insert(0, cmapTable)
uvsDict = m.uvsDict
if uvsDict:
# format-14
uvsTable = module.cmap_classes[14](14)
uvsTable.platformID = 0
uvsTable.platEncID = 5
uvsTable.language = 0
uvsTable.cmap = {}
uvsTable.uvsDict = uvsDict
# ordered by platform then encoding
self.tables.insert(0, uvsTable)
self.tableVersion = 0
self.numSubTables = len(self.tables)
return self
venv\Lib\site-packages\fontTools\merge\unicode.py
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
# Ranges of Default_Ignorable_Code_Point characters, from Unicode 14.0
# DerivedCoreProperties.txt:
# $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
# Singletons are encoded as (cp, cp); adjacent ranges are coalesced
# (e.g. E0000..E0FFF covers the tag and variation-selector block wholesale).
# TODO Move me to unicodedata module and autogenerate.
_DEFAULT_IGNORABLE_RANGES = (
    (0x00AD, 0x00AD),  # Cf SOFT HYPHEN
    (0x034F, 0x034F),  # Mn COMBINING GRAPHEME JOINER
    (0x061C, 0x061C),  # Cf ARABIC LETTER MARK
    (0x115F, 0x1160),  # Lo HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
    (0x17B4, 0x17B5),  # Mn KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
    (0x180B, 0x180F),  # Mn/Cf MONGOLIAN FREE VARIATION SELECTORS + VOWEL SEPARATOR
    (0x200B, 0x200F),  # Cf ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
    (0x202A, 0x202E),  # Cf LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
    (0x2060, 0x206F),  # Cf/Cn WORD JOINER..NOMINAL DIGIT SHAPES
    (0x3164, 0x3164),  # Lo HANGUL FILLER
    (0xFE00, 0xFE0F),  # Mn VARIATION SELECTOR-1..VARIATION SELECTOR-16
    (0xFEFF, 0xFEFF),  # Cf ZERO WIDTH NO-BREAK SPACE
    (0xFFA0, 0xFFA0),  # Lo HALFWIDTH HANGUL FILLER
    (0xFFF0, 0xFFF8),  # Cn <reserved>
    (0x1BCA0, 0x1BCA3),  # Cf SHORTHAND FORMAT LETTER OVERLAP..UP STEP
    (0x1D173, 0x1D17A),  # Cf MUSICAL SYMBOL BEGIN BEAM..END PHRASE
    (0xE0000, 0xE0FFF),  # Cf/Cn/Mn LANGUAGE TAG, TAG chars, VS-17..VS-256
)


def is_Default_Ignorable(u):
    """Return True if codepoint *u* has the Default_Ignorable_Code_Point
    property.

    http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
    """
    # The previous chained-`or` form had its inline comments shifted one
    # condition down by formatting, mislabeling every range; the data is
    # unchanged here, just expressed as coalesced (lo, hi) ranges.
    return any(lo <= u <= hi for lo, hi in _DEFAULT_IGNORABLE_RANGES)
venv\Lib\site-packages\fontTools\merge\util.py
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools.misc.timeTools import timestampNow
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from functools import reduce
import operator
import logging
log = logging.getLogger("fontTools.merge")
# General utility functions for merging values from different fonts
def equal(lst):
    """Merge strategy: all inputs must agree; return that shared value.

    Materializes the iterable, checks every remaining item against the
    first one (assert — stripped under ``python -O``), and returns it.
    """
    values = list(lst)
    rest = iter(values)
    head = next(rest)
    assert all(v == head for v in rest), "Expected all items to be equal: %s" % values
    return head
def first(lst):
    """Merge strategy: keep the value from the first font only."""
    for item in lst:
        return item
    # Empty input: mirror next(iter(...)) by raising StopIteration.
    raise StopIteration
def recalculate(lst):
    """Merge strategy: discard the inputs; NotImplemented signals that the
    field should be recomputed (or dropped) downstream — presumably by the
    table-compile step; confirm against the table merge specs."""
    return NotImplemented
def current_time(lst):
    """Merge strategy: ignore the inputs and use the current timestamp
    (e.g. for date-stamp fields — TODO confirm which tables use this)."""
    return timestampNow()
def bitwise_and(lst):
    """Merge strategy: AND all values together.
    Note: reduce() with no initializer raises TypeError on an empty input."""
    return reduce(operator.and_, lst)
def bitwise_or(lst):
    """Merge strategy: OR all values together.
    Note: reduce() with no initializer raises TypeError on an empty input."""
    return reduce(operator.or_, lst)
def avg_int(lst):
    """Merge strategy: floor-average of the integer inputs."""
    values = list(lst)
    return sum(values) // len(values)
def onlyExisting(func):
    """Returns a filter func that when called with a list,
    only calls func on the non-NotImplemented items of the list,
    and only so if there's at least one item remaining.
    Otherwise returns NotImplemented."""
    def wrapper(lst):
        present = [value for value in lst if value is not NotImplemented]
        if not present:
            return NotImplemented
        return func(present)
    return wrapper
def sumLists(lst):
    """Concatenate a sequence of iterables into a single flat list."""
    return [element for sub in lst for element in sub]
def sumDicts(lst):
    """Merge a sequence of dicts into one; later entries win on key clashes."""
    merged = {}
    for mapping in lst:
        merged.update(mapping)
    return merged
def mergeBits(bitmap):
    """Return a merger that combines integer bitfields bit by bit.

    *bitmap* maps bit numbers to per-bit merge callables (receiving an
    iterable of bools), with "*" as the wildcard entry and "size" giving
    the number of bits to process.
    """
    def wrapper(lst):
        values = list(lst)
        merged = 0
        for bit in range(bitmap["size"]):
            try:
                mergeLogic = bitmap[bit]
            except KeyError:
                try:
                    mergeLogic = bitmap["*"]
                except KeyError:
                    raise Exception("Don't know how to merge bit %s" % bit)
            mask = 1 << bit
            merged |= mergeLogic(bool(v & mask) for v in values) << bit
        return merged
    return wrapper
class AttendanceRecordingIdentityDict(object):
    """A dictionary-like object that records indices of items actually accessed
    from a list. Keys are compared by identity (id()), not equality."""
    def __init__(self, lst):
        self.l = lst
        self.d = {}
        for index, value in enumerate(lst):
            self.d[id(value)] = index
        self.s = set()
    def __getitem__(self, v):
        # Record the index of the accessed item, then hand it straight back.
        self.s.add(self.d[id(v)])
        return v
class GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list. Membership is by identity (id())."""
    def __init__(self, lst):
        self.l = lst
        self.s = {id(item) for item in lst}
    def __getitem__(self, v):
        key = id(v)
        if key not in self.s:
            # First visit: remember it and append to the backing list.
            self.s.add(key)
            self.l.append(v)
        return v
class NonhashableDict(object):
    """A dictionary-like object mapping objects to values, keyed by identity
    (id()) so that unhashable objects can be used as keys."""
    def __init__(self, keys, values=None):
        if values is None:
            # No values given: map each key to its position in *keys*.
            self.d = {id(key): index for index, key in enumerate(keys)}
        else:
            self.d = dict(zip((id(key) for key in keys), values))
    def __getitem__(self, k):
        return self.d[id(k)]
    def __setitem__(self, k, v):
        self.d[id(k)] = v
    def __delitem__(self, k):
        del self.d[id(k)]
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import (
computeMegaGlyphOrder,
computeMegaCmap,
renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
from fontTools.misc.loggingTools import Timer
from functools import reduce
import sys
import logging
log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
class Merger(object):
    """Font merger.
    This class merges multiple files into a single OpenType font, taking into
    account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
    cross-font metrics (for example ``hhea.ascent`` is set to the maximum value
    across all the fonts).
    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    height), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft.1`` and ``parenright.1``,
    and a lookup will be inserted into the ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft.1`` etc.
    Restrictions:
    - All fonts must have the same units per em.
    - If duplicate glyph disambiguation takes place as described above then the
    fonts must have a ``GSUB`` table.
    Attributes:
    options: Currently unused.
    """
    def __init__(self, options=None):
        # Fall back to default Options when none (or a falsy value) is given.
        if not options:
            options = Options()
        self.options = options
    def _openFonts(self, fontfiles):
        """Open each path as a TTFont and stash the source file name and the
        font's debug name (name table ID 4) on it for later diagnostics."""
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, fontfile in zip(fonts, fontfiles):
            font._merger__fontfile = fontfile
            font._merger__name = font["name"].getDebugName(4)
        return fonts
    def merge(self, fontfiles):
        """Merges fonts together.
        Args:
            fontfiles: A list of file names to be merged
        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = self._openFonts(fontfiles)
        glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
        # Mutates glyphOrders in place and sets self.glyphOrder.
        computeMegaGlyphOrder(self, glyphOrders)
        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion
        # Reload fonts and set new glyph names on them.
        fonts = self._openFonts(fontfiles)
        for font, glyphOrder in zip(fonts, glyphOrders):
            font.setGlyphOrder(glyphOrder)
            if "CFF " in font:
                # CFF charstrings carry their own names; keep them in sync.
                renameCFFCharStrings(self, glyphOrder, font["CFF "])
        cmaps = [font["cmap"] for font in fonts]
        # Per-font map of glyphs that were renamed to avoid cmap collisions;
        # consumed by the layout pre-merge step.
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        computeMegaCmap(self, cmaps)
        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(self.glyphOrder)
        for font in fonts:
            self._preMerge(font)
        self.fonts = fonts
        # Union of all table tags present in any input font.
        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove("GlyphOrder")
        for tag in sorted(allTags):
            if tag in self.options.drop_tables:
                continue
            with timer("merge '%s'" % tag):
                # NotImplemented marks fonts that lack this table.
                tables = [font.get(tag, NotImplemented) for font in fonts]
                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use: table = mergeObjects(tables)
                # False / NotImplemented from merge() means "drop this table".
                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)
        del self.duplicateGlyphsPerFont
        del self.fonts
        self._postMerge(mega)
        return mega
    def mergeObjects(self, returnTable, logic, tables):
        # Right now we don't use self at all. Will use in the future
        # for options and logging.
        #
        # Merge the attributes of *tables* into *returnTable*, using the
        # per-attribute merge strategies in *logic* ("*" is the wildcard).
        allKeys = set.union(
            set(),
            *(vars(table).keys() for table in tables if table is not NotImplemented),
        )
        for key in allKeys:
            log.info(" %s", key)
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic["*"]
                except KeyError:
                    raise Exception(
                        "Don't know how to merge key %s of class %s"
                        % (key, returnTable.__class__.__name__)
                    )
            # NotImplemented as the strategy means: leave the attribute alone.
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)
        return returnTable
    def _preMerge(self, font):
        # Prepare OpenType layout tables (GSUB/GPOS/GDEF) before merging.
        layoutPreMerge(font)
    def _postMerge(self, font):
        # Fix up layout tables after merging, then recalc derived metrics.
        layoutPostMerge(font)
        if "OS/2" in font:
            # https://github.com/fonttools/fonttools/issues/2538
            # TODO: Add an option to disable this?
            font["OS/2"].recalcAvgCharWidth(font)
__all__ = ["Options", "Merger", "main"]
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Merge multiple fonts into one"""
    # CLI entry point for `fonttools merge`. Returns 1 (after printing
    # usage) when no inputs were given; otherwise writes the merged font.
    from fontTools import configLogger
    if args is None:
        args = sys.argv[1:]
    options = Options()
    args = options.parse_opts(args)
    fontfiles = []
    if options.input_file:
        # One font path per line; lines starting with '#' are comments.
        with open(options.input_file) as inputfile:
            fontfiles = [
                line.strip()
                for line in inputfile.readlines()
                if not line.lstrip().startswith("#")
            ]
    # Positional arguments come after any paths read from --input-file.
    fontfiles.extend(args)
    if len(fontfiles) < 1:
        print(
            "usage: fonttools merge [font1 ... fontN] [--input-file=filelist.txt] [--output-file=merged.ttf] [--import-file=tables.ttx]",
            file=sys.stderr,
        )
        print(
            " [--drop-tables=tags] [--verbose] [--timing]",
            file=sys.stderr,
        )
        print("", file=sys.stderr)
        print(" font1 ... fontN Files to merge.", file=sys.stderr)
        print(
            " --input-file= Read files to merge from a text file, each path new line. # Comment lines allowed.",
            file=sys.stderr,
        )
        print(
            " --output-file= Specify output file name (default: merged.ttf).",
            file=sys.stderr,
        )
        print(
            " --import-file= TTX file to import after merging. This can be used to set metadata.",
            file=sys.stderr,
        )
        # FIX: this literal was split across two source lines, leaving an
        # unterminated string (a syntax error); rejoined into one line.
        # NOTE(review): an angle-bracket placeholder (e.g. "<table tags>")
        # was probably lost from the original text here — confirm upstream.
        print(
            " --drop-tables= Comma separated list of table tags to skip, case sensitive.",
            file=sys.stderr,
        )
        print(
            " --verbose Output progress information.",
            file=sys.stderr,
        )
        print(" --timing Output progress timing.", file=sys.stderr)
        return 1
    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True
    merger = Merger(options=options)
    font = merger.merge(fontfiles)
    if options.import_file:
        # Optional TTX overlay, e.g. to set name-table metadata post-merge.
        font.importXML(options.import_file)
    with timer("compile and save font"):
        font.save(options.output_file)
if __name__ == "__main__":
sys.exit(main())
""" fontTools.misc.classifyTools.py -- tools for classifying things.
"""
class Classifier(object):
    """
    Main Classifier object, used to classify things into similar sets.
    Incrementally refines a partition: after each add(), every class set is
    either a subset of, or disjoint from, each input set seen so far.
    """
    def __init__(self, sort=True):
        self._things = set()  # set of all things known so far
        self._sets = []  # list of class sets produced so far
        self._mapping = {}  # map from things to their class set
        self._dirty = False
        self._sort = sort
    def add(self, set_of_things):
        """
        Add a set to the classifier. Any iterable is accepted.
        """
        if not set_of_things:
            return
        self._dirty = True
        things, sets, mapping = self._things, self._sets, self._mapping
        s = set(set_of_things)
        intersection = s.intersection(things)  # existing things
        s.difference_update(intersection)  # new things
        difference = s
        del s
        # Add new class for new things
        if difference:
            things.update(difference)
            sets.append(difference)
            for thing in difference:
                mapping[thing] = difference
        del difference
        # Split every old class that partially overlaps the new set: the
        # overlapping part is carved out into a class of its own.
        while intersection:
            # Take one item and process the old class it belongs to
            old_class = mapping[next(iter(intersection))]
            old_class_intersection = old_class.intersection(intersection)
            # Update old class to remove items from new set
            old_class.difference_update(old_class_intersection)
            # Remove processed items from todo list
            intersection.difference_update(old_class_intersection)
            # Add new class for the intersection with old class
            sets.append(old_class_intersection)
            for thing in old_class_intersection:
                mapping[thing] = old_class_intersection
            del old_class_intersection
    def update(self, list_of_sets):
        """
        Add a list of sets to the classifier. Any iterable of iterables is accepted.
        """
        for s in list_of_sets:
            self.add(s)
    def _process(self):
        # Lazily drop emptied classes (and optionally sort) before output.
        if not self._dirty:
            return
        # Do any deferred processing
        sets = self._sets
        self._sets = [s for s in sets if s]
        if self._sort:
            # Biggest classes first; ties broken by natural element order.
            self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))
        self._dirty = False
    # Output methods
    def getThings(self):
        """Returns the set of all things known so far.
        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._things
    def getMapping(self):
        """Returns the mapping from things to their class set.
        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._mapping
    def getClasses(self):
        """Returns the list of class sets.
        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._sets
def classify(list_of_sets, sort=True):
    """
    Takes a iterable of iterables (list of sets from here on; but any
    iterable works.), and returns the smallest list of sets such that
    each set, is either a subset, or is disjoint from, each of the input
    sets.
    In other words, this function classifies all the things present in
    any of the input sets, into similar classes, based on which sets
    things are a member of.
    If sort=True, return class sets are sorted by decreasing size and
    their natural sort order within each class size. Otherwise, class
    sets are returned in the order that they were identified, which is
    generally not significant.
    >>> classify([]) == ([], {})
    True
    >>> classify([[]]) == ([], {})
    True
    >>> classify([[], []]) == ([], {})
    True
    >>> classify([[1]]) == ([{1}], {1: {1}})
    True
    >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})
    True
    >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
    True
    >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})
    True
    >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})
    True
    >>> classify([[1,2],[2,4,5]]) == (
    ...     [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
    True
    >>> classify([[1,2],[2,4,5]], sort=False) == (
    ...     [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
    True
    >>> classify([[1,2,9],[2,4,5]], sort=False) == (
    ...     [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},
    ...      9: {1, 9}})
    True
    >>> classify([[1,2,9,15],[2,4,5]], sort=False) == (
    ...     [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},
    ...      5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})
    True
    >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)
    >>> set([frozenset(c) for c in classes]) == set(
    ...     [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])
    True
    >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}
    True
    """
    # Thin convenience wrapper around Classifier.
    partitioner = Classifier(sort=sort)
    partitioner.update(list_of_sets)
    classes = partitioner.getClasses()
    mapping = partitioner.getMapping()
    return classes, mapping
if __name__ == "__main__":
    # Run the module's doctests; exit status is the number of failures.
    import sys, doctest
    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
venv\Lib\site-packages\fontTools\misc\cliTools.py
"""Collection of utilities for command-line interfaces and console scripts."""
import os
import re
numberAddedRE = re.compile(r"#\d+$")
def makeOutputFileName(
    input, outputDir=None, extension=None, overWrite=False, suffix=""
):
    """Generates a suitable file name for writing output.
    Often tools will want to take a file, do some kind of transformation to it,
    and write it out again. This function determines an appropriate name for the
    output file, through one or more of the following steps:
    - changing the output directory
    - appending suffix before file extension
    - replacing the file extension
    - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid
    overwriting an existing file.
    Args:
        input: Name of input file.
        outputDir: Optionally, a new directory to write the file into.
        suffix: Optionally, a string suffix is appended to file name before
            the extension.
        extension: Optionally, a replacement for the current file extension.
        overWrite: Overwriting an existing file is permitted if true; if false
            and the proposed filename exists, a new name will be generated by
            adding an appropriate number suffix.
    Returns:
        str: Suitable output filename
    """
    directory, baseName = os.path.split(input)
    if outputDir:
        directory = outputDir
    stem = os.path.splitext(baseName)[0]
    # Strip any "#N" counter left over from a previous run.
    stem = numberAddedRE.split(stem)[0]
    if extension is None:
        extension = os.path.splitext(input)[1]
    candidate = os.path.join(directory, stem + suffix + extension)
    if not overWrite:
        counter = 1
        while os.path.exists(candidate):
            candidate = os.path.join(
                directory, f"{stem}{suffix}#{counter}{extension}"
            )
            counter += 1
    return candidate
"""
Code of the config system; not related to fontTools or fonts in particular.
The options that are specific to fontTools are in :mod:`fontTools.config`.
To create your own config system, you need to create an instance of
:class:`Options`, and a subclass of :class:`AbstractConfig` with its
``options`` class variable set to your instance of Options.
"""
from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import (
    Any,
    Callable,
    ClassVar,
    Dict,
    Iterable,
    Iterator,
    Mapping,
    MutableMapping,
    Optional,
    Set,
    Union,
)
log = logging.getLogger(__name__)
__all__ = [
"AbstractConfig",
"ConfigAlreadyRegisteredError",
"ConfigError",
"ConfigUnknownOptionError",
"ConfigValueParsingError",
"ConfigValueValidationError",
"Option",
"Options",
]
class ConfigError(Exception):
    """Base exception for the config module.
    All config-specific errors below derive from this class, so callers can
    catch any of them with a single ``except ConfigError``."""
class ConfigAlreadyRegisteredError(ConfigError):
    """Raised when a module tries to register a configuration option that
    already exists.
    Should not be raised too much really, only when developing new fontTools
    modules.
    """
    def __init__(self, name):
        # name: the duplicate option's unique name, e.g. "package.module:OPTION".
        super().__init__(f"Config option {name} is already registered.")
class ConfigValueParsingError(ConfigError):
    """Raised when a configuration value cannot be parsed.
    Chained (``raise ... from``) by AbstractConfig.set() so the underlying
    parse failure stays visible."""
    def __init__(self, name, value):
        super().__init__(
            f"Config option {name}: value cannot be parsed (given {repr(value)})"
        )
class ConfigValueValidationError(ConfigError):
    """Raised when a configuration value cannot be validated
    (the option's ``validate`` callable returned a falsy result)."""
    def __init__(self, name, value):
        super().__init__(
            f"Config option {name}: value is invalid (given {repr(value)})"
        )
class ConfigUnknownOptionError(ConfigError):
    """Raised when a configuration option is unknown."""
    def __init__(self, option_or_name):
        # Accepts either an Option instance or a plain option name string.
        # NOTE(review): the trailing ">" in the Option branch looks like a
        # leftover from a "<Option ...>"-style repr — confirm before changing
        # the message text.
        name = (
            f"'{option_or_name.name}' (id={id(option_or_name)})>"
            if isinstance(option_or_name, Option)
            else f"'{option_or_name}'"
        )
        super().__init__(f"Config option {name} is unknown")
# eq=False because Options are unique, not fungible objects
@dataclass(frozen=True, eq=False)
class Option:
    """Describes one configurable option: its unique name, help text,
    default value, and how to parse and validate raw values."""
    name: str
    """Unique name identifying the option (e.g. package.module:MY_OPTION)."""
    help: str
    """Help text for this option."""
    default: Any
    """Default value for this option."""
    parse: Callable[[str], Any]
    """Turn input (e.g. string) into proper type. Only when reading from file."""
    validate: Optional[Callable[[Any], bool]] = None
    """Return true if the given value is an acceptable value."""
    @staticmethod
    def parse_optional_bool(v: str) -> Optional[bool]:
        """Parse "0/no/false", "1/yes/true", "auto/none" (case-insensitive)
        into False, True, or None respectively; raise ValueError otherwise."""
        s = str(v).lower()
        if s in {"0", "no", "false"}:
            return False
        if s in {"1", "yes", "true"}:
            return True
        if s in {"auto", "none"}:
            return None
        # FIX: the message was a plain string missing the f-prefix, so the
        # offending value was never interpolated into the error text.
        raise ValueError(f"invalid optional bool: {v!r}")
    @staticmethod
    def validate_optional_bool(v: Any) -> bool:
        """Accept exactly None, True, or False."""
        return v is None or isinstance(v, bool)
class Options(Mapping):
    """Registry of available options for a given config system.
    Define new options using the :meth:`register()` method.
    Access existing options using the Mapping interface.
    """
    # Name-mangled storage: option name -> Option instance.
    __options: Dict[str, Option]
    # FIX: the parameter was annotated `other: "Options" = None` — an
    # implicit Optional, which PEP 484 deprecates; made it explicit.
    def __init__(self, other: Optional["Options"] = None) -> None:
        """Create a registry, optionally copying all options of *other*."""
        self.__options = {}
        if other is not None:
            for option in other.values():
                self.register_option(option)
    def register(
        self,
        name: str,
        help: str,
        default: Any,
        parse: Callable[[str], Any],
        validate: Optional[Callable[[Any], bool]] = None,
    ) -> Option:
        """Create and register a new option."""
        return self.register_option(Option(name, help, default, parse, validate))
    def register_option(self, option: Option) -> Option:
        """Register a new option.
        Raises:
            ConfigAlreadyRegisteredError: if the name is already registered.
        """
        name = option.name
        if name in self.__options:
            raise ConfigAlreadyRegisteredError(name)
        self.__options[name] = option
        return option
    def is_registered(self, option: Option) -> bool:
        """Return True if the same option object is already registered."""
        return self.__options.get(option.name) is option
    def __getitem__(self, key: str) -> Option:
        return self.__options.__getitem__(key)
    def __iter__(self) -> Iterator[str]:
        # NOTE: Iterator must be imported from typing at module level.
        return self.__options.__iter__()
    def __len__(self) -> int:
        return self.__options.__len__()
    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}({{\n"
            + "".join(
                f" {k!r}: Option(default={v.default!r}, ...),\n"
                for k, v in self.__options.items()
            )
            + "})"
        )
# Sentinel distinguishing "caller passed no default" from an explicit None.
_USE_GLOBAL_DEFAULT = object()
class AbstractConfig(MutableMapping):
    """
    Create a set of config values, optionally pre-filled with values from
    the given dictionary or pre-existing config object.
    The class implements the MutableMapping protocol keyed by option name (`str`).
    For convenience its methods accept either Option or str as the key parameter.
    .. seealso:: :meth:`set()`
    This config class is abstract because it needs its ``options`` class
    var to be set to an instance of :class:`Options` before it can be
    instantiated and used.
    .. code:: python
        class MyConfig(AbstractConfig):
            options = Options()
        MyConfig.register_option( "test:option_name", "This is an option", 0, int, lambda v: isinstance(v, int))
        cfg = MyConfig({"test:option_name": 10})
    """
    # Shared registry of known options; subclasses must assign this.
    options: ClassVar[Options]
    @classmethod
    def register_option(
        cls,
        name: str,
        help: str,
        default: Any,
        parse: Callable[[str], Any],
        validate: Optional[Callable[[Any], bool]] = None,
    ) -> Option:
        """Register an available option in this config system."""
        return cls.options.register(
            name, help=help, default=default, parse=parse, validate=validate
        )
    # Per-instance values, keyed by option name.
    _values: Dict[str, Any]
    def __init__(
        self,
        values: Union[AbstractConfig, Dict[Union[Option, str], Any]] = {},
        parse_values: bool = False,
        skip_unknown: bool = False,
    ):
        # NOTE(review): the mutable default dict is only read, never mutated,
        # so sharing it across calls is harmless here.
        self._values = {}
        values_dict = values._values if isinstance(values, AbstractConfig) else values
        for name, value in values_dict.items():
            self.set(name, value, parse_values, skip_unknown)
    def _resolve_option(self, option_or_name: Union[Option, str]) -> Option:
        # Normalize an Option-or-name key to a registered Option instance,
        # raising ConfigUnknownOptionError for anything unregistered.
        if isinstance(option_or_name, Option):
            option = option_or_name
            if not self.options.is_registered(option):
                raise ConfigUnknownOptionError(option)
            return option
        elif isinstance(option_or_name, str):
            name = option_or_name
            try:
                return self.options[name]
            except KeyError:
                raise ConfigUnknownOptionError(name)
        else:
            raise TypeError(
                "expected Option or str, found "
                f"{type(option_or_name).__name__}: {option_or_name!r}"
            )
    def set(
        self,
        option_or_name: Union[Option, str],
        value: Any,
        parse_values: bool = False,
        skip_unknown: bool = False,
    ):
        """Set the value of an option.
        Args:
            * `option_or_name`: an `Option` object or its name (`str`).
            * `value`: the value to be assigned to given option.
            * `parse_values`: parse the configuration value from a string into
                its proper type, as per its `Option` object. The default
                behavior is to raise `ConfigValueValidationError` when the value
                is not of the right type. Useful when reading options from a
                file type that doesn't support as many types as Python.
            * `skip_unknown`: skip unknown configuration options. The default
                behaviour is to raise `ConfigUnknownOptionError`. Useful when
                reading options from a configuration file that has extra entries
                (e.g. for a later version of fontTools)
        """
        try:
            option = self._resolve_option(option_or_name)
        except ConfigUnknownOptionError as e:
            if skip_unknown:
                log.debug(str(e))
                return
            raise
        # Can be useful if the values come from a source that doesn't have
        # strict typing (.ini file? Terminal input?)
        if parse_values:
            try:
                value = option.parse(value)
            except Exception as e:
                raise ConfigValueParsingError(option.name, value) from e
        if option.validate is not None and not option.validate(value):
            raise ConfigValueValidationError(option.name, value)
        self._values[option.name] = value
    def get(
        self, option_or_name: Union[Option, str], default: Any = _USE_GLOBAL_DEFAULT
    ) -> Any:
        """
        Get the value of an option. The value which is returned is the first
        provided among:
        1. a user-provided value in the options's ``self._values`` dict
        2. a caller-provided default value to this method call
        3. the global default for the option provided in ``fontTools.config``
        This is to provide the ability to migrate progressively from config
        options passed as arguments to fontTools APIs to config options read
        from the current TTFont, e.g.
        .. code:: python
            def fontToolsAPI(font, some_option):
                value = font.cfg.get("someLib.module:SOME_OPTION", some_option)
                # use value
        That way, the function will work the same for users of the API that
        still pass the option to the function call, but will favour the new
        config mechanism if the given font specifies a value for that option.
        """
        option = self._resolve_option(option_or_name)
        if option.name in self._values:
            return self._values[option.name]
        # The sentinel lets callers pass default=None and have it honored.
        if default is not _USE_GLOBAL_DEFAULT:
            return default
        return option.default
    def copy(self):
        # Shallow copy: option values themselves are shared, not cloned.
        return self.__class__(self._values)
    def __getitem__(self, option_or_name: Union[Option, str]) -> Any:
        return self.get(option_or_name)
    def __setitem__(self, option_or_name: Union[Option, str], value: Any) -> None:
        return self.set(option_or_name, value)
    def __delitem__(self, option_or_name: Union[Option, str]) -> None:
        option = self._resolve_option(option_or_name)
        del self._values[option.name]
    def __iter__(self) -> Iterable[str]:
        # NOTE(review): annotated Iterable here but Iterator in Options —
        # harmless under lazy annotations, but inconsistent.
        return self._values.__iter__()
    def __len__(self) -> int:
        return len(self._values)
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({repr(self._values)})"
venv\Lib\site-packages\fontTools\misc\cython.py
""" Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py
This allows to optionally compile @cython decorated functions
(when cython is available at built time), or run the same code
as pure-python, without runtime dependency on cython module.
We only define the symbols that we use. E.g. see fontTools.cu2qu
"""
from types import SimpleNamespace
def _empty_decorator(x):
    # Identity decorator standing in for cython's real decorators.
    return x
# False signals "running as pure Python" (cython.compiled is True when compiled).
compiled = False
# Type placeholders: evaluate to None when cython is absent.
for name in ("double", "complex", "int"):
    globals()[name] = None
# Decorator placeholders: plain no-ops when cython is absent.
for name in ("cfunc", "inline"):
    globals()[name] = _empty_decorator
# cython.locals(...)/cython.returns(...) are decorator factories; both collapse
# to the identity decorator here. NOTE: `locals` intentionally shadows the
# builtin within this module's namespace to mirror the cython API.
locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
"""Misc dict tools."""
__all__ = ["hashdict"]
# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into
    other dicts.
    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)
    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """
    def __key(self):
        # Canonical, order-independent identity of the contents.
        return tuple(sorted(self.items()))
    def __repr__(self):
        pairs = ", ".join(
            "{0}={1}".format(str(k), repr(v)) for k, v in self.__key()
        )
        return "{0}({1})".format(self.__class__.__name__, pairs)
    def __hash__(self):
        return hash(self.__key())
    # Every mutating dict method is disabled via a single shared helper so
    # instances stay safely hashable after construction.
    def __immutable(self, *args, **kwargs):
        raise TypeError(
            "{0} does not support item assignment".format(self.__class__.__name__)
        )
    __setitem__ = __immutable
    __delitem__ = __immutable
    clear = __immutable
    pop = __immutable
    popitem = __immutable
    setdefault = __immutable
    # update is not ok because it mutates the object
    update = __immutable
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):
        combined = hashdict(self)
        dict.update(combined, right)
        return combined
venv\Lib\site-packages\fontTools\misc\eexec.py
"""
PostScript Type 1 fonts make use of two types of encryption: charstring
encryption and ``eexec`` encryption. Charstring encryption is used for
the charstrings themselves, while ``eexec`` is used to encrypt larger
sections of the font program, such as the ``Private`` and ``CharStrings``
dictionaries. Despite the different names, the algorithm is the same,
although ``eexec`` encryption uses a fixed initial key R=55665.
The algorithm uses cipher feedback, meaning that the ciphertext is used
to modify the key. Because of this, the routines in this module return
the new key at the end of the operation.
"""
from fontTools.misc.textTools import bytechr, bytesjoin, byteord
def _decryptChar(cipher, R):
    # One step of the Type 1 cipher: recover the plain byte, then feed the
    # *cipher* byte back into the 16-bit key R (cipher feedback).
    cipher = byteord(cipher)
    plain = ((cipher ^ (R >> 8))) & 0xFF
    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
    return bytechr(plain), R
def _encryptChar(plain, R):
    # One step of the Type 1 cipher: produce the cipher byte, then feed it
    # back into the 16-bit key R — mirror image of _decryptChar.
    plain = byteord(plain)
    cipher = ((plain ^ (R >> 8))) & 0xFF
    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
    return bytechr(cipher), R
def decrypt(cipherstring, R):
    r"""
    Decrypts a string using the Type 1 encryption algorithm.
    Args:
        cipherstring: String of ciphertext.
        R: Initial key.
    Returns:
        decryptedStr: Plaintext string.
        R: Output key for subsequent decryptions.
    Examples::
        >>> testStr = b"\0\0asdadads asds\265"
        >>> decryptedStr, R = decrypt(testStr, 12321)
        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        True
        >>> R == 36142
        True
    """
    plainList = []
    # Cipher feedback: each processed byte updates R for the next byte.
    for cipher in cipherstring:
        plain, R = _decryptChar(cipher, R)
        plainList.append(plain)
    plainstring = bytesjoin(plainList)
    return plainstring, int(R)
def encrypt(plainstring, R):
    r"""
    Encrypts a string using the Type 1 encryption algorithm.
    Note that the algorithm as described in the Type 1 specification requires the
    plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
    number of random bytes is set to 4.) This routine does *not* add the random
    prefix to its input.
    Args:
        plainstring: String of plaintext.
        R: Initial key.
    Returns:
        cipherstring: Ciphertext string.
        R: Output key for subsequent encryptions.
    Examples::
        >>> testStr = b"\0\0asdadads asds\265"
        >>> decryptedStr, R = decrypt(testStr, 12321)
        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        True
        >>> R == 36142
        True
        >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        >>> encryptedStr, R = encrypt(testStr, 12321)
        >>> encryptedStr == b"\0\0asdadads asds\265"
        True
        >>> R == 36142
        True
    """
    cipherList = []
    # Cipher feedback: each emitted cipher byte updates R for the next byte.
    for plain in plainstring:
        cipher, R = _encryptChar(plain, R)
        cipherList.append(cipher)
    cipherstring = bytesjoin(cipherList)
    return cipherstring, int(R)
def hexString(s):
    """Return the hexadecimal encoding of *s* as a bytes object."""
    from binascii import hexlify
    return hexlify(s)
def deHexString(h):
    """Decode a hex string (whitespace between bytes allowed) into bytes."""
    import binascii
    # Strip all whitespace before decoding; bytesjoin tolerates str input.
    compact = bytesjoin(h.split())
    return binascii.unhexlify(compact)
if __name__ == "__main__":
    # Run the doctests above; exit status is the number of failures.
    import sys
    import doctest
    sys.exit(doctest.testmod().failed)
"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings.
"""
import fontTools.encodings.codecs
# Map keyed by platformID, then platEncID, then possibly langID
# Map keyed by platformID, then platEncID, then possibly langID
_encodingMap = {
    0: {  # Unicode
        0: "utf_16_be",
        1: "utf_16_be",
        2: "utf_16_be",
        3: "utf_16_be",
        4: "utf_16_be",
        5: "utf_16_be",
        6: "utf_16_be",
    },
    1: {  # Macintosh
        # See
        # https://github.com/fonttools/fonttools/issues/236
        0: {  # Macintosh, platEncID==0, keyed by langID
            15: "mac_iceland",
            17: "mac_turkish",
            18: "mac_croatian",
            24: "mac_latin2",
            25: "mac_latin2",
            26: "mac_latin2",
            27: "mac_latin2",
            28: "mac_latin2",
            36: "mac_latin2",
            37: "mac_romanian",
            38: "mac_latin2",
            39: "mac_latin2",
            40: "mac_latin2",
            Ellipsis: "mac_roman",  # Other
        },
        1: "x_mac_japanese_ttx",
        2: "x_mac_trad_chinese_ttx",
        3: "x_mac_korean_ttx",
        6: "mac_greek",
        7: "mac_cyrillic",
        25: "x_mac_simp_chinese_ttx",
        29: "mac_latin2",
        35: "mac_turkish",
        37: "mac_iceland",
    },
    2: {  # ISO
        0: "ascii",
        1: "utf_16_be",
        2: "latin1",
    },
    3: {  # Microsoft
        0: "utf_16_be",
        1: "utf_16_be",
        2: "shift_jis",
        3: "gb2312",
        4: "big5",
        5: "euc_kr",
        6: "johab",
        10: "utf_16_be",
    },
}
def getEncoding(platformID, platEncID, langID, default=None):
    """Returns the Python encoding name for OpenType platformID/encodingID/langID
    triplet. If encoding for these values is not known, by default None is
    returned. That can be overridden by passing a value to the default argument.
    """
    encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
    if isinstance(encoding, dict):
        # Language-dependent table (Mac platEncID 0): fall back to the
        # Ellipsis ("other") entry. FIX: previously encoding[Ellipsis] was
        # indexed directly, which would raise KeyError if a future sub-table
        # omitted the Ellipsis fallback; use *default* in that case instead.
        encoding = encoding.get(langID, encoding.get(Ellipsis, default))
    return encoding
venv\Lib\site-packages\fontTools\misc\etree.py
"""Shim module exporting the same ElementTree API for lxml and
xml.etree backends.
When lxml is installed, it is automatically preferred over the built-in
xml.etree module.
On Python 2.7, the cElementTree module is preferred over the pure-python
ElementTree module.
Besides exporting a unified interface, this also defines extra functions
or subclasses built-in ElementTree classes to add features that are
only availble in lxml, like OrderedDict for attributes, pretty_print and
iterwalk.
"""
from fontTools.misc.textTools import tostr
XML_DECLARATION = """"""
__all__ = [
# public symbols
"Comment",
"dump",
"Element",
"ElementTree",
"fromstring",
"fromstringlist",
"iselement",
"iterparse",
"parse",
"ParseError",
"PI",
"ProcessingInstruction",
"QName",
"SubElement",
"tostring",
"tostringlist",
"TreeBuilder",
"XML",
"XMLParser",
"register_namespace",
]
try:
    # Prefer lxml's etree when available (faster, richer API).
    from lxml.etree import *

    _have_lxml = True
except ImportError:
    try:
        from xml.etree.cElementTree import *

        # the cElementTree version of XML function doesn't support
        # the optional 'parser' keyword argument
        from xml.etree.ElementTree import XML
    except ImportError:  # pragma: no cover
        from xml.etree.ElementTree import *
    _have_lxml = False

# Attribute storage type; a plain dict preserves insertion order.
_Attrib = dict

if isinstance(Element, type):
    _Element = Element
else:
    # in py27, cElementTree.Element cannot be subclassed, so
    # we need to import the pure-python class
    from xml.etree.ElementTree import Element as _Element
class Element(_Element):
    """Element subclass that keeps the order of attributes."""

    # NOTE(review): `attrib=_Attrib()` is a shared mutable default; it is
    # harmless here because it is only read (a fresh dict is always built
    # below), but confirm no caller mutates the default.
    def __init__(self, tag, attrib=_Attrib(), **extra):
        super(Element, self).__init__(tag)
        # Rebuild the attribute dict so insertion order is preserved and
        # the caller's dict is never aliased.
        self.attrib = _Attrib()
        if attrib:
            self.attrib.update(attrib)
        if extra:
            self.attrib.update(extra)
def SubElement(parent, tag, attrib=_Attrib(), **extra):
    """Create a child element of *parent* and return it.

    Must override SubElement as well otherwise _elementtree.SubElement
    fails if 'parent' is a subclass of Element object.
    """
    factory = parent.__class__
    child = factory(tag, attrib, **extra)
    parent.append(child)
    return child
def _iterwalk(element, events, tag):
include = tag is None or element.tag == tag
if include and "start" in events:
yield ("start", element)
for e in element:
for item in _iterwalk(e, events, tag):
yield item
if include:
yield ("end", element)
def iterwalk(element_or_tree, events=("end",), tag=None):
    """A tree walker that generates events from an existing tree as
    if it was parsing XML data with iterparse().

    Drop-in replacement for lxml.etree.iterwalk.
    """
    if iselement(element_or_tree):
        root = element_or_tree
    else:
        root = element_or_tree.getroot()
    # lxml treats "*" as "match everything"; normalize it to None.
    yield from _iterwalk(root, events, None if tag == "*" else tag)
# Keep a reference to the backend class before shadowing the name.
_ElementTree = ElementTree


class ElementTree(_ElementTree):
    """ElementTree subclass that adds 'pretty_print' and 'doctype'
    arguments to the 'write' method.

    Currently these are only supported for the default XML serialization
    'method', and not also for "html" or "text", for these are delegated
    to the base class.
    """

    def write(
        self,
        file_or_filename,
        encoding=None,
        xml_declaration=False,
        method=None,
        doctype=None,
        pretty_print=False,
    ):
        """Serialize the tree to a file name or file-like object.

        ``xml_declaration`` may be True, False, or None (emit a
        declaration only for non-standard encodings). ``pretty_print``
        re-indents the tree *in place* before writing.
        """
        if method and method != "xml":
            # delegate to super-class
            super(ElementTree, self).write(
                file_or_filename,
                encoding=encoding,
                xml_declaration=xml_declaration,
                method=method,
            )
            return

        if encoding is not None and encoding.lower() == "unicode":
            # text (str) output: a declaration would be meaningless
            if xml_declaration:
                raise ValueError(
                    "Serialisation to unicode must not request an XML declaration"
                )
            write_declaration = False
            encoding = "unicode"
        elif xml_declaration is None:
            # by default, write an XML declaration only for non-standard encodings
            write_declaration = encoding is not None and encoding.upper() not in (
                "ASCII",
                "UTF-8",
                "UTF8",
                "US-ASCII",
            )
        else:
            write_declaration = xml_declaration

        if encoding is None:
            encoding = "ASCII"

        if pretty_print:
            # NOTE this will modify the tree in-place
            _indent(self._root)

        with _get_writer(file_or_filename, encoding) as write:
            if write_declaration:
                write(XML_DECLARATION % encoding.upper())
                if pretty_print:
                    write("\n")
            if doctype:
                write(_tounicode(doctype))
                if pretty_print:
                    write("\n")

            qnames, namespaces = _namespaces(self._root)
            _serialize_xml(write, self._root, qnames, namespaces)
import io
def tostring(
    element,
    encoding=None,
    xml_declaration=None,
    method=None,
    doctype=None,
    pretty_print=False,
):
    """Custom 'tostring' function that uses our ElementTree subclass, with
    pretty_print support.
    """
    # "unicode" yields str output; anything else yields bytes.
    buffer = io.StringIO() if encoding == "unicode" else io.BytesIO()
    tree = ElementTree(element)
    tree.write(
        buffer,
        encoding=encoding,
        xml_declaration=xml_declaration,
        method=method,
        doctype=doctype,
        pretty_print=pretty_print,
    )
    return buffer.getvalue()
# serialization support

import re

# Valid XML strings can include any Unicode character, excluding control
# characters, the surrogate blocks, FFFE, and FFFF:
#   Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
# Here we reversed the pattern to match only the invalid characters.
_invalid_xml_string = re.compile(
    "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uD800-\uDFFF\uFFFE-\uFFFF]"
)
def _tounicode(s):
    """Test if a string is valid user input and decode it to unicode string
    using ASCII encoding if it's a bytes string.

    Reject all bytes/unicode input that contains non-XML characters.
    Reject all bytes input that contains non-ASCII characters.
    """
    try:
        decoded = tostr(s, encoding="ascii", errors="strict")
    except UnicodeDecodeError:
        raise ValueError(
            "Bytes strings can only contain ASCII characters. "
            "Use unicode strings for non-ASCII characters."
        )
    except AttributeError:
        # not a string-like object at all
        _raise_serialization_error(s)
    if decoded and _invalid_xml_string.search(decoded):
        raise ValueError(
            "All strings must be XML compatible: Unicode or ASCII, "
            "no NULL bytes or control characters"
        )
    return decoded
import contextlib
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # returns text write method and release all resources after using
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        f = open(
            file_or_filename,
            "w",
            encoding="utf-8" if encoding == "unicode" else encoding,
            errors="xmlcharrefreplace",
        )
        with f:
            yield f.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            detach_buffer = False
            if isinstance(file_or_filename, io.BufferedIOBase):
                buf = file_or_filename
            elif isinstance(file_or_filename, io.RawIOBase):
                buf = io.BufferedWriter(file_or_filename)
                detach_buffer = True
            else:
                # This is to handle passed objects that aren't in the
                # IOBase hierarchy, but just have a write method
                buf = io.BufferedIOBase()
                buf.writable = lambda: True
                buf.write = write
                try:
                    # TextIOWrapper uses this methods to determine
                    # if BOM (for UTF-16, etc) should be added
                    buf.seekable = file_or_filename.seekable
                    buf.tell = file_or_filename.tell
                except AttributeError:
                    pass
            wrapper = io.TextIOWrapper(
                buf,
                encoding=encoding,
                errors="xmlcharrefreplace",
                newline="\n",
            )
            try:
                yield wrapper.write
            finally:
                # Keep the original file open when the TextIOWrapper and
                # the BufferedWriter are destroyed
                wrapper.detach()
                if detach_buffer:
                    buf.detach()
from xml.etree.ElementTree import _namespace_map
def _namespaces(elem):
    # identify namespaces used in this tree
    # Returns a pair (qnames, namespaces):
    #   qnames:     maps qnames to *encoded* prefix:local names
    #   namespaces: maps uri:s to prefixes
    qnames = {None: None}
    namespaces = {}

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            qname = _tounicode(qname)
            if qname[:1] == "{":
                # Clark notation "{uri}local": resolve the uri to a
                # prefix, inventing "ns%d" names for unknown uris.
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    else:
                        prefix = _tounicode(prefix)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag  # default element
            else:
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces, **kwargs):
    """Recursively serialize *elem* as XML text through the *write* callable.

    BUG FIX: the markup literals for comments, processing instructions and
    closing tags had their angle-bracket content stripped (e.g. ``""`` instead
    of ``"<!--%s-->"``), producing invalid output; restored below.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _tounicode(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _tounicode(text))
    else:
        tag = qnames[_tounicode(tag) if tag is not None else None]
        if tag is None:
            # tagless wrapper: emit only text and children
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None)
        else:
            write("<" + tag)
            if namespaces:
                for uri, prefix in sorted(
                    namespaces.items(), key=lambda x: x[1]
                ):  # sort on prefix
                    if prefix:
                        prefix = ":" + prefix
                    write(' xmlns%s="%s"' % (prefix, _escape_attrib(uri)))
            attrs = elem.attrib
            if attrs:
                # try to keep existing attrib order
                if len(attrs) <= 1 or type(attrs) is _Attrib:
                    items = attrs.items()
                else:
                    # if plain dict, use lexical order
                    items = sorted(attrs.items())
                for k, v in items:
                    if isinstance(k, QName):
                        k = _tounicode(k.text)
                    else:
                        k = _tounicode(k)
                    if isinstance(v, QName):
                        v = qnames[_tounicode(v.text)]
                    else:
                        v = _escape_attrib(v)
                    write(' %s="%s"' % (qnames[k], v))
            if text is not None or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None)
                write("</" + tag + ">")
            else:
                write("/>")
    if elem.tail:
        write(_escape_cdata(elem.tail))
def _raise_serialization_error(text):
raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
def _escape_cdata(text):
    """Escape '&', '<' and '>' in XML character data.

    BUG FIX: the entity replacement strings had been stripped to the
    characters themselves (e.g. ``replace("&", "&")``), making every
    replacement a no-op and emitting unescaped, invalid XML.
    """
    # escape character data
    try:
        text = _tounicode(text)
        # it's worth avoiding do-nothing calls for short strings
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text):
    """Escape '&', '<', '>', '"' and newlines in XML attribute values.

    BUG FIX: the entity replacement strings had been stripped to the
    characters themselves, making every replacement a no-op; newlines
    must serialize as the character reference ``&#10;`` so they survive
    attribute-value normalization on re-parse.
    """
    # escape attribute value
    try:
        text = _tounicode(text)
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if '"' in text:
            text = text.replace('"', "&quot;")
        if "\n" in text:
            text = text.replace("\n", "&#10;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _indent(elem, level=0):
# From http://effbot.org/zone/element-lib.htm#prettyprint
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
"""
This module implements the algorithm for converting between a "user name" -
something that a user can choose arbitrarily inside a font editor - and a file
name suitable for use in a wide range of operating systems and filesystems.
The `UFO 3 specification `_
provides an example of an algorithm for such conversion, which avoids illegal
characters, reserved file names, ambiguity between upper- and lower-case
characters, and clashes with existing files.
This code was originally copied from
`ufoLib `_
by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:
- Erik van Blokland
- Tal Leming
- Just van Rossum
"""
illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")
maxFileNameLength = 255
class NameTranslationError(Exception):
    """Raised when no suitable file name can be generated for a user name."""
def userNameToFileName(userName, existing=[], prefix="", suffix=""):
    """Converts from a user name to a file name.

    Takes care to avoid illegal characters, reserved file names, ambiguity between
    upper- and lower-case characters, and clashes with existing files.

    Args:
        userName (str): The input file name.
        existing: A case-insensitive list of all existing file names.
        prefix: Prefix to be prepended to the file name.
        suffix: Suffix to be appended to the file name.

    Returns:
        A suitable filename.

    Raises:
        NameTranslationError: If no suitable name could be generated.

    Examples::

        >>> userNameToFileName("a") == "a"
        True
        >>> userNameToFileName("A") == "A_"
        True
        >>> userNameToFileName("AE") == "A_E_"
        True
        >>> userNameToFileName("Ae") == "A_e"
        True
        >>> userNameToFileName("ae") == "ae"
        True
        >>> userNameToFileName("aE") == "aE_"
        True
        >>> userNameToFileName("a.alt") == "a.alt"
        True
        >>> userNameToFileName("A.alt") == "A_.alt"
        True
        >>> userNameToFileName("A.Alt") == "A_.A_lt"
        True
        >>> userNameToFileName("A.aLt") == "A_.aL_t"
        True
        >>> userNameToFileName(u"A.alT") == "A_.alT_"
        True
        >>> userNameToFileName("T_H") == "T__H_"
        True
        >>> userNameToFileName("T_h") == "T__h"
        True
        >>> userNameToFileName("t_h") == "t_h"
        True
        >>> userNameToFileName("F_F_I") == "F__F__I_"
        True
        >>> userNameToFileName("f_f_i") == "f_f_i"
        True
        >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
        True
        >>> userNameToFileName(".notdef") == "_notdef"
        True
        >>> userNameToFileName("con") == "_con"
        True
        >>> userNameToFileName("CON") == "C_O_N_"
        True
        >>> userNameToFileName("con.alt") == "_con.alt"
        True
        >>> userNameToFileName("alt.con") == "alt._con"
        True
    """
    # the incoming name must be a str
    if not isinstance(userName, str):
        raise ValueError("The value for userName must be a string.")
    # NOTE(review): an empty userName raises IndexError below — confirm
    # callers never pass "".
    # replace an initial period with an _ if no prefix is to be added
    if not prefix and userName[0] == ".":
        userName = "_" + userName[1:]

    def _translate(ch):
        # illegal characters become "_"; non-lowercase characters get a
        # trailing "_" to avoid case-insensitive clashes
        if ch in illegalCharacters:
            return "_"
        if ch != ch.lower():
            return ch + "_"
        return ch

    userName = "".join(_translate(ch) for ch in userName)
    # clip so that prefix + name + suffix fits within 255 characters
    userName = userName[: maxFileNameLength - len(prefix) - len(suffix)]
    # prepend "_" to any dot-separated part that is a reserved file name
    userName = ".".join(
        "_" + part if part.lower() in reservedFileNames else part
        for part in userName.split(".")
    )
    # test for clash with existing names
    fullName = prefix + userName + suffix
    if fullName.lower() in existing:
        fullName = handleClash1(userName, existing, prefix, suffix)
    return fullName
def handleClash1(userName, existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = ["a" * 5]

    >>> e = list(existing)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000002.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True
    """
    # Reserve room for a 15-digit counter: if prefix + name + suffix + 15
    # exceeds the maximum length, slice the overflow off the user name.
    projected = len(prefix) + len(userName) + len(suffix) + 15
    if projected > maxFileNameLength:
        userName = userName[: maxFileNameLength - projected]
    # try appending zero-padded counters until an unused name is found
    finalName = None
    counter = 1
    while finalName is None:
        candidate = prefix + userName + str(counter).zfill(15) + suffix
        if candidate.lower() not in existing:
            finalName = candidate
            break
        counter += 1
        if counter >= 999999999999999:
            break
    # if there is a clash, go to the next fallback
    if finalName is None:
        finalName = handleClash2(existing, prefix, suffix)
    return finalName
def handleClash2(existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = [prefix + str(i) + suffix for i in range(100)]

    >>> e = list(existing)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.100.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "1" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.1.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "2" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.2.0000000000')
    True
    """
    # the counter may use every character not taken by prefix/suffix
    maxValue = int("9" * (maxFileNameLength - len(prefix) - len(suffix)))
    # try unpadded counters until an unused name is found
    finalName = None
    counter = 1
    while finalName is None:
        candidate = prefix + str(counter) + suffix
        if candidate.lower() not in existing:
            finalName = candidate
            break
        counter += 1
        if counter >= maxValue:
            break
    # raise an error if nothing has been found
    if finalName is None:
        raise NameTranslationError("No unique name could be found.")
    return finalName
if __name__ == "__main__":
import doctest
import sys
sys.exit(doctest.testmod().failed)
"""
The `OpenType specification `_
defines two fixed-point data types:
``Fixed``
A 32-bit signed fixed-point number with a 16 bit twos-complement
magnitude component and 16 fractional bits.
``F2DOT14``
A 16-bit signed fixed-point number with a 2 bit twos-complement
magnitude component and 14 fractional bits.
To support reading and writing data with these data types, this module provides
functions for converting between fixed-point, float and string representations.
.. data:: MAX_F2DOT14
The maximum value that can still fit in an F2Dot14. (1.99993896484375)
"""
from .roundTools import otRound, nearestMultipleShortestRepr
import logging
log = logging.getLogger(__name__)
__all__ = [
"MAX_F2DOT14",
"fixedToFloat",
"floatToFixed",
"floatToFixedToFloat",
"floatToFixedToStr",
"fixedToStr",
"strToFixed",
"strToFixedToFloat",
"ensureVersionIsLong",
"versionToFixed",
]
MAX_F2DOT14 = 0x7FFF / (1 << 14)
def fixedToFloat(value, precisionBits):
    """Converts a fixed-point number to a float given the number of
    precision bits.

    Args:
        value (int): Number in fixed-point format.
        precisionBits (int): Number of precision bits.

    Returns:
        Floating point value.

    Examples::

        >>> import math
        >>> f = fixedToFloat(-10139, precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    return value / scale
def floatToFixed(value, precisionBits):
    """Converts a float to a fixed-point number given the number of
    precision bits.

    Args:
        value (float): Floating point value.
        precisionBits (int): Number of precision bits.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> floatToFixed(-0.61883544921875, precisionBits=14)
        -10139
        >>> floatToFixed(-0.61884, precisionBits=14)
        -10139
    """
    scale = 1 << precisionBits
    return otRound(scale * value)
def floatToFixedToFloat(value, precisionBits):
    """Converts a float to a fixed-point number and back again.

    By converting the float to fixed, rounding it, and converting it back
    to float again, this returns a floating point values which is exactly
    representable in fixed-point format.

    Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``.

    Args:
        value (float): The input floating point value.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::

        >>> import math
        >>> f1 = -0.61884
        >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
        >>> f1 != f2
        True
        >>> math.isclose(f2, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    fixed = otRound(value * scale)
    return fixed / scale
def fixedToStr(value, precisionBits):
    """Converts a fixed-point number to a string representing a decimal float.

    This chooses the float that has the shortest decimal representation (the least
    number of fractional decimal digits).

    For example, to convert a fixed-point number in a 2.14 format, use
    ``precisionBits=14``::

        >>> fixedToStr(-10139, precisionBits=14)
        '-0.61884'

    This is pretty slow compared to the simple division used in ``fixedToFloat``.
    Use sporadically when you need to serialize or print the fixed-point number in
    a human-readable form.
    It uses nearestMultipleShortestRepr under the hood.

    Args:
        value (int): The fixed-point value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    scale = 1 << precisionBits
    shortest = nearestMultipleShortestRepr(value / scale, factor=1.0 / scale)
    return shortest
def strToFixed(string, precisionBits):
    """Converts a string representing a decimal float to a fixed-point number.

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> ## to convert a float string to a 2.14 fixed-point number:
        >>> strToFixed('-0.61884', precisionBits=14)
        -10139
    """
    scale = 1 << precisionBits
    return otRound(float(string) * scale)
def strToFixedToFloat(string, precisionBits):
    """Convert a string to a decimal float with fixed-point rounding.

    This first converts string to a float, then turns it into a fixed-point
    number with ``precisionBits`` fractional binary digits, then back to a
    float again.

    This is simply a shorthand for fixedToFloat(floatToFixed(float(s))).

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::

        >>> import math
        >>> s = '-0.61884'
        >>> bits = 14
        >>> f = strToFixedToFloat(s, precisionBits=bits)
        >>> math.isclose(f, -0.61883544921875)
        True
        >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits)
        True
    """
    scale = 1 << precisionBits
    fixed = otRound(float(string) * scale)
    return fixed / scale
def floatToFixedToStr(value, precisionBits):
    """Convert float to string with fixed-point rounding.

    This uses the shortest decimal representation (ie. the least
    number of fractional decimal digits) to represent the equivalent
    fixed-point number with ``precisionBits`` fractional binary digits.
    It uses nearestMultipleShortestRepr under the hood.

    >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
    '-0.61884'

    Args:
        value (float): The float value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    factor = 1.0 / (1 << precisionBits)
    return nearestMultipleShortestRepr(value, factor=factor)
def ensureVersionIsLong(value):
    """Ensure a table version is an unsigned long.

    OpenType table version numbers are expressed as a single unsigned long
    comprising of an unsigned short major version and unsigned short minor
    version. This function detects if the value to be used as a version number
    looks too small (i.e. is less than ``0x10000``), and converts it to
    fixed-point using :func:`floatToFixed` if so.

    Args:
        value (Number): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    if value >= 0x10000:
        return value
    fixed = floatToFixed(value, 16)
    log.warning(
        "Table version value is a float: %.4f; fix to use hex instead: 0x%08x",
        value,
        fixed,
    )
    return fixed
def versionToFixed(value):
    """Ensure a table version number is fixed-point.

    Args:
        value (str): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    # A leading "0" selects int() with base auto-detection (e.g. "0x10000");
    # everything else is parsed as a float version.
    # NOTE(review): a value like "0.5" takes the int() branch and raises
    # ValueError — confirm that is intended.
    parsed = int(value, 0) if value.startswith("0") else float(value)
    return ensureVersionIsLong(parsed)
venv\Lib\site-packages\fontTools\misc\intTools.py
__all__ = ["popCount", "bit_count", "bit_indices"]
try:
    # Python >= 3.10: use the native, C-implemented popcount.
    bit_count = int.bit_count
except AttributeError:

    def bit_count(v):
        """Return number of 1 bits (population count) of the absolute value
        of an integer.

        See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
        """
        # BUG FIX: this docstring was a stray string expression placed
        # *after* the function body, so the fallback had no __doc__;
        # it is now attached to the function.
        return bin(v).count("1")


popCount = bit_count  # alias
def bit_indices(v):
    """Return list of indices where bits are set, 0 being the index of the
    least significant bit.

    >>> bit_indices(0b101)
    [0, 2]
    """
    # Walk the binary repr from the least significant digit outward.
    indices = []
    for offset, digit in enumerate(reversed(bin(v))):
        if digit == "1":
            indices.append(offset)
    return indices
from itertools import *
# Python 3.12:
if "batched" not in globals():
# https://docs.python.org/3/library/itertools.html#itertools.batched
def batched(iterable, n):
# batched('ABCDEFG', 3) --> ABC DEF G
if n < 1:
raise ValueError("n must be at least one")
it = iter(iterable)
while batch := tuple(islice(it, n)):
yield batch
from collections import UserDict, UserList

# Public API: container wrappers whose values may be lazily-computed
# callables, materialized on first access.
__all__ = ["LazyDict", "LazyList"]
class LazyDict(UserDict):
    """Mapping in which a callable value is invoked with its key on first
    access, and replaced by the result (memoized)."""

    def __init__(self, data):
        super().__init__()
        # Adopt the caller's mapping directly; lazily-resolved values are
        # cached back into it.
        self.data = data

    def __getitem__(self, k):
        value = self.data[k]
        if callable(value):
            value = value(k)
            self.data[k] = value
        return value
class LazyList(UserList):
    """List in which a callable item is invoked with its index on first
    access, and replaced by the result (memoized)."""

    def __getitem__(self, k):
        if isinstance(k, slice):
            # Resolve slices element-wise so each item materializes.
            return [self[index] for index in range(*k.indices(len(self)))]
        value = self.data[k]
        if callable(value):
            value = value(k)
            self.data[k] = value
        return value

    def __add__(self, other):
        # Concatenation fully materializes both operands into plain lists.
        if isinstance(other, LazyList):
            other = list(other)
        elif not isinstance(other, list):
            return NotImplemented
        return list(self) + other

    def __radd__(self, other):
        if not isinstance(other, list):
            return NotImplemented
        return other + list(self)
import sys
import logging
import timeit
from functools import wraps
from collections.abc import Mapping, Callable
import warnings
from logging import PercentStyle


# default logging level used by Timer class
TIME_LEVEL = logging.DEBUG

# per-level format strings used by the default formatter
# (the level name is not printed for INFO and DEBUG messages)
DEFAULT_FORMATS = {
    "*": "%(levelname)s: %(message)s",
    "INFO": "%(message)s",
    "DEBUG": "%(message)s",
}
class LevelFormatter(logging.Formatter):
    """Log formatter with level-specific formatting.

    Formatter class which optionally takes a dict of logging levels to
    format strings, allowing to customise the log records appearance for
    specific levels.

    Attributes:
        fmt: A dictionary mapping logging levels to format strings.
            The ``*`` key identifies the default format string.
        datefmt: As per py:class:`logging.Formatter`
        style: As per py:class:`logging.Formatter`

    >>> import sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> formatter = LevelFormatter(
    ...     fmt={
    ...         '*': '[%(levelname)s] %(message)s',
    ...         'DEBUG': '%(name)s [%(levelname)s] %(message)s',
    ...         'INFO': '%(message)s',
    ...     })
    >>> handler.setFormatter(formatter)
    >>> log = logging.getLogger('test')
    >>> log.setLevel(logging.DEBUG)
    >>> log.addHandler(handler)
    >>> log.debug('this uses a custom format string')
    test [DEBUG] this uses a custom format string
    >>> log.info('this also uses a custom format string')
    this also uses a custom format string
    >>> log.warning("this one uses the default format string")
    [WARNING] this one uses the default format string
    """

    def __init__(self, fmt=None, datefmt=None, style="%"):
        # only %-style formatting is supported (py2/py3 compatibility)
        if style != "%":
            raise ValueError(
                "only '%' percent style is supported in both python 2 and 3"
            )
        if fmt is None:
            fmt = DEFAULT_FORMATS
        if isinstance(fmt, str):
            # a single format string applies to every level
            default_format = fmt
            custom_formats = {}
        elif isinstance(fmt, Mapping):
            custom_formats = dict(fmt)
            default_format = custom_formats.pop("*", None)
        else:
            raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
        super(LevelFormatter, self).__init__(default_format, datefmt)
        self.default_format = self._fmt
        self.custom_formats = {}
        for level, fmt in custom_formats.items():
            # logging._checkLevel accepts names ("DEBUG") and numbers alike
            level = logging._checkLevel(level)
            self.custom_formats[level] = fmt

    def format(self, record):
        # Swap self._fmt (and the matching _style) per record level before
        # delegating to the base implementation.
        if self.custom_formats:
            fmt = self.custom_formats.get(record.levelno, self.default_format)
            if self._fmt != fmt:
                self._fmt = fmt
                # for python >= 3.2, _style needs to be set if _fmt changes
                if PercentStyle:
                    self._style = PercentStyle(fmt)
        return super(LevelFormatter, self).format(record)
def configLogger(**kwargs):
    """A more sophisticated logging system configuation manager.

    This is more or less the same as :py:func:`logging.basicConfig`,
    with some additional options and defaults.

    The default behaviour is to create a ``StreamHandler`` which writes to
    sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
    the handler to the top-level library logger ("fontTools").

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    Args:
        logger: Specifies the logger name or a Logger instance to be
            configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
            this function can be called multiple times to reconfigure a logger.
            If the logger or any of its children already exists before the call is
            made, they will be reset before the new configuration is applied.
        filename: Specifies that a ``FileHandler`` be created, using the
            specified filename, rather than a ``StreamHandler``.
        filemode: Specifies the mode to open the file, if filename is
            specified. (If filemode is unspecified, it defaults to ``a``).
        format: Use the specified format string for the handler. This
            argument also accepts a dictionary of format strings keyed by
            level name, to allow customising the records appearance for
            specific levels. The special ``'*'`` key is for 'any other' level.
        datefmt: Use the specified date/time format.
        level: Set the logger level to the specified level.
        stream: Use the specified stream to initialize the StreamHandler. Note
            that this argument is incompatible with ``filename`` - if both
            are present, ``stream`` is ignored.
        handlers: If specified, this should be an iterable of already created
            handlers, which will be added to the logger. Any handler in the
            list which does not have a formatter assigned will be assigned the
            formatter created in this function.
        filters: If specified, this should be an iterable of already created
            filters. If the ``handlers`` do not already have filters assigned,
            these filters will be added to them.
        propagate: All loggers have a ``propagate`` attribute which determines
            whether to continue searching for handlers up the logging hierarchy.
            If not provided, the "propagate" attribute will be set to ``False``.
    """
    # using kwargs to enforce keyword-only arguments in py2.
    handlers = kwargs.pop("handlers", None)
    if handlers is None:
        # 'stream' and 'filename' are mutually exclusive
        if "stream" in kwargs and "filename" in kwargs:
            raise ValueError(
                "'stream' and 'filename' should not be " "specified together"
            )
    else:
        # explicit handlers exclude both 'stream' and 'filename'
        if "stream" in kwargs or "filename" in kwargs:
            raise ValueError(
                "'stream' or 'filename' should not be "
                "specified together with 'handlers'"
            )
    if handlers is None:
        filename = kwargs.pop("filename", None)
        mode = kwargs.pop("filemode", "a")
        if filename:
            h = logging.FileHandler(filename, mode)
        else:
            stream = kwargs.pop("stream", None)
            h = logging.StreamHandler(stream)
        handlers = [h]
    # By default, the top-level library logger is configured.
    logger = kwargs.pop("logger", "fontTools")
    if not logger or isinstance(logger, str):
        # empty "" or None means the 'root' logger
        logger = logging.getLogger(logger)
    # before (re)configuring, reset named logger and its children (if exist)
    _resetExistingLoggers(parent=logger.name)
    # use DEFAULT_FORMATS if 'format' is None
    fs = kwargs.pop("format", None)
    dfs = kwargs.pop("datefmt", None)
    # XXX: '%' is the only format style supported on both py2 and 3
    style = kwargs.pop("style", "%")
    fmt = LevelFormatter(fs, dfs, style)
    filters = kwargs.pop("filters", [])
    for h in handlers:
        # only assign our formatter/filters to handlers that have none
        if h.formatter is None:
            h.setFormatter(fmt)
        if not h.filters:
            for f in filters:
                h.addFilter(f)
        logger.addHandler(h)
    if logger.name != "root":
        # stop searching up the hierarchy for handlers
        logger.propagate = kwargs.pop("propagate", False)
    # set a custom severity level
    level = kwargs.pop("level", None)
    if level is not None:
        logger.setLevel(level)
    # any keyword argument left unconsumed is an error
    if kwargs:
        keys = ", ".join(kwargs.keys())
        raise ValueError("Unrecognised argument(s): %s" % keys)
def _resetExistingLoggers(parent="root"):
    """Reset the logger named 'parent' and all its children to their initial
    state, if they already exist in the current configuration.
    """
    root = logging.root
    # get sorted list of all existing loggers
    existing = sorted(root.manager.loggerDict.keys())
    if parent == "root":
        # all the existing loggers are children of 'root'
        loggers_to_reset = [parent] + existing
    elif parent not in existing:
        # nothing to do
        return
    else:
        loggers_to_reset = [parent]
        # collect children: in the sorted name list they all share the
        # dotted prefix "<parent>."
        prefixed = parent + "."
        loggers_to_reset.extend(
            name
            for name in existing[existing.index(parent) + 1 :]
            if name.startswith(prefixed)
        )
    for name in loggers_to_reset:
        if name == "root":
            root.setLevel(logging.WARNING)
            for h in root.handlers[:]:
                root.removeHandler(h)
            for f in root.filters[:]:
                # BUG FIX: was `root.removeFilters(f)` — no such method on
                # logging loggers; it raised AttributeError whenever the
                # root logger had any filters installed.
                root.removeFilter(f)
            root.disabled = False
        else:
            # NOTE(review): loggerDict values can also be PlaceHolder
            # objects; setting these attributes on them matches the
            # original behavior.
            logger = root.manager.loggerDict[name]
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.filters = []
            logger.propagate = True
            logger.disabled = False
class Timer(object):
    """Keeps track of overall time and split/lap times.

    >>> import time
    >>> timer = Timer()
    >>> time.sleep(0.01)
    >>> print("First lap:", timer.split())
    First lap: ...
    >>> time.sleep(0.02)
    >>> print("Second lap:", timer.split())
    Second lap: ...
    >>> print("Overall time:", timer.time())
    Overall time: ...

    Can be used as a context manager inside with-statements.

    >>> with Timer() as t:
    ...     time.sleep(0.01)
    >>> print("%0.3f seconds" % t.elapsed)
    0... seconds

    If initialised with a logger, it can log the elapsed time automatically
    upon exiting the with-statement.

    >>> import logging
    >>> log = logging.getLogger("my-fancy-timer-logger")
    >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
    >>> with Timer(log, 'do something'):
    ...     time.sleep(0.01)
    Took ... to do something

    The same Timer instance, holding a reference to a logger, can be reused
    in multiple with-statements, optionally with different messages or levels.

    >>> timer = Timer(log)
    >>> with timer():
    ...     time.sleep(0.01)
    elapsed time: ...s
    >>> with timer('redo it', level=logging.INFO):
    ...     time.sleep(0.02)
    Took ... to redo it

    It can also be used as a function decorator to log the time elapsed to run
    the decorated function.

    >>> @timer()
    ... def test1():
    ...     time.sleep(0.01)
    >>> @timer('run test 2', level=logging.INFO)
    ... def test2():
    ...     time.sleep(0.02)
    >>> test1()
    Took ... to run 'test1'
    >>> test2()
    Took ... to run test 2
    """

    # timeit.default_timer choses the most accurate clock for each platform
    _time: Callable[[], float] = staticmethod(timeit.default_timer)
    # used when no message at all was provided
    default_msg = "elapsed time: %(time).3fs"
    # used when a message without a '%(time)' placeholder was provided
    default_format = "Took %(time).3fs to %(msg)s"

    def __init__(self, logger=None, msg=None, level=None, start=None):
        self.reset(start)
        if logger is None:
            # 'msg' and 'level' are only meaningful with a logger attached;
            # locals() still holds the constructor arguments at this point.
            for arg in ("msg", "level"):
                if locals().get(arg) is not None:
                    raise ValueError("'%s' can't be specified without a 'logger'" % arg)
        self.logger = logger
        # TIME_LEVEL is a module-level custom severity defined elsewhere
        # in this file.
        self.level = level if level is not None else TIME_LEVEL
        self.msg = msg

    def reset(self, start=None):
        """Reset timer to 'start_time' or the current time."""
        if start is None:
            self.start = self._time()
        else:
            self.start = start
        self.last = self.start
        self.elapsed = 0.0

    def time(self):
        """Return the overall time (in seconds) since the timer started."""
        return self._time() - self.start

    def split(self):
        """Split and return the lap time (in seconds) in between splits."""
        current = self._time()
        self.elapsed = current - self.last
        self.last = current
        return self.elapsed

    def formatTime(self, msg, time):
        """Format 'time' value in 'msg' and return formatted string.

        If 'msg' contains a '%(time)' format string, try to use that.
        Otherwise, use the predefined 'default_format'.
        If 'msg' is empty or None, fall back to 'default_msg'.
        """
        if not msg:
            msg = self.default_msg
        if msg.find("%(time)") < 0:
            msg = self.default_format % {"msg": msg, "time": time}
        else:
            try:
                msg = msg % {"time": time}
            except (KeyError, ValueError):
                pass  # skip if the format string is malformed
        return msg

    def __enter__(self):
        """Start a new lap"""
        self.last = self._time()
        self.elapsed = 0.0
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """End the current lap. If timer has a logger, log the time elapsed,
        using the format string in self.msg (or the default one).
        """
        time = self.split()
        if self.logger is None or exc_type:
            # if there's no logger attached, or if any exception occurred in
            # the with-statement, exit without logging the time
            return
        message = self.formatTime(self.msg, time)
        # Allow log handlers to see the individual parts to facilitate things
        # like a server accumulating aggregate stats.
        msg_parts = {"msg": self.msg, "time": time}
        self.logger.log(self.level, message, msg_parts)

    def __call__(self, func_or_msg=None, **kwargs):
        """If the first argument is a function, return a decorator which runs
        the wrapped function inside Timer's context manager.

        Otherwise, treat the first argument as a 'msg' string and return an updated
        Timer instance, referencing the same logger.
        A 'level' keyword can also be passed to override self.level.
        """
        if isinstance(func_or_msg, Callable):
            func = func_or_msg
            # use the function name when no explicit 'msg' is provided
            # NOTE(review): this mutates self.msg, so a bare @timer() reuse
            # keeps the first decorated function's name.
            if not self.msg:
                self.msg = "run '%s'" % func.__name__

            @wraps(func)
            def wrapper(*args, **kwds):
                with self:
                    return func(*args, **kwds)

            return wrapper
        else:
            msg = func_or_msg or kwargs.get("msg")
            level = kwargs.get("level", self.level)
            # return a fresh Timer sharing the same logger
            return self.__class__(self.logger, msg, level)

    def __float__(self):
        # elapsed seconds of the last lap
        return self.elapsed

    def __int__(self):
        return int(self.elapsed)

    def __str__(self):
        return "%.3f" % self.elapsed
class ChannelsFilter(logging.Filter):
    """Hierarchical log filter accepting records from several channels.

    Works like ``logging.Filter`` but accepts multiple channel names; a
    record passes if its logger name equals one of the names or is a
    dotted child of one of them.

    >>> import sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> handler.setFormatter(logging.Formatter("%(message)s"))
    >>> filter = ChannelsFilter("A.B", "C.D")
    >>> handler.addFilter(filter)
    >>> root = logging.getLogger()
    >>> root.addHandler(handler)
    >>> root.setLevel(level=logging.DEBUG)
    >>> logging.getLogger('A.B').debug('this record passes through')
    this record passes through
    >>> logging.getLogger('A.B.C').debug('records from children also pass')
    records from children also pass
    >>> logging.getLogger('C.D').debug('this one as well')
    this one as well
    >>> logging.getLogger('A.B.').debug('also this one')
    also this one
    >>> logging.getLogger('A.F').debug('but this one does not!')
    >>> logging.getLogger('C.DE').debug('neither this one!')
    """

    def __init__(self, *names):
        # NOTE: deliberately does not call logging.Filter.__init__
        self.names = names
        self.num = len(names)
        self.lengths = {channel: len(channel) for channel in names}

    def filter(self, record):
        # an empty filter lets everything through
        if not self.names:
            return True
        for channel in self.names:
            size = self.lengths[channel]
            if record.name == channel:
                return True
            # child channels share the prefix followed by a dot
            if record.name.startswith(channel) and record.name[size] == ".":
                return True
        return False
class CapturingLogHandler(logging.Handler):
    """Context manager that captures records emitted to a given logger.

    While active, the target logger is forced to *level*, enabled, and
    non-propagating; its previous configuration is restored on exit.
    Captured records accumulate in ``self.records``.
    """

    def __init__(self, logger, level):
        super().__init__(level=level)
        self.records = []
        self.logger = logging.getLogger(logger) if isinstance(logger, str) else logger

    def __enter__(self):
        # remember the logger's configuration so it can be restored
        self.original_disabled = self.logger.disabled
        self.original_level = self.logger.level
        self.original_propagate = self.logger.propagate

        self.logger.addHandler(self)
        self.logger.setLevel(self.level)
        self.logger.disabled = False
        self.logger.propagate = False
        return self

    def __exit__(self, type, value, traceback):
        self.logger.removeHandler(self)
        self.logger.setLevel(self.original_level)
        self.logger.disabled = self.original_disabled
        self.logger.propagate = self.original_propagate
        # NOTE(review): returning a truthy value from __exit__ suppresses
        # any exception raised inside the with-block; kept for backward
        # compatibility with existing callers.
        return self

    def emit(self, record):
        self.records.append(record)

    def assertRegex(self, regexp, msg=None):
        """Assert that some captured record message matches *regexp*."""
        import re

        pattern = re.compile(regexp)
        for record in self.records:
            if pattern.search(record.getMessage()):
                return True
        if msg is None:
            msg = "Pattern '%s' not found in logger records" % regexp
        assert 0, msg
class LogMixin(object):
    """Mixin class that adds logging functionality to another class.

    You can define a new class that subclasses from ``LogMixin`` as well as
    other base classes through multiple inheritance.
    All instances of that class will have a ``log`` property that returns
    a ``logging.Logger`` named after their respective ``<module>.<class>``.

    For example:

    >>> class BaseClass(object):
    ...     pass
    >>> class MyClass(LogMixin, BaseClass):
    ...     pass
    >>> a = MyClass()
    >>> isinstance(a.log, logging.Logger)
    True
    >>> print(a.log.name)
    fontTools.misc.loggingTools.MyClass
    >>> class AnotherClass(MyClass):
    ...     pass
    >>> b = AnotherClass()
    >>> isinstance(b.log, logging.Logger)
    True
    >>> print(b.log.name)
    fontTools.misc.loggingTools.AnotherClass
    """

    @property
    def log(self):
        # lazily create and cache the logger on first access
        try:
            return self._log
        except AttributeError:
            cls = self.__class__
            self._log = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
            return self._log
def deprecateArgument(name, msg, category=UserWarning):
    """Raise a warning about deprecated function argument 'name'."""
    # stacklevel=3 points the warning at the caller of the deprecated API
    message = "%r is deprecated; %s" % (name, msg)
    warnings.warn(message, category=category, stacklevel=3)
def deprecateFunction(msg, category=UserWarning):
    """Decorator to raise a warning when a deprecated function is called."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # the message is rebuilt per call so it always reflects the
            # wrapped function's current __name__
            message = "%r is deprecated; %s" % (func.__name__, msg)
            warnings.warn(message, category=category, stacklevel=2)
            return func(*args, **kwargs)

        return wrapper

    return decorator
if __name__ == "__main__":
    # Run the embedded doctests when executed directly; the exit status is
    # the number of failed examples.
    import doctest

    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
from fontTools.misc.textTools import Tag, bytesjoin, strjoin

try:
    # xattr is optional: it gives access to macOS extended attributes
    # (Finder info); without it these helpers degrade to no-ops.
    import xattr
except ImportError:
    xattr = None
def _reverseString(s):
    """Return *s* with its characters in reverse order."""
    return strjoin(list(s)[::-1])
def getMacCreatorAndType(path):
    """Returns file creator and file type codes for a path.

    Args:
        path (str): A file path.

    Returns:
        A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
        representing the file creator and the second representing the
        file type.  Both are None when ``xattr`` is unavailable or the
        attribute cannot be read.
    """
    if xattr is None:
        return None, None
    try:
        finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
    except (KeyError, IOError):
        return None, None
    # FinderInfo layout: bytes 0-3 file type, bytes 4-7 file creator
    return Tag(finderInfo[4:8]), Tag(finderInfo[:4])
def setMacCreatorAndType(path, fileCreator, fileType):
    """Set file creator and file type codes for a path.

    Note that if the ``xattr`` module is not installed, no action is
    taken but no error is raised.

    Args:
        path (str): A file path.
        fileCreator: A four-character file creator tag.
        fileType: A four-character file type tag.
    """
    if xattr is None:
        return
    from fontTools.misc.textTools import pad

    if not all(len(s) == 4 for s in (fileCreator, fileType)):
        raise TypeError("arg must be string of 4 chars")
    # FinderInfo is a fixed 32-byte structure: type first, then creator
    finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
    xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)
# venv\Lib\site-packages\fontTools\misc\macRes.py
from io import BytesIO
import struct
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin, tostr
from collections import OrderedDict
from collections.abc import MutableMapping
class ResourceError(Exception):
    """Raised when a resource fork cannot be read or parsed."""
class ResourceReader(MutableMapping):
    """Reader for Mac OS resource forks.

    Parses a resource fork and returns resources according to their type.
    If run on OS X, this will open the resource fork in the filesystem.
    Otherwise, it will open the file itself and attempt to read it as
    though it were a resource fork.

    The returned object can be indexed by type and iterated over,
    returning in each case a list of py:class:`Resource` objects
    representing all the resources of a certain type.
    """

    def __init__(self, fileOrPath):
        """Open a file

        Args:
            fileOrPath: Either an object supporting a ``read`` method, an
                ``os.PathLike`` object, or a string.
        """
        self._resources = OrderedDict()
        if hasattr(fileOrPath, "read"):
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        # macOS exposes the resource fork as a virtual file alongside the
        # data fork under "<path>/..namedfork/rsrc"
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + "/..namedfork/rsrc", "rb") as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        # read the whole file into memory and wrap it in a named BytesIO
        with open(path, "rb") as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        # parse header/map, then the complete type list with all references
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        """Read exactly *numBytes*, optionally seeking to *offset* first.

        Raises:
            ResourceError: if the seek or read cannot be satisfied.
        """
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError("Failed to seek offset (reached EOF)")
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError("Cannot read resource (not enough data)")
        return data

    def _readHeaderAndMap(self):
        # the fork header lives at the very start of the file
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        # type and name list offsets are relative to the map start
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        (self.numTypes,) = struct.unpack(">H", numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        # on-disk counts are stored minus one, hence the +1 below
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item["type"], encoding="mac-roman")
            refListOffset = absTypeListOffset + item["refListOffset"]
            numRes = item["numRes"] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        # decode 'numRes' consecutive reference entries into Resource objects
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        """A list of the types of resources in the resource fork."""
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        """Returns a list of indices of resources of a given type."""
        numRes = self.countResources(resType)
        if numRes:
            # resource indices are 1-based by Mac convention
            return list(range(1, numRes + 1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index - 1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding="mac-roman")
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        # idempotent: closing twice is a no-op
        if not self.file.closed:
            self.file.close()
class Resource(object):
    """Represents a resource stored within a resource fork.

    Attributes:
        type: resource type.
        data: resource data.
        id: ID.
        name: resource name.
        attr: attributes.
    """

    def __init__(
        self, resType=None, resData=None, resID=None, resName=None, resAttr=None
    ):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Populate this resource from a reference-list entry.

        Unpacks *refData* (one ResourceRefItem record) into attributes on
        self, then uses *reader* to fetch the resource data and, when
        present, its Pascal-style name from the name list.
        """
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        # the data block starts with its 4-byte big-endian length
        (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            # -1 means "no name"; self.name keeps its current value
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        # names are Pascal strings: 1 length byte followed by the bytes
        (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
        (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding="mac-roman")
# sstruct format descriptions for the on-disk resource-fork layout; the
# *Size constants are computed once at import time.
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""

ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)

ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""

ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)

ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""

ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)

# note: dataOffset is a 3-byte field; Resource.decompile pads it to 4 bytes
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""

ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
# Maps a ps_object.access level to the PostScript access-operator keyword.
_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
class ps_object(object):
    """Base class for all PostScript interpreter objects.

    Class defaults: ``literal`` is 1 (literal object), ``access`` is 0
    (index into ``_accessstrings``), ``value`` is the wrapped Python value.
    Each instance's ``type`` is derived from the class name: the ``ps_``
    prefix is dropped and ``"type"`` appended (e.g. ``"objecttype"``).
    """

    literal = 1
    access = 0
    value = None

    def __init__(self, value):
        self.value = value
        self.type = type(self).__name__[3:] + "type"

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__[3:], repr(self.value))
class ps_operator(ps_object):
    """Built-in operator: pairs a name with the Python callable implementing it."""

    literal = 0

    def __init__(self, name, function):
        self.name = name
        self.function = function
        self.type = self.__class__.__name__[3:] + "type"

    def __repr__(self):
        # BUG FIX: was `return "" % self.name`, which raises TypeError
        # ("not all arguments converted during string formatting"); the
        # "<operator %s>" literal had been stripped (angle brackets read
        # as markup) by the text extraction.
        return "<operator %s>" % self.name
class ps_procedure(ps_object):
    """Executable procedure ({ ... }) object; value is a list of ps objects."""

    literal = 0

    def __repr__(self):
        # restored literal: the angle-bracketed text had been stripped by
        # the extraction, leaving an empty string
        return "<procedure>"

    def __str__(self):
        # render the elements space-separated inside braces
        psstring = "{"
        for i in range(len(self.value)):
            if i:
                psstring = psstring + " " + str(self.value[i])
            else:
                psstring = psstring + str(self.value[i])
        return psstring + "}"
class ps_name(ps_object):
    """Name object; executable by default, '/'-prefixed when literal."""

    literal = 0

    def __str__(self):
        prefix = "/" if self.literal else ""
        return prefix + self.value
class ps_literal(ps_object):
    """Literal name; always rendered with the leading slash."""

    def __str__(self):
        name = self.value
        return "/" + name
class ps_array(ps_object):
    """Array object; __str__ renders [ ... ] with per-item access keywords."""

    def __str__(self):
        parts = []
        for item in self.value:
            # append the item's access keyword (e.g. "readonly") when set
            access = _accessstrings[item.access]
            entry = str(item)
            if access:
                entry = entry + " " + access
            parts.append(entry)
        return "[" + " ".join(parts) + "]"

    def __repr__(self):
        # restored literal: the angle-bracketed text had been stripped by
        # the extraction, leaving an empty string
        return "<array>"
# Canonical order of the well-known top-level font-dict entries in the
# clear-text (pre-eexec) part of a Type 1 font program.
_type1_pre_eexec_order = [
    "FontInfo",
    "FontName",
    "Encoding",
    "PaintType",
    "FontType",
    "FontMatrix",
    "FontBBox",
    "UniqueID",
    "Metrics",
    "StrokeWidth",
]

# Canonical order of the FontInfo sub-dictionary entries.
_type1_fontinfo_order = [
    "version",
    "Notice",
    "FullName",
    "FamilyName",
    "Weight",
    "ItalicAngle",
    "isFixedPitch",
    "UnderlinePosition",
    "UnderlineThickness",
]

# Entries that belong in the encrypted (eexec) part of the program.
_type1_post_eexec_order = ["Private", "CharStrings", "FID"]
def _type1_item_repr(key, value):
    """Render one top-level Type 1 font dict entry as PostScript source."""
    access = _accessstrings[value.access]
    if access:
        access = access + " "
    # CharStrings and Encoding have dedicated serializers
    if key == "CharStrings":
        return "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
    if key == "Encoding":
        return _type1_Encoding_repr(value, access)
    return "/%s %s %sdef\n" % (str(key), str(value), access)
def _type1_Encoding_repr(encoding, access):
    """Render a 256-entry Encoding array, listing only non-.notdef slots."""
    codes = encoding.value
    lines = ["/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"]
    for code in range(256):
        glyph = codes[code].value
        if glyph != ".notdef":
            lines.append("dup %d /%s put\n" % (code, glyph))
    lines.append(access + "def\n")
    return "".join(lines)
def _type1_CharString_repr(charstrings):
    """Serialize a CharStrings dict; currently an unimplemented stub.

    NOTE(review): the items are sorted and then discarded — the function
    always returns the placeholder string "xxx".
    """
    items = sorted(charstrings.items())
    # XXX placeholder: real charstring serialization was never implemented
    return "xxx"
class ps_font(ps_object):
    """Complete Type 1 font dict; __str__ emits it as a PostScript program
    (clear-text section, eexec section, 512-zeros trailer, cleartomark)."""

    def __str__(self):
        psstring = "%d dict dup begin\n" % len(self.value)
        # well-known keys first, in canonical order
        for key in _type1_pre_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        # then all remaining keys, sorted
        items = sorted(self.value.items())
        for key, value in items:
            if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
                psstring = psstring + _type1_item_repr(key, value)
        psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
        # keys that live in the encrypted (eexec) section
        for key in _type1_post_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        return (
            psstring
            + "dup/FontName get exch definefont pop\nmark currentfile closefile\n"
            + 8 * (64 * "0" + "\n")
            + "cleartomark"
            + "\n"
        )

    def __repr__(self):
        # restored literal: the angle-bracketed text had been stripped by
        # the extraction, leaving an empty string
        return "<font>"
class ps_file(ps_object):
    """File object wrapping the interpreter's tokenizer (see ps_currentfile)."""
class ps_dict(ps_object):
    """Dictionary object; __str__ renders an 'N dict dup begin ... end' block."""

    def __str__(self):
        psstring = "%d dict dup begin\n" % len(self.value)
        # entries come out sorted by key, each with its access keyword
        items = sorted(self.value.items())
        for key, value in items:
            access = _accessstrings[value.access]
            if access:
                access = access + " "
            psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
        return psstring + "end "

    def __repr__(self):
        # restored literal: the angle-bracketed text had been stripped by
        # the extraction, leaving an empty string
        return "<dict>"
class ps_mark(ps_object):
    """Stack mark; its value is the fixed string "mark"."""

    def __init__(self):
        self.value = "mark"
        self.type = type(self).__name__[3:] + "type"
class ps_procmark(ps_object):
    """Marks the start of a procedure being built on the stack."""

    def __init__(self):
        self.value = "procmark"
        self.type = type(self).__name__[3:] + "type"
class ps_null(ps_object):
    """PostScript null object."""

    def __init__(self):
        # value deliberately stays at the ps_object class default (None)
        self.type = type(self).__name__[3:] + "type"
class ps_boolean(ps_object):
    """Boolean object; renders as the PostScript keywords true/false."""

    def __str__(self):
        return "true" if self.value else "false"
class ps_string(ps_object):
    """String object; renders in (...) syntax using Python repr escapes."""

    def __str__(self):
        body = repr(self.value)[1:-1]
        return "(" + body + ")"
class ps_integer(ps_object):
    """Integer object."""

    def __str__(self):
        text = repr(self.value)
        return text
class ps_real(ps_object):
    """Real (floating point) object."""

    def __str__(self):
        text = repr(self.value)
        return text
class PSOperators(object):
    """PostScript operators implemented as ``ps_<name>`` methods.

    NOTE(review): this is a mixin — it relies on the host interpreter to
    provide ``push``, ``pop``, ``call_procedure``, ``resolve_name``,
    ``handle_object``, ``stack``, ``dictstack``, ``tokenizer`` and
    ``mark``; none of those are defined here.  ``pop`` appears to take
    optional expected-type names (e.g. ``"proceduretype"``) — confirm
    against the interpreter class.
    """

    def ps_def(self):
        # key/value are popped value-first, per PostScript 'def'
        obj = self.pop()
        name = self.pop()
        self.dictstack[-1][name.value] = obj

    def ps_bind(self):
        proc = self.pop("proceduretype")
        self.proc_bind(proc)
        self.push(proc)

    def proc_bind(self, proc):
        # Recursively replace executable names in 'proc' with the operator
        # objects they currently resolve to (PostScript 'bind' semantics).
        for i in range(len(proc.value)):
            item = proc.value[i]
            if item.type == "proceduretype":
                self.proc_bind(item)
            else:
                if not item.literal:
                    try:
                        obj = self.resolve_name(item.value)
                    except:
                        # NOTE(review): bare except — names that fail to
                        # resolve are silently left unbound
                        pass
                    else:
                        if obj.type == "operatortype":
                            proc.value[i] = obj

    def ps_exch(self):
        # swap the two topmost stack entries
        if len(self.stack) < 2:
            raise RuntimeError("stack underflow")
        obj1 = self.pop()
        obj2 = self.pop()
        self.push(obj1)
        self.push(obj2)

    def ps_dup(self):
        # duplicate the top of the stack (same object, not a copy)
        if not self.stack:
            raise RuntimeError("stack underflow")
        self.push(self.stack[-1])

    def ps_exec(self):
        obj = self.pop()
        if obj.type == "proceduretype":
            self.call_procedure(obj)
        else:
            self.handle_object(obj)

    def ps_count(self):
        self.push(ps_integer(len(self.stack)))

    def ps_eq(self):
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value == any2.value))

    def ps_ne(self):
        any1 = self.pop()
        any2 = self.pop()
        self.push(ps_boolean(any1.value != any2.value))

    def ps_cvx(self):
        # make the object executable
        obj = self.pop()
        obj.literal = 0
        self.push(obj)

    def ps_matrix(self):
        # push the identity transformation matrix
        matrix = [
            ps_real(1.0),
            ps_integer(0),
            ps_integer(0),
            ps_real(1.0),
            ps_integer(0),
            ps_integer(0),
        ]
        self.push(ps_array(matrix))

    def ps_string(self):
        # note: inside this method, 'ps_string' refers to the module-level
        # class, not to this method (method names aren't in function scope)
        num = self.pop("integertype").value
        self.push(ps_string("\0" * num))

    def ps_type(self):
        obj = self.pop()
        self.push(ps_string(obj.type))

    def ps_store(self):
        value = self.pop()
        key = self.pop()
        name = key.value
        # search the dict stack top-down for an existing binding
        for i in range(len(self.dictstack) - 1, -1, -1):
            if name in self.dictstack[i]:
                self.dictstack[i][name] = value
                break
        # NOTE(review): this also stores into the current dict even when a
        # binding was found and updated above — verify this is intended
        self.dictstack[-1][name] = value

    def ps_where(self):
        name = self.pop()
        # XXX
        self.push(ps_boolean(0))

    def ps_systemdict(self):
        self.push(ps_dict(self.dictstack[0]))

    def ps_userdict(self):
        self.push(ps_dict(self.dictstack[1]))

    def ps_currentdict(self):
        self.push(ps_dict(self.dictstack[-1]))

    def ps_currentfile(self):
        self.push(ps_file(self.tokenizer))

    def ps_eexec(self):
        # switch the tokenizer into eexec (encrypted) reading mode
        f = self.pop("filetype").value
        f.starteexec()

    def ps_closefile(self):
        f = self.pop("filetype").value
        f.skipwhite()
        f.stopeexec()

    def ps_cleartomark(self):
        # pop until (and including) the mark object
        obj = self.pop()
        while obj != self.mark:
            obj = self.pop()

    def ps_readstring(self, ps_boolean=ps_boolean, len=len):
        # default args bind the globals at def time (old speed idiom)
        s = self.pop("stringtype")
        oldstr = s.value
        f = self.pop("filetype")
        # pad = file.value.read(1)
        # for StringIO, this is faster
        f.value.pos = f.value.pos + 1
        newstr = f.value.read(len(oldstr))
        s.value = newstr
        self.push(s)
        # success flag: did we read as many bytes as requested?
        self.push(ps_boolean(len(oldstr) == len(newstr)))

    def ps_known(self):
        key = self.pop()
        d = self.pop("dicttype", "fonttype")
        self.push(ps_boolean(key.value in d.value))

    def ps_if(self):
        proc = self.pop("proceduretype")
        if self.pop("booleantype").value:
            self.call_procedure(proc)

    def ps_ifelse(self):
        proc2 = self.pop("proceduretype")
        proc1 = self.pop("proceduretype")
        if self.pop("booleantype").value:
            self.call_procedure(proc1)
        else:
            self.call_procedure(proc2)

    def ps_readonly(self):
        # access levels only ever increase (0 < readonly < executeonly < noaccess)
        obj = self.pop()
        if obj.access < 1:
            obj.access = 1
        self.push(obj)

    def ps_executeonly(self):
        obj = self.pop()
        if obj.access < 2:
            obj.access = 2
        self.push(obj)

    def ps_noaccess(self):
        obj = self.pop()
        if obj.access < 3:
            obj.access = 3
        self.push(obj)

    def ps_not(self):
        # logical not for booleans, bitwise complement for integers
        obj = self.pop("booleantype", "integertype")
        if obj.type == "booleantype":
            self.push(ps_boolean(not obj.value))
        else:
            self.push(ps_integer(~obj.value))

    def ps_print(self):
        str = self.pop("stringtype")
        print("PS output --->", str.value)

    def ps_anchorsearch(self):
        # pushes (post, seek, true) on a prefix match, (string, false) otherwise
        seek = self.pop("stringtype")
        s = self.pop("stringtype")
        seeklen = len(seek.value)
        if s.value[:seeklen] == seek.value:
            self.push(ps_string(s.value[seeklen:]))
            self.push(seek)
            self.push(ps_boolean(1))
        else:
            self.push(s)
            self.push(ps_boolean(0))

    def ps_array(self):
        num = self.pop("integertype")
        array = ps_array([None] * num.value)
        self.push(array)

    def ps_astore(self):
        # fill the array from the stack, last-popped into the first slot
        array = self.pop("arraytype")
        for i in range(len(array.value) - 1, -1, -1):
            array.value[i] = self.pop()
        self.push(array)

    def ps_load(self):
        name = self.pop()
        self.push(self.resolve_name(name.value))

    def ps_put(self):
        obj1 = self.pop()
        obj2 = self.pop()
        obj3 = self.pop("arraytype", "dicttype", "stringtype", "proceduretype")
        tp = obj3.type
        if tp == "arraytype" or tp == "proceduretype":
            obj3.value[obj2.value] = obj1
        elif tp == "dicttype":
            obj3.value[obj2.value] = obj1
        elif tp == "stringtype":
            # strings are immutable: rebuild with the one character replaced
            index = obj2.value
            obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index + 1 :]

    def ps_get(self):
        obj1 = self.pop()
        if obj1.value == "Encoding":
            pass
        obj2 = self.pop(
            "arraytype", "dicttype", "stringtype", "proceduretype", "fonttype"
        )
        tp = obj2.type
        if tp in ("arraytype", "proceduretype"):
            self.push(obj2.value[obj1.value])
        elif tp in ("dicttype", "fonttype"):
            self.push(obj2.value[obj1.value])
        elif tp == "stringtype":
            # indexing a string yields the character code as an integer
            self.push(ps_integer(ord(obj2.value[obj1.value])))
        else:
            assert False, "shouldn't get here"

    def ps_getinterval(self):
        obj1 = self.pop("integertype")  # count
        obj2 = self.pop("integertype")  # start index
        obj3 = self.pop("arraytype", "stringtype")
        tp = obj3.type
        if tp == "arraytype":
            self.push(ps_array(obj3.value[obj2.value : obj2.value + obj1.value]))
        elif tp == "stringtype":
            self.push(ps_string(obj3.value[obj2.value : obj2.value + obj1.value]))

    def ps_putinterval(self):
        obj1 = self.pop("arraytype", "stringtype")  # source
        obj2 = self.pop("integertype")  # start index
        obj3 = self.pop("arraytype", "stringtype")  # destination
        tp = obj3.type
        if tp == "arraytype":
            obj3.value[obj2.value : obj2.value + len(obj1.value)] = obj1.value
        elif tp == "stringtype":
            newstr = obj3.value[: obj2.value]
            newstr = newstr + obj1.value
            newstr = newstr + obj3.value[obj2.value + len(obj1.value) :]
            obj3.value = newstr

    def ps_cvn(self):
        self.push(ps_name(self.pop("stringtype").value))

    def ps_index(self):
        n = self.pop("integertype").value
        if n < 0:
            raise RuntimeError("index may not be negative")
        self.push(self.stack[-1 - n])

    def ps_for(self):
        proc = self.pop("proceduretype")
        limit = self.pop("integertype", "realtype").value
        increment = self.pop("integertype", "realtype").value
        i = self.pop("integertype", "realtype").value
        while 1:
            # loop direction depends on the sign of the increment
            if increment > 0:
                if i > limit:
                    break
            else:
                if i < limit:
                    break
            # push the counter with a type matching its Python type
            if type(i) == type(0.0):
                self.push(ps_real(i))
            else:
                self.push(ps_integer(i))
            self.call_procedure(proc)
            i = i + increment

    def ps_forall(self):
        proc = self.pop("proceduretype")
        obj = self.pop("arraytype", "stringtype", "dicttype")
        tp = obj.type
        if tp == "arraytype":
            for item in obj.value:
                self.push(item)
                self.call_procedure(proc)
        elif tp == "stringtype":
            # strings iterate as character codes
            for item in obj.value:
                self.push(ps_integer(ord(item)))
                self.call_procedure(proc)
        elif tp == "dicttype":
            for key, value in obj.value.items():
                self.push(ps_name(key))
                self.push(value)
                self.call_procedure(proc)

    def ps_definefont(self):
        font = self.pop("dicttype")
        name = self.pop()
        # wrap the dict as a ps_font and register it in FontDirectory
        font = ps_font(font.value)
        self.dictstack[0]["FontDirectory"].value[name.value] = font
        self.push(font)

    def ps_findfont(self):
        name = self.pop()
        font = self.dictstack[0]["FontDirectory"].value[name.value]
        self.push(font)

    def ps_pop(self):
        self.pop()

    def ps_dict(self):
        # the requested capacity is ignored; Python dicts grow as needed
        self.pop("integertype")
        self.push(ps_dict({}))

    def ps_begin(self):
        self.dictstack.append(self.pop("dicttype").value)

    def ps_end(self):
        # the bottom two dicts (systemdict, userdict) may never be popped
        if len(self.dictstack) > 2:
            del self.dictstack[-1]
        else:
            raise RuntimeError("dictstack underflow")
# Conventional name of the missing-glyph entry.
notdef = ".notdef"

from fontTools.encodings.StandardEncoding import StandardEncoding

# The 256-entry StandardEncoding wrapped as ps_name objects, ready for
# use by the interpreter.
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
# venv\Lib\site-packages\fontTools\misc\py23.py
"""Python 2/3 compat layer leftovers."""
import decimal as _decimal
import math as _math
import warnings
from contextlib import redirect_stderr, redirect_stdout
from io import BytesIO
from io import StringIO as UnicodeIO
from types import SimpleNamespace
from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr
# Importing this module emits a DeprecationWarning: py23 survives only as
# a backward-compatibility re-export shim.
warnings.warn(
    "The py23 module has been deprecated and will be removed in a future release. "
    "Please update your code.",
    DeprecationWarning,
)
# Public surface kept for code that still does `from fontTools.misc.py23
# import ...`; everything here is an alias for a Python 3 builtin/stdlib name.
__all__ = [
    "basestring",
    "bytechr",
    "byteord",
    "BytesIO",
    "bytesjoin",
    "open",
    "Py23Error",
    "range",
    "RecursionError",
    "round",
    "SimpleNamespace",
    "StringIO",
    "strjoin",
    "Tag",
    "tobytes",
    "tostr",
    "tounicode",
    "unichr",
    "unicode",
    "UnicodeIO",
    "xrange",
    "zip",
]
class Py23Error(NotImplementedError):
    """Raised for Python 2 constructs that have no Python 3 equivalent."""
# Former Python 2/3 compatibility aliases; all now point at the Python 3
# builtin or stdlib equivalents and exist only for backward compatibility.
RecursionError = RecursionError
StringIO = UnicodeIO
basestring = str
isclose = _math.isclose
isfinite = _math.isfinite
open = open
range = range
round = round3 = round
unichr = chr
unicode = str
zip = zip
tounicode = tostr
def xrange(*args, **kwargs):
    """Removed Python 2 builtin; calling it always raises Py23Error."""
    raise Py23Error("'xrange' is not defined. Use 'range' instead.")
def round2(number, ndigits=None):
    """
    Implementation of Python 2 built-in round() function.

    Rounds a number to a given precision in decimal digits (default
    0 digits). The result is a floating point number. Values are rounded
    to the closest multiple of 10 to the power minus ndigits; if two
    multiples are equally close, rounding is done away from 0.

    ndigits may be negative.

    See Python 2 documentation:
    https://docs.python.org/2/library/functions.html?highlight=round#round
    """
    if ndigits is None:
        ndigits = 0

    if ndigits < 0:
        exponent = 10 ** (-ndigits)
        quotient, remainder = divmod(number, exponent)
        # Pick the closest multiple; on an exact tie, round away from zero
        # (up for non-negative numbers, down — i.e. keep the floor — for
        # negative ones, since divmod floors and leaves remainder >= 0).
        # BUG FIX: the previous condition
        # `remainder >= exponent // 2 and number >= 0` never rounded
        # negative numbers up, so e.g. round2(-149, -2) gave -200.0
        # instead of the closest multiple -100.0.
        half = exponent / 2
        if remainder > half or (remainder == half and number >= 0):
            quotient += 1
        return float(quotient * exponent)
    else:
        exponent = _decimal.Decimal("10") ** (-ndigits)

        # ROUND_HALF_UP in decimal means "ties away from zero", matching py2
        d = _decimal.Decimal.from_float(number).quantize(
            exponent, rounding=_decimal.ROUND_HALF_UP
        )

        return float(d)
"""
Various round-to-integer helpers.
"""
import math
import functools
import logging
# Module-level logger (part of the module's public surface).
log = logging.getLogger(__name__)

# Explicit public API of this rounding-helpers module.
__all__ = [
    "noRound",
    "otRound",
    "maybeRound",
    "roundFunc",
    "nearestMultipleShortestRepr",
]
def noRound(value):
    """Identity rounding function: return *value* unchanged."""
    return value
def otRound(value):
    """Round a float to the nearest integer, with ties going towards
    ``+Infinity``.

    The OpenType spec (the section on "normalization" of OpenType Font
    Variations) defines this as the required strategy when converting
    floating-point values to fixed-point: fractional parts of 0.5 and
    above take the next higher integer, all others truncate.

    Args:
        value (float): The input floating-point value.

    Returns:
        int: The rounded value.
    """
    # floor(x + 0.5) implements "round half towards +Infinity"; see
    # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
    return math.floor(value + 0.5)
def maybeRound(v, tolerance, round=otRound):
    """Round *v* with *round*, but only keep the rounded value if it is
    within *tolerance* of the original; otherwise return *v* unchanged."""
    candidate = round(v)
    if abs(candidate - v) <= tolerance:
        return candidate
    return v
def roundFunc(tolerance, round=otRound):
    """Return a rounding callable appropriate for *tolerance*:
    0 -> no rounding; >= 0.5 -> always round; in between -> round only
    when the rounded value stays within *tolerance* of the input.

    Raises:
        ValueError: if *tolerance* is negative.
    """
    if tolerance < 0:
        raise ValueError("Rounding tolerance must be positive")
    if tolerance == 0:
        return noRound
    if tolerance >= 0.5:
        return round
    return functools.partial(maybeRound, tolerance=tolerance, round=round)
def nearestMultipleShortestRepr(value: float, factor: float) -> str:
    """Round to nearest multiple of factor and return shortest decimal representation.

    This chooses the float that is closer to a multiple of the given factor while
    having the shortest decimal representation (the least number of fractional decimal
    digits).

    For example, given the following:

    >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14))
    '-0.61884'

    Useful when you need to serialize or print a fixed-point number (or multiples
    thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in
    a human-readable form.

    Args:
        value (value): The value to be rounded and serialized.
        factor (float): The value which the result is a close multiple of.

    Returns:
        str: A compact string representation of the value.
    """
    if not value:
        return "0.0"

    # Snap to the nearest multiple of factor; any float within +/- half a
    # factor of it still identifies the same fixed-point value.
    value = otRound(value / factor) * factor
    eps = 0.5 * factor
    lo = value - eps
    hi = value + eps
    # If the range of valid choices spans an integer, return the integer.
    if int(lo) != int(hi):
        return str(float(round(value)))
    # Render both interval ends with 8 fractional digits and keep only the
    # digits up to (and including) the first position where they differ:
    # that many digits is enough to disambiguate the value.
    fmt = "%.8f"
    lo = fmt % lo
    hi = fmt % hi
    assert len(lo) == len(hi) and lo != hi
    for i in range(len(lo)):
        if lo[i] != hi[i]:
            break
    period = lo.find(".")
    assert period < i
    fmt = "%%.%df" % (i - period)
    return fmt % value
# ---- venv\Lib\site-packages\fontTools\misc\sstruct.py ----
"""sstruct.py -- SuperStruct
Higher level layer on top of the struct module, enabling to
bind names to struct elements. The interface is similar to
struct, except the objects passed and returned are not tuples
(or argument lists), but dictionaries or instances.
Just like struct, we use fmt strings to describe a data
structure, except we use one line per element. Lines are
separated by newlines or semi-colons. Each line contains
either one of the special struct characters ('@', '=', '<',
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
Repetitions, like the struct module offers them are not useful
in this context, except for fixed length strings (eg. 'myInt:5h'
is not allowed but 'myString:5s' is). The 'x' fmt character
(pad byte) is treated as 'special', since it is by definition
anonymous. Extra whitespace is allowed everywhere.
The sstruct module offers one feature that the "normal" struct
module doesn't: support for fixed point numbers. These are spelled
as "n.mF", where n is the number of bits before the point, and m
the number of bits after the point. Fixed point numbers get
converted to floats.
pack(fmt, object):
'object' is either a dictionary or an instance (or actually
anything that has a __dict__ attribute). If it is a dictionary,
its keys are used for names. If it is an instance, it's
attributes are used to grab struct elements from. Returns
a string containing the data.
unpack(fmt, data, object=None)
If 'object' is omitted (or None), a new dictionary will be
returned. If 'object' is a dictionary, it will be used to add
struct elements to. If it is an instance (or in fact anything
that has a __dict__ attribute), an attribute will be added for
each struct element. In the latter two cases, 'object' itself
is returned.
unpack2(fmt, data, object=None)
Convenience function. Same as unpack, except data may be longer
than needed. The returned value is a tuple: (object, leftoverdata).
calcsize(fmt)
like struct.calcsize(), but uses our own fmt strings:
it returns the size of the data in bytes.
"""
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from fontTools.misc.textTools import tobytes, tostr
import struct
import re
__version__ = "1.2"
__copyright__ = "Copyright 1998, Just van Rossum "
class Error(Exception):
    """Raised for malformed sstruct format strings."""

    pass
def pack(fmt, obj):
    """Pack *obj* (a dict or any object with a ``__dict__``) into bytes
    according to the sstruct format string *fmt*.

    Raises:
        ValueError: if a value does not fit its struct format character.
    """
    formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
    elements = []
    if not isinstance(obj, dict):
        obj = obj.__dict__
    for name in names.keys():
        value = obj[name]
        if name in fixes:
            # fixed point conversion
            value = fl2fi(value, fixes[name])
        elif isinstance(value, str):
            value = tobytes(value)
        elements.append(value)
        # Check it fits
        try:
            struct.pack(names[name], value)
        except Exception as e:
            raise ValueError(
                "Value %s does not fit in format %s for %s" % (value, names[name], name)
            ) from e
    data = struct.pack(*(formatstring,) + tuple(elements))
    return data
def unpack(fmt, data, obj=None):
    """Unpack binary *data* according to the sstruct format *fmt*.

    If *obj* is None a new dict is returned; if it is a dict the elements
    are added to it; any other object gets them set as attributes. In all
    cases *obj* (or the new dict) is returned.
    """
    if obj is None:
        obj = {}
    data = tobytes(data)
    formatstring, names, fixes = getformat(fmt)
    if isinstance(obj, dict):
        d = obj
    else:
        d = obj.__dict__
    elements = struct.unpack(formatstring, data)
    for i, name in enumerate(names.keys()):
        value = elements[i]
        if name in fixes:
            # fixed point conversion
            value = fi2fl(value, fixes[name])
        elif isinstance(value, bytes):
            # Best effort: expose ASCII-decodable byte strings as str,
            # leave undecodable ones as raw bytes.
            try:
                value = tostr(value)
            except UnicodeDecodeError:
                pass
        d[name] = value
    return obj
def unpack2(fmt, data, obj=None):
    """Like unpack(), but *data* may be longer than needed.

    Returns a tuple ``(object, leftoverdata)``.
    """
    consumed = calcsize(fmt)
    head, tail = data[:consumed], data[consumed:]
    return unpack(fmt, head, obj), tail
def calcsize(fmt):
    """Like ``struct.calcsize()``, but on an sstruct format string: return
    the size in bytes of the described data."""
    # Only the struct-level format string is needed here; the names/fixes
    # returned by getformat() were previously bound to unused locals.
    formatstring = getformat(fmt)[0]
    return struct.calcsize(formatstring)
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
    r"\s*"  # whitespace
    r"([A-Za-z_][A-Za-z_0-9]*)"  # name (python identifier)
    r"\s*:\s*"  # whitespace : whitespace
    r"([xcbB?hHiIlLqQfd]|"  # formatchar...
    r"[0-9]+[ps]|"  # ...formatchar...
    r"([0-9]+)\.([0-9]+)(F))"  # ...formatchar (fixed-point "n.mF")
    r"\s*"  # whitespace
    r"(#.*)?$"  # [comment] + end of string
)

# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")

# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")

# total bit width of a fixed-point field -> struct format char used to store it
_fixedpointmappings = {8: "b", 16: "h", 32: "l"}

# memoizes getformat() results, keyed by the raw format string
_formatcache = {}
def getformat(fmt, keep_pad_byte=False):
    """Parse an sstruct format string.

    Returns a tuple ``(formatstring, names, fixes)`` where *formatstring*
    is a plain struct-module format string, *names* maps element names to
    their format chars (in declaration order), and *fixes* maps fixed-point
    element names to their number of fractional bits. Results are cached
    per *fmt*.

    Raises:
        Error: on syntax errors or invalid fixed-point widths.
    """
    fmt = tostr(fmt, encoding="ascii")
    try:
        formatstring, names, fixes = _formatcache[fmt]
    except KeyError:
        lines = re.split("[\n;]", fmt)
        formatstring = ""
        names = {}
        fixes = {}
        for line in lines:
            if _emptyRE.match(line):
                continue
            m = _extraRE.match(line)
            if m:
                formatchar = m.group(1)
                # Byte-order chars are only meaningful at the very start.
                if formatchar != "x" and formatstring:
                    raise Error("a special fmt char must be first")
            else:
                m = _elementRE.match(line)
                if not m:
                    raise Error("syntax error in fmt: '%s'" % line)
                name = m.group(1)
                formatchar = m.group(2)
                if keep_pad_byte or formatchar != "x":
                    names[name] = formatchar
                if m.group(3):
                    # fixed point
                    before = int(m.group(3))
                    after = int(m.group(4))
                    bits = before + after
                    if bits not in [8, 16, 32]:
                        raise Error("fixed point must be 8, 16 or 32 bits long")
                    formatchar = _fixedpointmappings[bits]
                    names[name] = formatchar
                    assert m.group(5) == "F"
                    fixes[name] = after
            formatstring += formatchar
        _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes
def _test():
    """Smoke test: round-trip a sample of every supported element kind."""
    fmt = """
# comments are allowed
> # big endian (see documentation for struct)
# empty lines are allowed:
ashort: h
along: l
abyte: b # a byte
achar: c
astr: 5s
afloat: f; adouble: d # multiple "statements" are allowed
afixed: 16.16F
abool: ?
apad: x
"""

    print("size:", calcsize(fmt))

    class foo(object):
        pass

    i = foo()

    i.ashort = 0x7FFF
    i.along = 0x7FFFFFFF
    i.abyte = 0x7F
    i.achar = "a"
    i.astr = "12345"
    i.afloat = 0.5
    i.adouble = 0.5
    i.afixed = 1.5
    i.abool = True

    data = pack(fmt, i)
    print("data:", repr(data))
    print(unpack(fmt, data))
    i2 = foo()
    unpack(fmt, data, i2)
    print(vars(i2))


if __name__ == "__main__":
    _test()
# ---- venv\Lib\site-packages\fontTools\misc\symfont.py ----
from fontTools.pens.basePen import BasePen
from functools import partial
from itertools import count
import sympy as sp
import sys
n = 3  # Max Bezier degree; 3 for cubic, 2 for quadratic

# Symbolic variables: curve parameter t and plane coordinates x, y.
t, x, y = sp.symbols("t x y", real=True)
c = sp.symbols("c", real=False)  # Complex representation instead of x/y

# Control-point coordinate symbols, indexed 0..n.
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))

# Cubic Bernstein basis functions
# Pascal's triangle rows 0..n (each padded row trimmed back afterwards).
BinomialCoefficient = [(1, 0)]
for i in range(1, n + 1):
    last = BinomialCoefficient[-1]
    this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
    BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this

# BernsteinPolynomial[n][i] = C(n,i) * t**i * (1-t)**(n-i)
# (the loop variables n and c deliberately shadow the module-level symbols
# inside these comprehensions)
BernsteinPolynomial = tuple(
    tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
    for n, coeffs in enumerate(BinomialCoefficient)
)

# BezierCurve[n]: degree-n curve as a symbolic (x(t), y(t)) pair over P.
BezierCurve = tuple(
    tuple(
        sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
        for j in range(2)
    )
    for n, bernsteins in enumerate(BernsteinPolynomial)
)

# BezierCurveC[n]: same curve with complex-valued control points C.
BezierCurveC = tuple(
    sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
    for n, bernsteins in enumerate(BernsteinPolynomial)
)
def green(f, curveXY):
    """Convert the area integral of *f* into a line integral along the
    parametric curve *curveXY* = (x(t), y(t)), t in [0, 1], using Green's
    theorem."""
    # Antiderivative in y, substituted with the curve, then integrated
    # against dx/dt over one parameter sweep.
    f = -sp.integrate(sp.sympify(f), y)
    f = f.subs({x: curveXY[0], y: curveXY[1]})
    f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
    return f
class _BezierFuncsLazy(dict):
    """Maps Bezier degree -> compiled Green-integral function for one
    symbolic integrand, computing (and caching) each entry on first use."""

    def __init__(self, symfunc):
        # The symbolic integrand this cache specializes.
        self._symfunc = symfunc
        self._bezfuncs = {}

    def __missing__(self, i):
        args = ["p%d" % d for d in range(i + 1)]
        f = green(self._symfunc, BezierCurve[i])
        f = sp.gcd_terms(f.collect(sum(P, ())))  # Optimize
        func = sp.lambdify(args, f)
        # BUGFIX: store the compiled function so later lookups reuse it;
        # previously __missing__ returned without caching, so the "lazy
        # cache" redid the expensive symbolic integration on every access.
        self[i] = func
        return func
class GreenPen(BasePen):
    """Pen that accumulates, in ``self.value``, the Green-theorem line
    integral of a symbolic function over the outline it draws."""

    # Shared cache: str(func) -> _BezierFuncsLazy for that integrand.
    _BezierFuncs = {}

    @classmethod
    def _getGreenBezierFuncs(celf, func):
        funcstr = str(func)
        if not funcstr in celf._BezierFuncs:
            celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
        return celf._BezierFuncs[funcstr]

    def __init__(self, func, glyphset=None):
        BasePen.__init__(self, glyphset)
        self._funcs = self._getGreenBezierFuncs(func)
        self.value = 0

    def _moveTo(self, p0):
        self._startPoint = p0

    def _closePath(self):
        # Close the contour with an implicit line back to the start.
        p0 = self._getCurrentPoint()
        if p0 != self._startPoint:
            self._lineTo(self._startPoint)

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self._startPoint:
            # Green theorem is not defined on open contours.
            raise NotImplementedError

    def _lineTo(self, p1):
        p0 = self._getCurrentPoint()
        # _funcs[degree] expects degree+1 control points.
        self.value += self._funcs[1](p0, p1)

    def _qCurveToOne(self, p1, p2):
        p0 = self._getCurrentPoint()
        self.value += self._funcs[2](p0, p1, p2)

    def _curveToOne(self, p1, p2, p3):
        p0 = self._getCurrentPoint()
        self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens.
# Do not use this in real code.
# Use fontTools.pens.momentsPen.MomentsPen instead.
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x * x)
MomentYYPen = partial(GreenPen, func=y * y)
MomentXYPen = partial(GreenPen, func=x * y)
def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
    """Generate Python source for a pen class that accumulates the
    Green-theorem integrals of *funcs* and write it to *file*.

    Args:
        penName: class name for the generated pen.
        funcs: sequence of ``(attributeName, sympyIntegrand)`` pairs.
        file: output stream (default sys.stdout).
        docstring: optional module docstring for the generated file.

    NOTE(review): indentation inside the emitted template strings was
    reconstructed (it had been stripped); verify the generated module
    against a known-good momentsPen.py.
    """
    if docstring is not None:
        # BUGFIX: was a bare print(), which leaked the docstring to stdout
        # instead of the requested output stream.
        print('"""%s"""' % docstring, file=file)
    print(
        """from fontTools.pens.basePen import BasePen, OpenContourError
try:
    import cython
except (AttributeError, ImportError):
    # if cython not installed, use mock module with no-op decorators and types
    from fontTools.misc import cython

COMPILED = cython.compiled

__all__ = ["%s"]

class %s(BasePen):

    def __init__(self, glyphset=None):
        BasePen.__init__(self, glyphset)
"""
        % (penName, penName),
        file=file,
    )
    for name, f in funcs:
        print("        self.%s = 0" % name, file=file)
    print(
        """
    def _moveTo(self, p0):
        self._startPoint = p0

    def _closePath(self):
        p0 = self._getCurrentPoint()
        if p0 != self._startPoint:
            self._lineTo(self._startPoint)

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self._startPoint:
            raise OpenContourError(
                "Glyph statistics is not defined on open contours."
            )
""",
        end="",
        file=file,
    )
    for n in (1, 2, 3):
        # Substitute generic control-point symbols with x0..xn / y0..yn.
        subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
        greens = [green(f, BezierCurve[n]) for name, f in funcs]
        greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens]  # Optimize
        greens = [f.subs(subs) for f in greens]  # Convert to p to x/y
        defs, exprs = sp.cse(
            greens,
            optimizations="basic",
            symbols=(sp.Symbol("r%d" % i) for i in count()),
        )

        # BUGFIX: was a bare print(), which emitted the separating blank
        # line to stdout instead of *file*.
        print(file=file)
        for name, value in defs:
            print("    @cython.locals(%s=cython.double)" % name, file=file)
        if n == 1:
            print(
                """\
    @cython.locals(x0=cython.double, y0=cython.double)
    @cython.locals(x1=cython.double, y1=cython.double)
    def _lineTo(self, p1):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
""",
                file=file,
            )
        elif n == 2:
            print(
                """\
    @cython.locals(x0=cython.double, y0=cython.double)
    @cython.locals(x1=cython.double, y1=cython.double)
    @cython.locals(x2=cython.double, y2=cython.double)
    def _qCurveToOne(self, p1, p2):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2
""",
                file=file,
            )
        elif n == 3:
            print(
                """\
    @cython.locals(x0=cython.double, y0=cython.double)
    @cython.locals(x1=cython.double, y1=cython.double)
    @cython.locals(x2=cython.double, y2=cython.double)
    @cython.locals(x3=cython.double, y3=cython.double)
    def _curveToOne(self, p1, p2, p3):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2
        x3,y3 = p3
""",
                file=file,
            )
        for name, value in defs:
            print("        %s = %s" % (name, value), file=file)
        print(file=file)
        for name, value in zip([f[0] for f in funcs], exprs):
            print("        self.%s += %s" % (name, value), file=file)

    print(
        """
if __name__ == '__main__':
    from fontTools.misc.symfont import x, y, printGreenPen
    printGreenPen('%s', ["""
        % penName,
        file=file,
    )
    for name, f in funcs:
        print("              ('%s', %s)," % (name, str(f)), file=file)
    print("             ])", file=file)
if __name__ == "__main__":
    import sys

    # Usage: symfont.py PenName attr1 expr1 [attr2 expr2 ...]
    if sys.argv[1:]:
        penName = sys.argv[1]
        # NOTE(review): eval() on command-line text — acceptable for a
        # developer code-generation tool, but never expose to untrusted input.
        funcs = [(name, eval(f)) for name, f in zip(sys.argv[2::2], sys.argv[3::2])]
        printGreenPen(penName, funcs, file=sys.stdout)
"""Helpers for writing unit tests."""
from collections.abc import Iterable
from io import BytesIO
import os
import re
import shutil
import sys
import tempfile
from unittest import TestCase as _TestCase
from fontTools.config import Config
from fontTools.misc.textTools import tobytes
from fontTools.misc.xmlWriter import XMLWriter
def parseXML(xmlSnippet):
    """Parses a snippet of XML.

    Input can be either a single string (unicode or UTF-8 bytes), or a
    a sequence of strings.

    The result is in the same format that would be returned by
    XMLReader, but the parser imposes no constraints on the root
    element so it can be called on small snippets of TTX files.
    """
    # To support snippets with multiple elements, we add a fake root.
    # BUGFIX: the <root> / </root> literals had been stripped (extraction
    # damage); without them multi-element snippets are not well-formed XML
    # and reader.root[2] would not be the snippet's element list.
    reader = TestXMLReader_()
    xml = b"<root>"
    if isinstance(xmlSnippet, bytes):
        xml += xmlSnippet
    elif isinstance(xmlSnippet, str):
        xml += tobytes(xmlSnippet, "utf-8")
    elif isinstance(xmlSnippet, Iterable):
        xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
    else:
        raise TypeError(
            "expected string or sequence of strings; found %r"
            % type(xmlSnippet).__name__
        )
    xml += b"</root>"
    reader.parser.Parse(xml, 1)
    # Children of the fake root == the parsed snippet content.
    return reader.root[2]
def parseXmlInto(font, parseInto, xmlSnippet):
    """Parse *xmlSnippet* and feed each element into ``parseInto.fromXML``;
    returns *parseInto* (with defaults populated when it supports that)."""
    # Drop bare text nodes (whitespace between elements).
    parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)]
    for name, attrs, content in parsed_xml:
        parseInto.fromXML(name, attrs, content, font)
    if hasattr(parseInto, "populateDefaults"):
        parseInto.populateDefaults()
    return parseInto
class FakeFont:
    """A minimal TTFont stand-in for unit tests: a fixed glyph order plus a
    plain dict of tables."""

    def __init__(self, glyphs):
        self.glyphOrder_ = glyphs
        self.reverseGlyphOrderDict_ = dict(zip(glyphs, range(len(glyphs))))
        self.lazy = False
        self.tables = {}
        self.cfg = Config()

    def __contains__(self, tag):
        return tag in self.tables

    def __getitem__(self, tag):
        return self.tables[tag]

    def __setitem__(self, tag, table):
        self.tables[tag] = table

    def get(self, tag, default=None):
        return self.tables.get(tag, default)

    def getGlyphID(self, name):
        return self.reverseGlyphOrderDict_[name]

    def getGlyphIDMany(self, lst):
        return [self.getGlyphID(glyphName) for glyphName in lst]

    def getGlyphName(self, glyphID):
        if glyphID < len(self.glyphOrder_):
            return self.glyphOrder_[glyphID]
        # Out-of-range IDs get a synthetic name, like TTFont does.
        return "glyph%.5d" % glyphID

    def getGlyphNameMany(self, lst):
        return [self.getGlyphName(glyphID) for glyphID in lst]

    def getGlyphOrder(self):
        return self.glyphOrder_

    def getReverseGlyphMap(self):
        return self.reverseGlyphOrderDict_

    def getGlyphNames(self):
        return sorted(self.glyphOrder_)
class TestXMLReader_(object):
    """Collects an expat parse into nested ``(name, attrs, content)``
    tuples; text nodes appear as plain strings inside *content*."""

    def __init__(self):
        from xml.parsers.expat import ParserCreate

        parser = ParserCreate()
        parser.StartElementHandler = self.startElement_
        parser.EndElementHandler = self.endElement_
        parser.CharacterDataHandler = self.addCharacterData_
        self.parser = parser
        self.root = None
        self.stack = []

    def startElement_(self, name, attrs):
        element = (name, attrs, [])
        if not self.stack:
            # First element seen becomes the document root.
            self.root = element
        else:
            self.stack[-1][2].append(element)
        self.stack.append(element)

    def endElement_(self, name):
        self.stack.pop()

    def addCharacterData_(self, data):
        self.stack[-1][2].append(data)
def makeXMLWriter(newlinestr="\n"):
    """Return an XMLWriter writing into an in-memory BytesIO, with the XML
    declaration removed so tests compare bare element output."""
    # don't write OS-specific new lines
    writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
    # erase XML declaration
    writer.file.seek(0)
    writer.file.truncate()
    return writer
def getXML(func, ttFont=None):
    """Call the passed toXML function and return the written content as a
    list of lines (unicode strings).

    Result is stripped of XML declaration and OS-specific newline characters.
    """
    writer = makeXMLWriter()
    func(writer, ttFont)
    xml = writer.file.getvalue().decode("utf-8")
    # toXML methods must always end with a writer.newline()
    assert xml.endswith("\n")
    return xml.splitlines()
def stripVariableItemsFromTTX(
    string: str,
    ttLibVersion: bool = True,
    checkSumAdjustment: bool = True,
    modified: bool = True,
    created: bool = True,
    sfntVersion: bool = False,  # opt-in only
) -> str:
    """Strip stuff like ttLibVersion, checksums, timestamps, etc. from TTX dumps.

    BUGFIX: the element patterns for checkSumAdjustment/modified/created had
    been lost (extraction stripped the angle-bracket content, leaving
    ``re.sub('', "", string)`` no-ops that matched the empty string); the
    patterns are restored from the attribute/element names they strip.
    """
    # ttlib changes with the fontTools version
    if ttLibVersion:
        string = re.sub(' ttLibVersion="[^"]+"', "", string)
    # sometimes (e.g. some subsetter tests) we don't care whether it's OTF or TTF
    if sfntVersion:
        string = re.sub(' sfntVersion="[^"]+"', "", string)
    # head table checksum and creation and mod date changes with each save.
    if checkSumAdjustment:
        string = re.sub('<checkSumAdjustment value="[^"]+"/>', "", string)
    if modified:
        string = re.sub('<modified value="[^"]+"/>', "", string)
    if created:
        string = re.sub('<created value="[^"]+"/>', "", string)
    return string
class MockFont(object):
    """A font-like object that automatically adds any looked up glyphname
    to its glyphOrder."""

    def __init__(self):
        self._glyphOrder = [".notdef"]

        class AllocatingDict(dict):
            # NOTE: the first parameter is the dict instance itself (what
            # would conventionally be 'self'); the enclosing MockFont is
            # reached through the closed-over 'self' from __init__. A miss
            # allocates the next glyph ID and records the name.
            def __missing__(reverseDict, key):
                self._glyphOrder.append(key)
                gid = len(reverseDict)
                reverseDict[key] = gid
                return gid

        self._reverseGlyphOrder = AllocatingDict({".notdef": 0})
        self.lazy = False

    def getGlyphID(self, glyph):
        # Lookup allocates on miss (see AllocatingDict.__missing__).
        gid = self._reverseGlyphOrder[glyph]
        return gid

    def getReverseGlyphMap(self):
        return self._reverseGlyphOrder

    def getGlyphName(self, gid):
        return self._glyphOrder[gid]

    def getGlyphOrder(self):
        return self._glyphOrder
class TestCase(_TestCase):
    """unittest.TestCase with the renamed assertRaisesRegex available under
    a single spelling regardless of Python version."""

    def __init__(self, methodName):
        _TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
class DataFilesHandler(TestCase):
    """TestCase helper that locates test data files and hands out temporary
    copies, cleaning up the temp directory on tearDown."""

    def setUp(self):
        self.tempdir = None
        self.num_tempfiles = 0

    def tearDown(self):
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def getpath(self, testfile):
        """Path of *testfile* inside the test module's ``data`` directory."""
        folder = os.path.dirname(sys.modules[self.__module__].__file__)
        return os.path.join(folder, "data", testfile)

    def temp_dir(self):
        # Lazily create one shared temp directory per test.
        if not self.tempdir:
            self.tempdir = tempfile.mkdtemp()

    def temp_font(self, font_path, file_name):
        """Copy *font_path* into the temp dir as *file_name*; return the copy's path."""
        self.temp_dir()
        temppath = os.path.join(self.tempdir, file_name)
        shutil.copy2(font_path, temppath)
        return temppath
"""fontTools.misc.textTools.py -- miscellaneous routines."""
import ast
import string
# alias kept for backward compatibility; evaluates a string containing a
# Python literal without executing arbitrary code
safeEval = ast.literal_eval
class Tag(str):
    """A table tag string that compares equal to both str and bytes
    spellings of the same four characters."""

    @staticmethod
    def transcode(blob):
        # bytes tags are latin-1 so every byte value 0-255 round-trips.
        return blob.decode("latin-1") if isinstance(blob, bytes) else blob

    def __new__(cls, content):
        return str.__new__(cls, cls.transcode(content))

    def __eq__(self, other):
        return str.__eq__(self, self.transcode(other))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash like the underlying str so Tag("x") and "x" collide in dicts.
        return str.__hash__(self)

    def tobytes(self):
        return self.encode("latin-1")
def readHex(content):
    """Convert a list of hex strings to binary data.

    Non-string chunks (e.g. nested XML elements) are ignored.
    """
    text = "".join(chunk for chunk in content if isinstance(chunk, str))
    return deHexStr(text)
def deHexStr(hexdata):
    """Convert a hex string (whitespace allowed) to binary data."""
    compact = "".join(hexdata.split())
    if len(compact) % 2:
        # A trailing lone digit is treated as the high nibble of a byte.
        compact += "0"
    return bytes.fromhex(compact)
def hexStr(data):
    """Convert binary data to a lowercase hex string."""
    pieces = []
    for ch in data:
        # Accept both bytes (ints) and str input; mask to one byte like the
        # classic nibble-table implementation did.
        val = ch if isinstance(ch, int) else ord(ch)
        pieces.append("%x%x" % ((val >> 4) & 0xF, val & 0xF))
    return "".join(pieces)
def num2binary(l, bits=32):
    """Render integer *l* as a binary string of *bits* digits, grouped into
    space-separated bytes (most significant group first). Works for negative
    numbers as long as they fit (two's-complement style: shifted-out sign
    must be all ones)."""
    items = []
    binary = ""
    for i in range(bits):
        if l & 0x1:
            binary = "1" + binary
        else:
            binary = "0" + binary
        l = l >> 1
        # Start a new group every 8 digits.
        if not ((i + 1) % 8):
            items.append(binary)
            binary = ""
    if binary:
        items.append(binary)
    items.reverse()
    # After consuming *bits* digits only 0 (non-negative) or -1 (negative
    # sign extension) may remain.
    assert l in (0, -1), "number doesn't fit in number of bits"
    return " ".join(items)
def binary2num(bin):
    """Parse a binary digit string (whitespace allowed) into an integer.

    Any character other than '0' counts as a set bit.
    """
    value = 0
    for digit in "".join(bin.split()):
        value <<= 1
        if digit != "0":
            value |= 0x1
    return value
def caselessSort(alist):
    """Return a sorted copy of *alist*. If there are only strings in the
    list, ordering is case-insensitive (original spelling breaks ties)."""
    try:
        return sorted(alist, key=lambda item: (item.lower(), item))
    except TypeError:
        # Heterogeneous keys: fall back to plain ordering.
        return sorted(alist)
def pad(data, size):
    """Return *data* as bytes, padded with NUL bytes so its length is a
    multiple of *size* (str input is ASCII-encoded first)."""
    data = data.encode("ascii") if isinstance(data, str) else bytes(data)
    if size > 1:
        shortfall = len(data) % size
        if shortfall:
            data += b"\0" * (size - shortfall)
    return data
def tostr(s, encoding="ascii", errors="strict"):
    """Return *s* as str: decode bytes-like input with *encoding*, pass
    str through untouched."""
    if isinstance(s, str):
        return s
    return s.decode(encoding, errors)
def tobytes(s, encoding="ascii", errors="strict"):
    """Return *s* as bytes: encode str input with *encoding*, coerce
    everything else through bytes()."""
    return s.encode(encoding, errors) if isinstance(s, str) else bytes(s)
def bytechr(n):
    """Return a length-1 bytes object for the integer *n* (0-255)."""
    return bytes((n,))
def byteord(c):
    """Return the integer value of a one-character str/bytes; ints pass
    through unchanged."""
    if isinstance(c, int):
        return c
    return ord(c)
def strjoin(iterable, joiner=""):
    """Join str pieces with *joiner* (bytes joiners are ASCII-decoded)."""
    if not isinstance(joiner, str):
        joiner = joiner.decode("ascii")
    return joiner.join(iterable)
def bytesjoin(iterable, joiner=b""):
    """Join items into one bytes object; str items (and joiner) are
    ASCII-encoded first."""

    def _coerce(s):
        return s.encode("ascii") if isinstance(s, str) else bytes(s)

    return _coerce(joiner).join(_coerce(item) for item in iterable)
if __name__ == "__main__":
    import doctest, sys

    # Exit with the number of failed doctests (0 == success).
    sys.exit(doctest.testmod().failed)
"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.
"""
import os
import time
from datetime import datetime, timezone
import calendar
# Unix timestamp of the OpenType epoch (1904-01-01 00:00:00 UTC); negative,
# since it predates the Unix epoch. Adding it converts OpenType timestamps
# to Unix time, subtracting converts back.
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))

# Locale-independent English names (MONTHNAMES is 1-based: index 0 unused).
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [
    None,
    "Jan",
    "Feb",
    "Mar",
    "Apr",
    "May",
    "Jun",
    "Jul",
    "Aug",
    "Sep",
    "Oct",
    "Nov",
    "Dec",
]
def asctime(t=None):
    """Format a struct_time as a 24-character asctime-style string, e.g.

    >>> asctime(time.gmtime(0))
    'Thu Jan  1 00:00:00 1970'

    If *t* is omitted, the current local time is used. Locale information
    is not used. This normalises the output of the built-in time.asctime()
    across platforms and Python versions (some pad the day-of-month with
    zeros instead of right-justifying it);
    see https://github.com/fonttools/fonttools/issues/455
    """
    if t is None:
        t = time.localtime()
    weekday = DAYNAMES[t.tm_wday]
    month = MONTHNAMES[t.tm_mon]
    clock_and_year = time.strftime("%H:%M:%S %Y", t)
    # %2s right-justifies single-digit days with a space, like asctime(3).
    return "%s %s %2s %s" % (weekday, month, t.tm_mday, clock_and_year)
def timestampToString(value):
    """Render an OpenType timestamp as an asctime-style UTC string; values
    before the 1904 epoch are clamped to the epoch."""
    return asctime(time.gmtime(max(0, value + epoch_diff)))
def timestampFromString(value):
    """Parse an asctime()-style string back into an OpenType timestamp
    (seconds since 1904-01-01 UTC).

    Raises:
        AssertionError: if the weekday name is inconsistent with the date.
    """
    wkday, mnth = value[:7].split()
    t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
    # Month resolved by table lookup: strptime month names are locale-dependent.
    t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
    wkday_idx = DAYNAMES.index(wkday)
    assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
    return int(t.timestamp()) - epoch_diff
def timestampNow():
    """Current time as an OpenType timestamp; honors SOURCE_DATE_EPOCH for
    reproducible builds."""
    # https://reproducible-builds.org/specs/source-date-epoch/
    source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if source_date_epoch is not None:
        return int(source_date_epoch) - epoch_diff
    return int(time.time() - epoch_diff)
def timestampSinceEpoch(value):
    """Convert a Unix timestamp to an OpenType timestamp."""
    return int(value - epoch_diff)


if __name__ == "__main__":
    import sys
    import doctest

    # Exit with the number of failed doctests (0 == success).
    sys.exit(doctest.testmod().failed)
"""Generic tools for working with trees."""
from math import ceil, log
def build_n_ary_tree(leaves, n):
    """Build N-ary tree from sequence of leaf nodes.

    Return a list of lists where each non-leaf node is a list containing
    max n nodes.
    """
    if not leaves:
        return []

    assert n > 1

    # Minimum number of levels needed to hold all leaves.
    depth = ceil(log(len(leaves), n))

    if depth <= 1:
        return list(leaves)

    # Fully populate complete subtrees of root until we have enough leaves left
    root = []
    unassigned = None
    full_step = n ** (depth - 1)  # leaves per complete depth-1 subtree
    for i in range(0, len(leaves), full_step):
        subtree = leaves[i : i + full_step]
        if len(subtree) < full_step:
            unassigned = subtree
            break
        # Repeatedly group by n until the subtree fans out at most n ways.
        while len(subtree) > n:
            subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)]
        root.append(subtree)

    if unassigned:
        # Recurse to fill the last subtree, which is the only partially populated one
        subtree = build_n_ary_tree(unassigned, n)
        if len(subtree) <= n - len(root):
            # replace last subtree with its children if they can still fit
            root.extend(subtree)
        else:
            root.append(subtree)
        assert len(root) <= n

    return root
# ---- venv\Lib\site-packages\fontTools\misc\vector.py ----
from numbers import Number
import math
import operator
import warnings
__all__ = ["Vector"]
class Vector(tuple):
    """A math-like vector.

    Represents an n-dimensional numeric vector. ``Vector`` objects support
    vector addition and subtraction, scalar multiplication and division,
    negation, rounding, and comparison tests.
    """

    __slots__ = ()

    def __new__(cls, values, keep=False):
        # 'keep' is accepted only for backward compatibility and warned about.
        if keep is not False:
            warnings.warn(
                "the 'keep' argument has been deprecated",
                DeprecationWarning,
            )
        if type(values) == Vector:
            # No need to create a new object
            return values
        return super().__new__(cls, values)

    def __repr__(self):
        return f"{self.__class__.__name__}({super().__repr__()})"

    def _vectorOp(self, other, op):
        # Element-wise against another Vector of equal length, or a scalar
        # broadcast over every component.
        if isinstance(other, Vector):
            assert len(self) == len(other)
            return self.__class__(op(a, b) for a, b in zip(self, other))
        if isinstance(other, Number):
            return self.__class__(op(v, other) for v in self)
        raise NotImplementedError()

    def _scalarOp(self, other, op):
        # Scalar-only operation (used for multiplication/division).
        if isinstance(other, Number):
            return self.__class__(op(v, other) for v in self)
        raise NotImplementedError()

    def _unaryOp(self, op):
        return self.__class__(op(v) for v in self)

    def __add__(self, other):
        return self._vectorOp(other, operator.add)

    __radd__ = __add__

    def __sub__(self, other):
        return self._vectorOp(other, operator.sub)

    def __rsub__(self, other):
        return self._vectorOp(other, _operator_rsub)

    def __mul__(self, other):
        return self._scalarOp(other, operator.mul)

    __rmul__ = __mul__

    def __truediv__(self, other):
        return self._scalarOp(other, operator.truediv)

    def __rtruediv__(self, other):
        return self._scalarOp(other, _operator_rtruediv)

    def __pos__(self):
        return self._unaryOp(operator.pos)

    def __neg__(self):
        return self._unaryOp(operator.neg)

    def __round__(self, *, round=round):
        # Element-wise rounding with a pluggable rounding function.
        return self._unaryOp(round)

    def __eq__(self, other):
        if isinstance(other, list):
            # bw compat Vector([1, 2, 3]) == [1, 2, 3]
            other = tuple(other)
        return super().__eq__(other)

    def __ne__(self, other):
        # NOTE(review): tuple.__eq__ can return NotImplemented for
        # unsupported operand types; 'not NotImplemented' then relies on
        # its truthiness, which is deprecated (an error in newer Pythons).
        # Confirm before using != against arbitrary non-sequence types.
        return not self.__eq__(other)

    def __bool__(self):
        # True if any component is non-zero.
        return any(self)

    __nonzero__ = __bool__

    def __abs__(self):
        # Euclidean (L2) norm.
        return math.sqrt(sum(x * x for x in self))

    def length(self):
        """Return the length of the vector. Equivalent to abs(vector)."""
        return abs(self)

    def normalized(self):
        """Return the normalized vector of the vector."""
        return self / abs(self)

    def dot(self, other):
        """Performs vector dot product, returning the sum of
        ``a[0] * b[0], a[1] * b[1], ...``"""
        assert len(self) == len(other)
        return sum(a * b for a, b in zip(self, other))

    # Deprecated methods/properties

    def toInt(self):
        warnings.warn(
            "the 'toInt' method has been deprecated, use round(vector) instead",
            DeprecationWarning,
        )
        return self.__round__()

    @property
    def values(self):
        warnings.warn(
            "the 'values' attribute has been deprecated, use "
            "the vector object itself instead",
            DeprecationWarning,
        )
        return list(self)

    @values.setter
    def values(self, values):
        raise AttributeError(
            "can't set attribute, the 'values' attribute has been deprecated",
        )

    def isclose(self, other: "Vector", **kwargs) -> bool:
        """Return True if the vector is close to another Vector."""
        assert len(self) == len(other)
        return all(math.isclose(a, b, **kwargs) for a, b in zip(self, other))
def _operator_rsub(a, b):
return operator.sub(b, a)
def _operator_rtruediv(a, b):
return operator.truediv(b, a)
# ---- venv\Lib\site-packages\fontTools\misc\visitor.py ----
"""Generic visitor pattern implementation for Python objects."""
import enum
import weakref
class Visitor(object):
defaultStop = False
_visitors = {
# By default we skip visiting weak references to avoid recursion
# issues. Users can override this by registering a visit
# function for weakref.ProxyType.
weakref.ProxyType: {None: lambda self, obj, *args, **kwargs: False}
}
@classmethod
def _register(celf, clazzes_attrs):
assert celf != Visitor, "Subclass Visitor instead."
if "_visitors" not in celf.__dict__:
celf._visitors = {}
def wrapper(method):
assert method.__name__ == "visit"
for clazzes, attrs in clazzes_attrs:
if type(clazzes) != tuple:
clazzes = (clazzes,)
if type(attrs) == str:
attrs = (attrs,)
for clazz in clazzes:
_visitors = celf._visitors.setdefault(clazz, {})
for attr in attrs:
assert attr not in _visitors, (
"Oops, class '%s' has visitor function for '%s' defined already."
% (clazz.__name__, attr)
)
_visitors[attr] = method
return None
return wrapper
@classmethod
def register(celf, clazzes):
if type(clazzes) != tuple:
clazzes = (clazzes,)
return celf._register([(clazzes, (None,))])
@classmethod
def register_attr(celf, clazzes, attrs):
clazzes_attrs = []
if type(clazzes) != tuple:
clazzes = (clazzes,)
if type(attrs) == str:
attrs = (attrs,)
for clazz in clazzes:
clazzes_attrs.append((clazz, attrs))
return celf._register(clazzes_attrs)
@classmethod
def register_attrs(celf, clazzes_attrs):
return celf._register(clazzes_attrs)
@classmethod
def _visitorsFor(celf, thing, _default={}):
typ = type(thing)
for celf in celf.mro():
_visitors = getattr(celf, "_visitors", None)
if _visitors is None:
break
for base in typ.mro():
m = celf._visitors.get(base, None)
if m is not None:
return m
return _default
def visitObject(self, obj, *args, **kwargs):
"""Called to visit an object. This function loops over all non-private
attributes of the objects and calls any user-registered (via
@register_attr() or @register_attrs()) visit() functions.
If there is no user-registered visit function, of if there is and it
returns True, or it returns None (or doesn't return anything) and
visitor.defaultStop is False (default), then the visitor will proceed
to call self.visitAttr()"""
keys = sorted(vars(obj).keys())
_visitors = self._visitorsFor(obj)
defaultVisitor = _visitors.get("*", None)
for key in keys:
if key[0] == "_":
continue
value = getattr(obj, key)
visitorFunc = _visitors.get(key, defaultVisitor)
if visitorFunc is not None:
ret = visitorFunc(self, obj, key, value, *args, **kwargs)
if ret == False or (ret is None and self.defaultStop):
continue
self.visitAttr(obj, key, value, *args, **kwargs)
def visitAttr(self, obj, attr, value, *args, **kwargs):
    """Called to visit an attribute of an object; dispatches to visit()."""
    self.visit(value, *args, **kwargs)
def visitList(self, obj, *args, **kwargs):
    """Called to visit any value that is a list; visits every element."""
    for element in obj:
        self.visit(element, *args, **kwargs)
def visitDict(self, obj, *args, **kwargs):
    """Called to visit any value that is a dictionary; visits every value."""
    for element in obj.values():
        self.visit(element, *args, **kwargs)
def visitLeaf(self, obj, *args, **kwargs):
    """Called to visit any value that is not an object, list,
    or dictionary.  Does nothing by default."""
    pass
def visit(self, obj, *args, **kwargs):
    """This is the main entry to the visitor. The visitor will visit object
    obj.

    The visitor will first determine if there is a registered (via
    @register()) visit function for the type of object. If there is, it
    will be called, and (visitor, obj, *args, **kwargs) will be passed to
    the user visit function.

    If there is no user-registered visit function, or if there is and it
    returns True, or it returns None (or doesn't return anything) and
    visitor.defaultStop is False (default), then the visitor will proceed
    to dispatch to one of self.visitObject(), self.visitList(),
    self.visitDict(), or self.visitLeaf() (any of which can be overriden in
    a subclass)."""
    userFunc = self._visitorsFor(obj).get(None, None)
    if userFunc is not None:
        ret = userFunc(self, obj, *args, **kwargs)
        if ret == False or (ret is None and self.defaultStop):
            return
    # Dispatch on the value's shape.  Enum members carry a __dict__ but are
    # treated as leaves, so they are excluded from the object branch.
    if hasattr(obj, "__dict__") and not isinstance(obj, enum.Enum):
        self.visitObject(obj, *args, **kwargs)
    elif isinstance(obj, list):
        self.visitList(obj, *args, **kwargs)
    elif isinstance(obj, dict):
        self.visitDict(obj, *args, **kwargs)
    else:
        self.visitLeaf(obj, *args, **kwargs)
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception):
    """Raised when TTX (XML font source) input cannot be parsed."""

    pass
BUFSIZE = 0x4000  # 16 KiB chunk size used when feeding the expat parser
class XMLReader(object):
    """Parses TTX (XML font dump) data and populates a TTFont.

    The expat parser is fed in BUFSIZE chunks; per-table element content is
    accumulated on ``contentStack`` and handed to each table's ``fromXML``
    when the table's element closes.
    """

    def __init__(
        self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
    ):
        # "-" means read from standard input.
        if fileOrPath == "-":
            fileOrPath = sys.stdin
        if not hasattr(fileOrPath, "read"):
            self.file = open(fileOrPath, "rb")
            # We opened the stream ourselves, so we are responsible for
            # closing it after read().
            self._closeStream = True
        else:
            # assume readable file object
            self.file = fileOrPath
            self._closeStream = False
        self.ttFont = ttFont
        self.progress = progress
        if quiet is not None:
            # "quiet" is deprecated; kept only for backward compatibility.
            from fontTools.misc.loggingTools import deprecateArgument

            deprecateArgument("quiet", "configure logging instead")
            self.quiet = quiet
        self.root = None
        self.contentStack = []
        self.contentOnly = contentOnly
        self.stackSize = 0

    def read(self, rootless=False):
        """Parse the file.  With ``rootless=True``, parse content that lacks
        the enclosing root element (pretend we are already inside it)."""
        if rootless:
            self.stackSize += 1
        if self.progress:
            # Seek to the end to measure total size for progress reporting.
            self.file.seek(0, 2)
            fileSize = self.file.tell()
            self.progress.set(0, fileSize // 100 or 1)
        self.file.seek(0)
        self._parseFile(self.file)
        if self._closeStream:
            self.close()
        if rootless:
            self.stackSize -= 1

    def close(self):
        """Close the underlying stream."""
        self.file.close()

    def _parseFile(self, file):
        # Incremental parse: feed expat BUFSIZE chunks so progress can be
        # reported while reading large files.
        from xml.parsers.expat import ParserCreate

        parser = ParserCreate()
        parser.StartElementHandler = self._startElementHandler
        parser.EndElementHandler = self._endElementHandler
        parser.CharacterDataHandler = self._characterDataHandler

        pos = 0
        while True:
            chunk = file.read(BUFSIZE)
            if not chunk:
                # Empty chunk with isfinal=1 flushes the parser.
                parser.Parse(chunk, 1)
                break
            pos = pos + len(chunk)
            if self.progress:
                self.progress.set(pos // 100)
            parser.Parse(chunk, 0)

    def _startElementHandler(self, name, attrs):
        # stackSize encodes parser depth: 0 = before root, 1 = inside root
        # (table level), 2 = inside a table, >2 = nested content.
        if self.stackSize == 1 and self.contentOnly:
            # We already know the table we're parsing, skip
            # parsing the table tag and continue to
            # stack '2' which begins parsing content
            self.contentStack.append([])
            self.stackSize = 2
            return
        stackSize = self.stackSize
        self.stackSize = stackSize + 1
        subFile = attrs.get("src")
        if subFile is not None:
            # src="..." references an external TTX fragment, resolved
            # relative to this file when possible.
            if hasattr(self.file, "name"):
                # if file has a name, get its parent directory
                dirname = os.path.dirname(self.file.name)
            else:
                # else fall back to using the current working directory
                dirname = os.getcwd()
            subFile = os.path.join(dirname, subFile)
        if not stackSize:
            if name != "ttFont":
                raise TTXParseError("illegal root tag: %s" % name)
            if self.ttFont.reader is None and not self.ttFont.tables:
                sfntVersion = attrs.get("sfntVersion")
                if sfntVersion is not None:
                    if len(sfntVersion) != 4:
                        # e.g. "OTTO" vs an escaped form; normalize via eval.
                        sfntVersion = safeEval('"' + sfntVersion + '"')
                    self.ttFont.sfntVersion = sfntVersion
            self.contentStack.append([])
        elif stackSize == 1:
            if subFile is not None:
                # Whole table sourced from an external file.
                subReader = XMLReader(subFile, self.ttFont, self.progress)
                subReader.read()
                self.contentStack.append([])
                return
            tag = ttLib.xmlToTag(name)
            msg = "Parsing '%s' table..." % tag
            if self.progress:
                self.progress.setLabel(msg)
            log.info(msg)
            if tag == "GlyphOrder":
                tableClass = ttLib.GlyphOrder
            elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
                tableClass = DefaultTable
            else:
                tableClass = ttLib.getTableClass(tag)
                if tableClass is None:
                    tableClass = DefaultTable
            if tag == "loca" and tag in self.ttFont:
                # Special-case the 'loca' table as we need the
                # original if the 'glyf' table isn't recompiled.
                self.currentTable = self.ttFont[tag]
            else:
                self.currentTable = tableClass(tag)
                self.ttFont[tag] = self.currentTable
            self.contentStack.append([])
        elif stackSize == 2 and subFile is not None:
            # Table content sourced from an external file.
            subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
            subReader.read()
            self.contentStack.append([])
            self.root = subReader.root
        elif stackSize == 2:
            self.contentStack.append([])
            self.root = (name, attrs, self.contentStack[-1])
        else:
            l = []
            self.contentStack[-1].append((name, attrs, l))
            self.contentStack.append(l)

    def _characterDataHandler(self, data):
        if self.stackSize > 1:
            # parser parses in chunks, so we may get multiple calls
            # for the same text node; thus we need to append the data
            # to the last item in the content stack:
            # https://github.com/fonttools/fonttools/issues/2614
            if (
                data != "\n"
                and self.contentStack[-1]
                and isinstance(self.contentStack[-1][-1], str)
                and self.contentStack[-1][-1] != "\n"
            ):
                self.contentStack[-1][-1] += data
            else:
                self.contentStack[-1].append(data)

    def _endElementHandler(self, name):
        self.stackSize = self.stackSize - 1
        del self.contentStack[-1]
        if not self.contentOnly:
            if self.stackSize == 1:
                self.root = None
            elif self.stackSize == 2:
                # A table element just closed: hand its accumulated content
                # to the table object.
                name, attrs, content = self.root
                self.currentTable.fromXML(name, attrs, content, self.ttFont)
                self.root = None
class ProgressPrinter(object):
    """Minimal progress reporter that just prints messages to stdout.

    Exposes the same interface as a graphical progress bar (set/increment/
    setLabel), but only the title and labels are ever printed."""

    def __init__(self, title, maxval=100):
        # maxval is accepted for interface compatibility; it is unused.
        print(title)

    def set(self, val, maxval=None):
        # Intentionally a no-op: no visual progress is rendered.
        pass

    def increment(self, val=1):
        # Intentionally a no-op: no visual progress is rendered.
        pass

    def setLabel(self, text):
        print(text)
from __future__ import annotations
from collections import namedtuple, OrderedDict
import itertools
from typing import Dict, Union
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.roundTools import otRound
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTLOffsetOverflowError,
OTTableWriter,
)
from fontTools.ttLib.ttFont import TTFont
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from fontTools.misc.loggingTools import deprecateFunction
from functools import reduce
import logging
import copy
log = logging.getLogger(__name__)
def buildCoverage(glyphs, glyphMap):
    """Builds a coverage table.

    Coverage tables (as defined in the OpenType spec) are used in all
    OpenType Layout lookups apart from the Extension type, and define the
    glyphs involved in a layout subtable. This allows shaping engines to
    compare the glyph stream with the coverage table and quickly determine
    whether a subtable should be involved in a shaping operation.

    This function takes a list of glyphs and a glyphname-to-ID map, and
    returns a ``Coverage`` object representing the coverage table.

    Example::

        glyphMap = font.getReverseGlyphMap()
        glyphs = [ "A", "B", "C" ]
        coverage = buildCoverage(glyphs, glyphMap)

    Args:
        glyphs: a sequence of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.Coverage`` object or ``None`` if there are no glyphs
        supplied.

    Raises:
        ValueError: if a glyph is not present in *glyphMap*.
    """
    if not glyphs:
        return None
    coverage = ot.Coverage()
    # Deduplicate, then order by glyph ID as the spec requires.
    try:
        coverage.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__)
    except KeyError as e:
        raise ValueError(f"Could not find glyph {e} in font") from e
    return coverage
# Lookup flag bits, per the OpenType spec's LookupFlag field.
LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001
LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002
LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004
LOOKUP_FLAG_IGNORE_MARKS = 0x0008
LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010
def buildLookup(subtables, flags=0, markFilterSet=None, table=None, extension=False):
    """Turns a collection of rules into a lookup.

    A Lookup (as defined in the OpenType Spec) wraps the individual rules in
    a layout operation (substitution or positioning) in a data structure
    expressing their overall lookup type - for example, single substitution,
    mark-to-base attachment, and so on - as well as the lookup flags and any
    mark filtering sets. You may import the following constants to express
    lookup flags:

    - ``LOOKUP_FLAG_RIGHT_TO_LEFT``
    - ``LOOKUP_FLAG_IGNORE_BASE_GLYPHS``
    - ``LOOKUP_FLAG_IGNORE_LIGATURES``
    - ``LOOKUP_FLAG_IGNORE_MARKS``
    - ``LOOKUP_FLAG_USE_MARK_FILTERING_SET``

    Args:
        subtables: A list of layout subtable objects (e.g.
            ``MultipleSubst``, ``PairPos``, etc.) or ``None``.
        flags (int): This lookup's flags.
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
        table (str): The name of the table this lookup belongs to, e.g. "GPOS" or "GSUB".
        extension (bool): ``True`` if this is an extension lookup, ``False`` otherwise.

    Returns:
        An ``otTables.Lookup`` object or ``None`` if there are no subtables
        supplied.
    """
    if subtables is None:
        return None
    subtables = [st for st in subtables if st is not None]
    if not subtables:
        return None
    firstType = subtables[0].LookupType
    assert all(st.LookupType == firstType for st in subtables), (
        "all subtables must have the same LookupType; got %s"
        % repr([t.LookupType for t in subtables])
    )

    if extension:
        # Wrap each subtable in an Extension subtable (GSUB7 / GPOS9).
        assert table in ("GPOS", "GSUB")
        lookupType = 7 if table == "GSUB" else 9
        extSubTableClass = ot.lookupTypes[table][lookupType]
        wrapped = []
        for st in subtables:
            ext = extSubTableClass()
            ext.Format = 1
            ext.ExtSubTable = st
            ext.ExtensionLookupType = st.LookupType
            wrapped.append(ext)
        subtables = wrapped
    else:
        lookupType = firstType

    lookup = ot.Lookup()
    lookup.LookupType = lookupType
    lookup.LookupFlag = flags
    lookup.SubTable = subtables
    lookup.SubTableCount = len(lookup.SubTable)
    if markFilterSet is not None:
        lookup.LookupFlag |= LOOKUP_FLAG_USE_MARK_FILTERING_SET
        assert isinstance(markFilterSet, int), markFilterSet
        lookup.MarkFilteringSet = markFilterSet
    else:
        assert (lookup.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, (
            "if markFilterSet is None, flags must not set "
            "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags
        )
    return lookup
class LookupBuilder(object):
    """Base class for all OpenType lookup builders.

    Holds the state shared by every lookup type: the font, the source
    location, the owning table ("GPOS" or "GSUB"), the lookup type number,
    lookup flags, and an optional mark filtering set."""

    SUBTABLE_BREAK_ = "SUBTABLE_BREAK"

    def __init__(self, font, location, table, lookup_type, extension=False):
        self.font = font
        self.glyphMap = font.getReverseGlyphMap()
        self.location = location
        self.table = table
        self.lookup_type = lookup_type
        self.lookupflag = 0
        self.markFilterSet = None
        # Assigned later, when the final lookup list is assembled.
        self.lookup_index = None
        self.extension = extension
        assert table in ("GPOS", "GSUB")

    def equals(self, other):
        """True if *other* builds a mergeable lookup: same builder class,
        table, flags, mark filtering set and extension-ness."""
        if not isinstance(other, self.__class__):
            return False
        return (
            self.table == other.table
            and self.lookupflag == other.lookupflag
            and self.markFilterSet == other.markFilterSet
            and self.extension == other.extension
        )

    def promote_lookup_type(self, is_named_lookup):
        # Default: keep the builder as-is; subclasses may promote/split.
        return [self]

    def inferGlyphClasses(self):
        """Infers glyph glasses for the GDEF table, such as {"cedilla":3}."""
        return {}

    def getAlternateGlyphs(self):
        """Helper for building 'aalt' features."""
        return {}

    def buildLookup_(self, subtables):
        # Wrap the given subtables into an ot.Lookup carrying this
        # builder's flags and filtering set.
        return buildLookup(
            subtables, self.lookupflag, self.markFilterSet, self.table, self.extension
        )

    def buildMarkClasses_(self, marks):
        """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1}

        Helper for MarkBasePostBuilder, MarkLigPosBuilder, and
        MarkMarkPosBuilder. Seems to return the same numeric IDs
        for mark classes as the AFDKO makeotf tool.
        """
        ids = {}
        for mark in sorted(marks.keys(), key=self.font.getGlyphID):
            markClassName = marks[mark][0]
            ids.setdefault(markClassName, len(ids))
        return ids

    def setBacktrackCoverage_(self, prefix, subtable):
        # Backtrack coverages are stored in reverse (closest-first) order.
        subtable.BacktrackGlyphCount = len(prefix)
        subtable.BacktrackCoverage = [
            buildCoverage(p, self.glyphMap) for p in reversed(prefix)
        ]

    def setLookAheadCoverage_(self, suffix, subtable):
        subtable.LookAheadGlyphCount = len(suffix)
        subtable.LookAheadCoverage = [buildCoverage(s, self.glyphMap) for s in suffix]

    def setInputCoverage_(self, glyphs, subtable):
        subtable.InputGlyphCount = len(glyphs)
        subtable.InputCoverage = [buildCoverage(g, self.glyphMap) for g in glyphs]

    def setCoverage_(self, glyphs, subtable):
        subtable.GlyphCount = len(glyphs)
        subtable.Coverage = [buildCoverage(g, self.glyphMap) for g in glyphs]

    def build_subst_subtables(self, mapping, klass):
        # Split the mapping into runs at explicit subtable-break sentinels;
        # each run becomes one subtable built by *klass*.
        runs = [{}]
        for key, value in mapping.items():
            if key[0] == self.SUBTABLE_BREAK_:
                runs.append({})
            else:
                runs[-1][key] = value
        return [klass(run) for run in runs]

    def add_subtable_break(self, location):
        """Add an explicit subtable break.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this break, or ``None`` if
                no location is provided.
        """
        # Most lookup types do not support explicit breaks; warn instead.
        log.warning(
            OpenTypeLibError(
                'unsupported "subtable" statement for lookup type', location
            )
        )

    def can_add_mapping(self, _mapping) -> bool:
        # used by AnySubstBuilder, below
        return True
class AlternateSubstBuilder(LookupBuilder):
    """Builds an Alternate Substitution (GSUB3) lookup.

    Users are expected to manually add alternate glyph substitutions to
    the ``alternates`` attribute after the object has been initialized,
    e.g.::

        builder.alternates["A"] = ["A.alt1", "A.alt2"]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        alternates: An ordered dictionary of alternates, mapping glyph names
            to a list of names of alternates.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 3)
        self.alternates = OrderedDict()

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.alternates == other.alternates

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the alternate
            substitution lookup.
        """
        return self.buildLookup_(
            self.build_subst_subtables(self.alternates, buildAlternateSubstSubtable)
        )

    def getAlternateGlyphs(self):
        # The mapping is already {glyph: [alternates...]}.
        return self.alternates

    def add_subtable_break(self, location):
        # Record the break as a sentinel entry in the mapping.
        self.alternates[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ChainContextualRule(
    namedtuple("ChainContextualRule", ["prefix", "glyphs", "suffix", "lookups"])
):
    """A single contextual rule: (prefix, glyphs, suffix, lookups)."""

    @property
    def is_subtable_break(self):
        # Break markers are encoded as rules whose prefix is the sentinel.
        return self.prefix == LookupBuilder.SUBTABLE_BREAK_
class ChainContextualRuleset:
    """A run of contextual rules between explicit subtable breaks."""

    def __init__(self):
        self.rules = []

    def addRule(self, rule):
        """Append a ChainContextualRule to this ruleset."""
        self.rules.append(rule)

    @property
    def hasPrefixOrSuffix(self):
        # Do we have any prefixes/suffixes? If this is False for all
        # rulesets, we can express the whole lookup as GPOS5/GSUB7.
        return any(len(rule.prefix) > 0 or len(rule.suffix) > 0 for rule in self.rules)

    @property
    def hasAnyGlyphClasses(self):
        # Do we use glyph classes anywhere in the rules? If this is False
        # we can express this subtable as a Format 1.
        for rule in self.rules:
            for sequence in (rule.prefix, rule.glyphs, rule.suffix):
                if any(len(glyphset) > 1 for glyphset in sequence):
                    return True
        return False

    def format2ClassDefs(self):
        """Try to build the three class definitions (backtrack, input,
        lookahead) needed for a format 2 subtable; return None if any
        position cannot be expressed as disjoint classes."""
        classDefBuilders = []
        for ix in range(3):  # 0 = prefix, 1 = glyphs, 2 = suffix
            context = [rule[ix] for rule in self.rules]
            classes = self._classBuilderForContext(context)
            if not classes:
                return None
            classDefBuilders.append(classes)
        return classDefBuilders

    def _classBuilderForContext(self, context):
        builder = ClassDefBuilder(useClass0=False)
        for position in context:
            for glyphset in position:
                glyphs = set(glyphset)
                if not builder.canAdd(glyphs):
                    # Overlapping, non-identical classes: not expressible.
                    return None
                builder.add(glyphs)
        return builder
class ChainContextualBuilder(LookupBuilder):
    """Shared machinery for contextual and chained-contextual lookups
    (GSUB5/GSUB6/GPOS7/GPOS8).

    Subclasses (ChainContextPosBuilder, ChainContextSubstBuilder) supply
    ``self.rules`` (a list of ChainContextualRule) and ``self.subtable_type``
    ("Pos" or "Subst")."""

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.rules == other.rules

    def rulesets(self):
        """Return a list of ChainContextualRuleset objects, taking explicit
        subtable breaks into account."""
        ruleset = [ChainContextualRuleset()]
        for rule in self.rules:
            if rule.is_subtable_break:
                ruleset.append(ChainContextualRuleset())
                continue
            ruleset[-1].addRule(rule)
        # Squish any empty subtables
        return [x for x in ruleset if len(x.rules) > 0]

    def getCompiledSize_(self, subtables):
        """Return the compiled byte size of a lookup wrapping *subtables*;
        used to pick the smallest candidate format."""
        if not subtables:
            return 0
        # We need to make a copy here because compiling
        # modifies the subtable (finalizing formats etc.)
        table = self.buildLookup_(copy.deepcopy(subtables))
        w = OTTableWriter()
        table.compile(w, self.font)
        size = len(w.getAllData())
        return size

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the chained
            contextual positioning lookup.
        """
        subtables = []
        rulesets = self.rulesets()
        chaining = any(ruleset.hasPrefixOrSuffix for ruleset in rulesets)

        # https://github.com/fonttools/fonttools/issues/2539
        #
        # Unfortunately, as of 2022-03-07, Apple's CoreText renderer does not
        # correctly process GPOS7 lookups, so for now we force contextual
        # positioning lookups to be chaining (GPOS8).
        #
        # This seems to be fixed as of macOS 13.2, but we keep disabling this
        # for now until we are no longer concerned about old macOS versions.
        # But we allow people to opt-out of this with the config key below.
        write_gpos7 = self.font.cfg.get("fontTools.otlLib.builder:WRITE_GPOS7")
        # horrible separation of concerns breach
        if not write_gpos7 and self.subtable_type == "Pos":
            chaining = True

        for ruleset in rulesets:
            # Determine format strategy. We try to build formats 1, 2 and 3
            # subtables and then work out which is best. candidates list holds
            # the subtables in each format for this ruleset (including a dummy
            # "format 0" to make the addressing match the format numbers).

            # We can always build a format 3 lookup by accumulating each of
            # the rules into a list, so start with that.
            candidates = [None, None, None, []]
            for rule in ruleset.rules:
                candidates[3].append(self.buildFormat3Subtable(rule, chaining))

            # Can we express the whole ruleset as a format 2 subtable?
            classdefs = ruleset.format2ClassDefs()
            if classdefs:
                candidates[2] = [
                    self.buildFormat2Subtable(ruleset, classdefs, chaining)
                ]

            if not ruleset.hasAnyGlyphClasses:
                candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)]

            candidates_by_size = []
            for i in [1, 2, 3]:
                if candidates[i]:
                    try:
                        size = self.getCompiledSize_(candidates[i])
                    except OTLOffsetOverflowError as e:
                        log.warning(
                            "Contextual format %i at %s overflowed (%s)"
                            % (i, str(self.location), e)
                        )
                    else:
                        candidates_by_size.append((size, candidates[i]))

            if not candidates_by_size:
                raise OpenTypeLibError("All candidates overflowed", self.location)

            _min_size, winner = min(candidates_by_size, key=lambda x: x[0])
            subtables.extend(winner)

        # If we are not chaining, lookup type will be automatically fixed by
        # buildLookup_
        return self.buildLookup_(subtables)

    def buildFormat1Subtable(self, ruleset, chaining=True):
        """Build a format 1 (per-first-glyph rule sets) subtable."""
        st = self.newSubtable_(chaining=chaining)
        st.Format = 1
        st.populateDefaults()
        coverage = set()
        rulesetsByFirstGlyph = {}
        ruleAttr = self.ruleAttr_(format=1, chaining=chaining)

        for rule in ruleset.rules:
            ruleAsSubtable = self.newRule_(format=1, chaining=chaining)

            if chaining:
                ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix)
                ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix)
                # Backtrack is stored in reverse (closest-first) order.
                ruleAsSubtable.Backtrack = [list(x)[0] for x in reversed(rule.prefix)]
                ruleAsSubtable.LookAhead = [list(x)[0] for x in rule.suffix]

                ruleAsSubtable.InputGlyphCount = len(rule.glyphs)
            else:
                ruleAsSubtable.GlyphCount = len(rule.glyphs)

            # The first input glyph is expressed via Coverage, not Input.
            ruleAsSubtable.Input = [list(x)[0] for x in rule.glyphs[1:]]

            self.buildLookupList(rule, ruleAsSubtable)

            firstGlyph = list(rule.glyphs[0])[0]
            if firstGlyph not in rulesetsByFirstGlyph:
                coverage.add(firstGlyph)
                rulesetsByFirstGlyph[firstGlyph] = []
            rulesetsByFirstGlyph[firstGlyph].append(ruleAsSubtable)

        st.Coverage = buildCoverage(coverage, self.glyphMap)
        ruleSets = []
        for g in st.Coverage.glyphs:
            ruleSet = self.newRuleSet_(format=1, chaining=chaining)
            setattr(ruleSet, ruleAttr, rulesetsByFirstGlyph[g])
            setattr(ruleSet, f"{ruleAttr}Count", len(rulesetsByFirstGlyph[g]))
            ruleSets.append(ruleSet)

        setattr(st, self.ruleSetAttr_(format=1, chaining=chaining), ruleSets)
        setattr(
            st, self.ruleSetAttr_(format=1, chaining=chaining) + "Count", len(ruleSets)
        )

        return st

    def buildFormat2Subtable(self, ruleset, classdefs, chaining=True):
        """Build a format 2 (class-based) subtable from prebuilt class
        definitions for the backtrack, input and lookahead positions."""
        st = self.newSubtable_(chaining=chaining)
        st.Format = 2
        st.populateDefaults()

        if chaining:
            (
                st.BacktrackClassDef,
                st.InputClassDef,
                st.LookAheadClassDef,
            ) = [c.build() for c in classdefs]
        else:
            st.ClassDef = classdefs[1].build()

        inClasses = classdefs[1].classes()

        classSets = []
        for _ in inClasses:
            classSet = self.newRuleSet_(format=2, chaining=chaining)
            classSets.append(classSet)

        coverage = set()
        classRuleAttr = self.ruleAttr_(format=2, chaining=chaining)

        for rule in ruleset.rules:
            ruleAsSubtable = self.newRule_(format=2, chaining=chaining)
            if chaining:
                ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix)
                ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix)
                # The glyphs in the rule may be list, tuple, odict_keys...
                # Order is not important anyway because they are guaranteed
                # to be members of the same class.
                ruleAsSubtable.Backtrack = [
                    st.BacktrackClassDef.classDefs[list(x)[0]]
                    for x in reversed(rule.prefix)
                ]
                ruleAsSubtable.LookAhead = [
                    st.LookAheadClassDef.classDefs[list(x)[0]] for x in rule.suffix
                ]

                ruleAsSubtable.InputGlyphCount = len(rule.glyphs)
                ruleAsSubtable.Input = [
                    st.InputClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:]
                ]
                setForThisRule = classSets[
                    st.InputClassDef.classDefs[list(rule.glyphs[0])[0]]
                ]
            else:
                ruleAsSubtable.GlyphCount = len(rule.glyphs)
                ruleAsSubtable.Class = [  # The spec calls this InputSequence
                    st.ClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:]
                ]
                setForThisRule = classSets[
                    st.ClassDef.classDefs[list(rule.glyphs[0])[0]]
                ]

            self.buildLookupList(rule, ruleAsSubtable)
            coverage |= set(rule.glyphs[0])

            getattr(setForThisRule, classRuleAttr).append(ruleAsSubtable)
            setattr(
                setForThisRule,
                f"{classRuleAttr}Count",
                getattr(setForThisRule, f"{classRuleAttr}Count") + 1,
            )

        for i, classSet in enumerate(classSets):
            if not getattr(classSet, classRuleAttr):
                # class sets can be null so replace nop sets with None
                classSets[i] = None

        setattr(st, self.ruleSetAttr_(format=2, chaining=chaining), classSets)
        setattr(
            st, self.ruleSetAttr_(format=2, chaining=chaining) + "Count", len(classSets)
        )

        st.Coverage = buildCoverage(coverage, self.glyphMap)
        return st

    def buildFormat3Subtable(self, rule, chaining=True):
        """Build a format 3 (coverage-based, one-rule) subtable."""
        st = self.newSubtable_(chaining=chaining)
        st.Format = 3
        if chaining:
            self.setBacktrackCoverage_(rule.prefix, st)
            self.setLookAheadCoverage_(rule.suffix, st)
            self.setInputCoverage_(rule.glyphs, st)
        else:
            self.setCoverage_(rule.glyphs, st)
        self.buildLookupList(rule, st)
        return st

    def buildLookupList(self, rule, st):
        """Attach the rule's per-position lookup records to subtable *st*.

        Raises:
            OpenTypeLibError: if a referenced lookup has no index yet
                (typically a GSUB/GPOS mixup in the source)."""
        for sequenceIndex, lookupList in enumerate(rule.lookups):
            if lookupList is not None:
                if not isinstance(lookupList, list):
                    # Can happen with synthesised lookups
                    lookupList = [lookupList]
                for l in lookupList:
                    if l.lookup_index is None:
                        if isinstance(self, ChainContextPosBuilder):
                            other = "substitution"
                        else:
                            other = "positioning"
                        raise OpenTypeLibError(
                            "Missing index of the specified "
                            f"lookup, might be a {other} lookup",
                            self.location,
                        )
                    rec = self.newLookupRecord_(st)
                    rec.SequenceIndex = sequenceIndex
                    rec.LookupListIndex = l.lookup_index

    def add_subtable_break(self, location):
        # Contextual lookups support explicit breaks: record a sentinel rule.
        self.rules.append(
            ChainContextualRule(
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                [self.SUBTABLE_BREAK_],
            )
        )

    def newSubtable_(self, chaining=True):
        subtablename = f"Context{self.subtable_type}"
        if chaining:
            subtablename = "Chain" + subtablename
        st = getattr(ot, subtablename)()  # ot.ChainContextPos()/ot.ChainSubst()/etc.
        setattr(st, f"{self.subtable_type}Count", 0)
        setattr(st, f"{self.subtable_type}LookupRecord", [])
        return st

    # Format 1 and format 2 GSUB5/GSUB6/GPOS7/GPOS8 rulesets and rules form a family:
    #
    #       format 1 ruleset      format 1 rule      format 2 ruleset      format 2 rule
    # GSUB5 SubRuleSet            SubRule            SubClassSet           SubClassRule
    # GSUB6 ChainSubRuleSet       ChainSubRule       ChainSubClassSet      ChainSubClassRule
    # GPOS7 PosRuleSet            PosRule            PosClassSet           PosClassRule
    # GPOS8 ChainPosRuleSet       ChainPosRule       ChainPosClassSet      ChainPosClassRule
    #
    # The following functions generate the attribute names and subtables according
    # to this naming convention.
    def ruleSetAttr_(self, format=1, chaining=True):
        if format == 1:
            formatType = "Rule"
        elif format == 2:
            formatType = "Class"
        else:
            # Bug fix: this used to raise AssertionError(formatType), but
            # formatType is unbound on this path, which raised a confusing
            # NameError instead; report the offending format value.
            raise AssertionError(format)
        subtablename = f"{self.subtable_type[0:3]}{formatType}Set"  # Sub, not Subst.
        if chaining:
            subtablename = "Chain" + subtablename
        return subtablename

    def ruleAttr_(self, format=1, chaining=True):
        if format == 1:
            formatType = ""
        elif format == 2:
            formatType = "Class"
        else:
            # Bug fix: see ruleSetAttr_ — report the bad format value rather
            # than raising NameError on the unbound formatType.
            raise AssertionError(format)
        subtablename = f"{self.subtable_type[0:3]}{formatType}Rule"  # Sub, not Subst.
        if chaining:
            subtablename = "Chain" + subtablename
        return subtablename

    def newRuleSet_(self, format=1, chaining=True):
        st = getattr(
            ot, self.ruleSetAttr_(format, chaining)
        )()  # ot.ChainPosRuleSet()/ot.SubRuleSet()/etc.
        st.populateDefaults()
        return st

    def newRule_(self, format=1, chaining=True):
        st = getattr(
            ot, self.ruleAttr_(format, chaining)
        )()  # ot.ChainPosClassRule()/ot.SubClassRule()/etc.
        st.populateDefaults()
        return st

    def attachSubtableWithCount_(
        self, st, subtable_name, count_name, existing=None, index=None, chaining=False
    ):
        """Append (or insert) a subtable attribute on *st*, keeping the
        matching ...Count attribute in sync; returns the subtable."""
        if chaining:
            subtable_name = "Chain" + subtable_name
            count_name = "Chain" + count_name

        if not hasattr(st, count_name):
            setattr(st, count_name, 0)
            setattr(st, subtable_name, [])

        if existing:
            new_subtable = existing
        else:
            # Create a new, empty subtable from otTables
            new_subtable = getattr(ot, subtable_name)()

        setattr(st, count_name, getattr(st, count_name) + 1)

        if index:
            getattr(st, subtable_name).insert(index, new_subtable)
        else:
            getattr(st, subtable_name).append(new_subtable)

        return new_subtable

    def newLookupRecord_(self, st):
        return self.attachSubtableWithCount_(
            st,
            f"{self.subtable_type}LookupRecord",
            f"{self.subtable_type}Count",
            chaining=False,
        )  # Oddly, it isn't ChainSubstLookupRecord

class ChainContextPosBuilder(ChainContextualBuilder):
    """Builds a Chained Contextual Positioning (GPOS8) lookup.

    Users are expected to manually add rules to the ``rules`` attribute after
    the object has been initialized, e.g.::

        # pos [A B] [C D] x' lookup lu1 y' z' lookup lu2 E;

        prefix  = [ ["A", "B"], ["C", "D"] ]
        suffix  = [ ["E"] ]
        glyphs  = [ ["x"], ["y"], ["z"] ]
        lookups = [ [lu1], None,  [lu2] ]
        builder.rules.append( (prefix, glyphs, suffix, lookups) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of tuples representing the rules in this lookup.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 8)
        self.rules = []
        self.subtable_type = "Pos"

    def find_chainable_single_pos(self, lookups, glyphs, value):
        """Helper for add_single_pos_chained_()"""
        candidate = None
        # Scan backwards; stop at an explicit subtable break.
        for lookup in reversed(lookups):
            if lookup == self.SUBTABLE_BREAK_:
                break
            if isinstance(lookup, SinglePosBuilder) and all(
                lookup.can_add(glyph, value) for glyph in glyphs
            ):
                candidate = lookup
        return candidate
class ChainContextSubstBuilder(ChainContextualBuilder):
    """Builds a Chained Contextual Substitution (GSUB6) lookup.

    Users are expected to manually add rules to the ``rules`` attribute after
    the object has been initialized, e.g.::

        # sub [A B] [C D] x' lookup lu1 y' z' lookup lu2 E;

        prefix  = [ ["A", "B"], ["C", "D"] ]
        suffix  = [ ["E"] ]
        glyphs  = [ ["x"], ["y"], ["z"] ]
        lookups = [ [lu1], None,  [lu2] ]
        builder.rules.append( (prefix, glyphs, suffix, lookups) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of tuples representing the rules in this lookup.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 6)
        # Each rule is (prefix, input, suffix, lookups).
        self.rules = []
        self.subtable_type = "Subst"

    def getAlternateGlyphs(self):
        result = {}
        for rule in self.rules:
            if rule.is_subtable_break:
                continue
            for lookups in rule.lookups:
                if not isinstance(lookups, list):
                    lookups = [lookups]
                for lookup in lookups:
                    if lookup is None:
                        continue
                    alts = lookup.getAlternateGlyphs()
                    for glyph, replacements in alts.items():
                        known = result.setdefault(glyph, [])
                        known.extend(g for g in replacements if g not in known)
        return result

    def find_chainable_subst(self, mapping, builder_class):
        """Helper for add_{single,multi}_subst_chained_()"""
        res = None
        # Scan backwards; stop at an explicit subtable break.
        for rule in reversed(self.rules):
            if rule.is_subtable_break:
                return res
            for sub in rule.lookups:
                if isinstance(sub, builder_class) and not any(
                    g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping
                ):
                    res = sub
        return res

    def find_chainable_ligature_subst(self, glyphs, replacement):
        """Helper for add_ligature_subst_chained_()"""
        res = None
        for rule in reversed(self.rules):
            if rule.is_subtable_break:
                return res
            for sub in rule.lookups:
                if not isinstance(sub, LigatureSubstBuilder):
                    continue
                # Compatible only if every component sequence either is
                # absent or already maps to the same replacement.
                if all(
                    sub.ligatures.get(seq, replacement) == replacement
                    for seq in itertools.product(*glyphs)
                ):
                    res = sub
        return res
class LigatureSubstBuilder(LookupBuilder):
    """Builds a Ligature Substitution (GSUB4) lookup.

    Users are expected to manually add ligatures to the ``ligatures``
    attribute after the object has been initialized, e.g.::

        # sub f i by f_i;
        builder.ligatures[("f","f","i")] = "f_f_i"

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        ligatures: An ordered dictionary mapping a tuple of glyph names to the
            ligature glyphname.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 4)
        # e.g. {('f','f','i'): 'f_f_i'}
        self.ligatures = OrderedDict()

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.ligatures == other.ligatures

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the ligature
            substitution lookup.
        """
        return self.buildLookup_(
            self.build_subst_subtables(self.ligatures, buildLigatureSubstSubtable)
        )

    def getAlternateGlyphs(self):
        # https://github.com/fonttools/fonttools/issues/3845
        # Single-component "ligatures" are effectively alternates.
        return {
            components[0]: [ligature]
            for components, ligature in self.ligatures.items()
            if len(components) == 1
        }

    def add_subtable_break(self, location):
        # Record the break as a sentinel entry in the mapping.
        self.ligatures[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class MultipleSubstBuilder(LookupBuilder):
    """Builds a Multiple Substitution (GSUB2) lookup.

    Substitutions are added directly to the ``mapping`` attribute after
    construction, e.g.::

        # sub uni06C0 by uni06D5.fina hamza.above;
        builder.mapping["uni06C0"] = [ "uni06D5.fina", "hamza.above"]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: An ordered dictionary mapping a glyph name to a list of
            substituted glyph names.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 2)
        self.mapping = OrderedDict()

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.mapping == other.mapping

    def build(self):
        """Build and return the ``otTables.Lookup`` for this multiple
        substitution lookup."""
        subtables = self.build_subst_subtables(self.mapping, buildMultipleSubstSubtable)
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        # https://github.com/fonttools/fonttools/issues/3845
        # Only one-glyph output sequences behave like single substitutions.
        result = {}
        for glyph, replacements in self.mapping.items():
            if len(replacements) == 1:
                result[glyph] = replacements
        return result

    def add_subtable_break(self, location):
        # Sentinel entry: keys starting with the break marker split subtables.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class CursivePosBuilder(LookupBuilder):
    """Builds a Cursive Positioning (GPOS3) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        attachments: An ordered dictionary mapping a glyph name to a
            two-element tuple of ``otTables.Anchor`` objects (entry, exit).
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 3)
        self.attachments = {}

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.attachments == other.attachments

    def add_attachment(self, location, glyphs, entryAnchor, exitAnchor):
        """Adds attachment information to the cursive positioning lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this lookup. (Unused.)
            glyphs: A list of glyph names sharing these entry and exit
                anchor locations.
            entryAnchor: A ``otTables.Anchor`` object representing the
                entry anchor, or ``None`` if no entry anchor is present.
            exitAnchor: A ``otTables.Anchor`` object representing the
                exit anchor, or ``None`` if no exit anchor is present.
        """
        anchors = (entryAnchor, exitAnchor)
        for glyph in glyphs:
            self.attachments[glyph] = anchors

    def build(self):
        """Build and return the ``otTables.Lookup`` for this cursive
        positioning lookup.

        Attachments are partitioned into separate subtables at explicit
        subtable-break markers.
        """
        groups = [{}]
        for key, anchors in self.attachments.items():
            if key[0] == self.SUBTABLE_BREAK_:
                # Break marker: start a fresh subtable.
                groups.append({})
            else:
                groups[-1][key] = anchors
        subtables = [buildCursivePosSubtable(g, self.glyphMap) for g in groups]
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Sentinel entry: a key whose first element is the break marker.
        self.attachments[(self.SUBTABLE_BREAK_, location)] = (
            self.SUBTABLE_BREAK_,
            self.SUBTABLE_BREAK_,
        )
class MarkBasePosBuilder(LookupBuilder):
    """Builds a Mark-To-Base Positioning (GPOS4) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``bases`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"] = (0, a1)
        builder.marks["grave"] = (0, a1)
        builder.marks["cedilla"] = (1, a2)
        builder.bases["a"] = {0: a3, 1: a5}
        builder.bases["b"] = {0: a4, 1: a5}

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        bases: A dictionary mapping a glyph name to a dictionary of
            mark class IDs and ``otTables.Anchor`` object.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 4)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.bases = {}  # glyphName -> {markClassName: anchor}
        self.subtables_ = []  # (marks, bases) pairs closed by subtable breaks

    def get_subtables_(self):
        # Return the closed subtables plus the currently-open (marks, bases)
        # pair. BUGFIX: the previous implementation appended to
        # self.subtables_ in place, so calling this method more than once
        # (e.g. equals() followed by build()) duplicated the pending pair.
        # Returning a copy keeps the builder's state untouched.
        subtables_ = list(self.subtables_)
        if self.bases or self.marks:
            subtables_.append((self.marks, self.bases))
        return subtables_

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.get_subtables_() == other.get_subtables_()
        )

    def inferGlyphClasses(self):
        """Return a glyph-name -> GDEF glyph class map (1=base, 3=mark)."""
        result = {}
        for marks, bases in self.get_subtables_():
            result.update({glyph: 1 for glyph in bases})
            result.update({glyph: 3 for glyph in marks})
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-base
            positioning lookup.

        Raises:
            ValueError: If a mark or base glyph references a mark class
                that is unknown to the subtable's mark class registry.
        """
        subtables = []
        for subtable in self.get_subtables_():
            markClasses = self.buildMarkClasses_(subtable[0])
            marks = {}
            for mark, (mc, anchor) in subtable[0].items():
                if mc not in markClasses:
                    raise ValueError(
                        "Mark class %s not found for mark glyph %s" % (mc, mark)
                    )
                marks[mark] = (markClasses[mc], anchor)
            bases = {}
            for glyph, anchors in subtable[1].items():
                bases[glyph] = {}
                for mc, anchor in anchors.items():
                    if mc not in markClasses:
                        raise ValueError(
                            "Mark class %s not found for base glyph %s" % (mc, glyph)
                        )
                    bases[glyph][markClasses[mc]] = anchor
            subtables.append(buildMarkBasePosSubtable(marks, bases, self.glyphMap))
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Close the current (marks, bases) pair and start collecting a new one.
        self.subtables_.append((self.marks, self.bases))
        self.marks = {}
        self.bases = {}
class MarkLigPosBuilder(LookupBuilder):
    """Builds a Mark-To-Ligature Positioning (GPOS5) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``ligatures`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"] = (0, a1)
        builder.marks["grave"] = (0, a1)
        builder.marks["cedilla"] = (1, a2)
        builder.ligatures["f_i"] = [
            { 0: a3, 1: a5 }, # f
            { 0: a4, 1: a5 }  # i
        ]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        ligatures: A dictionary mapping a glyph name to an array with one
            element for each ligature component. Each array element should be
            a dictionary mapping mark class IDs to ``otTables.Anchor`` objects.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 5)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.ligatures = {}  # glyphName -> [{markClassName: anchor}, ...]
        self.subtables_ = []  # (marks, ligatures) pairs closed by breaks

    def get_subtables_(self):
        # Return the closed subtables plus the currently-open
        # (marks, ligatures) pair. BUGFIX: the previous implementation
        # appended to self.subtables_ in place, so calling this method more
        # than once (e.g. equals() followed by build()) duplicated the
        # pending pair. Returning a copy keeps the builder's state untouched.
        subtables_ = list(self.subtables_)
        if self.ligatures or self.marks:
            subtables_.append((self.marks, self.ligatures))
        return subtables_

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.get_subtables_() == other.get_subtables_()
        )

    def inferGlyphClasses(self):
        """Return a glyph-name -> GDEF glyph class map (2=ligature, 3=mark)."""
        result = {}
        for marks, ligatures in self.get_subtables_():
            result.update({glyph: 2 for glyph in ligatures})
            result.update({glyph: 3 for glyph in marks})
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-ligature
            positioning lookup.
        """
        subtables = []
        for subtable in self.get_subtables_():
            markClasses = self.buildMarkClasses_(subtable[0])
            marks = {
                mark: (markClasses[mc], anchor)
                for mark, (mc, anchor) in subtable[0].items()
            }
            ligs = {}
            for lig, components in subtable[1].items():
                ligs[lig] = []
                for c in components:
                    # Remap user mark class names to numeric class IDs.
                    ligs[lig].append({markClasses[mc]: a for mc, a in c.items()})
            subtables.append(buildMarkLigPosSubtable(marks, ligs, self.glyphMap))
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Close the current (marks, ligatures) pair and start a new one.
        self.subtables_.append((self.marks, self.ligatures))
        self.marks = {}
        self.ligatures = {}
class MarkMarkPosBuilder(LookupBuilder):
    """Builds a Mark-To-Mark Positioning (GPOS6) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``baseMarks`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"] = (0, a1)
        builder.marks["grave"] = (0, a1)
        builder.marks["cedilla"] = (1, a2)
        builder.baseMarks["acute"] = {0: a3}

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        baseMarks: A dictionary mapping a glyph name to a dictionary
            containing one item: a mark class ID and a ``otTables.Anchor`` object.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 6)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.baseMarks = {}  # glyphName -> {markClassName: anchor}
        self.subtables_ = []  # (marks, baseMarks) pairs closed by breaks

    def get_subtables_(self):
        # Return the closed subtables plus the currently-open
        # (marks, baseMarks) pair. BUGFIX: the previous implementation
        # appended to self.subtables_ in place, so calling this method more
        # than once (e.g. equals() followed by build()) duplicated the
        # pending pair. Returning a copy keeps the builder's state untouched.
        subtables_ = list(self.subtables_)
        if self.baseMarks or self.marks:
            subtables_.append((self.marks, self.baseMarks))
        return subtables_

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.get_subtables_() == other.get_subtables_()
        )

    def inferGlyphClasses(self):
        """Return a glyph-name -> GDEF glyph class map (everything is 3=mark)."""
        result = {}
        for marks, baseMarks in self.get_subtables_():
            result.update({glyph: 3 for glyph in baseMarks})
            result.update({glyph: 3 for glyph in marks})
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-mark
            positioning lookup.
        """
        subtables = []
        for subtable in self.get_subtables_():
            markClasses = self.buildMarkClasses_(subtable[0])
            # Mark class names ordered by their numeric class IDs.
            markClassList = sorted(markClasses.keys(), key=markClasses.get)
            marks = {
                mark: (markClasses[mc], anchor)
                for mark, (mc, anchor) in subtable[0].items()
            }

            st = ot.MarkMarkPos()
            st.Format = 1
            st.ClassCount = len(markClasses)
            st.Mark1Coverage = buildCoverage(marks, self.glyphMap)
            st.Mark2Coverage = buildCoverage(subtable[1], self.glyphMap)
            st.Mark1Array = buildMarkArray(marks, self.glyphMap)
            st.Mark2Array = ot.Mark2Array()
            st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs)
            st.Mark2Array.Mark2Record = []
            for base in st.Mark2Coverage.glyphs:
                # One anchor slot per mark class; missing classes yield None.
                anchors = [subtable[1][base].get(mc) for mc in markClassList]
                st.Mark2Array.Mark2Record.append(buildMark2Record(anchors))
            subtables.append(st)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Close the current (marks, baseMarks) pair and start a new one.
        self.subtables_.append((self.marks, self.baseMarks))
        self.marks = {}
        self.baseMarks = {}
class ReverseChainSingleSubstBuilder(LookupBuilder):
    """Builds a Reverse Chaining Contextual Single Substitution (GSUB8) lookup.

    Rules are appended directly to the ``rules`` attribute after
    construction, e.g.::

        # reversesub [a e n] d' by d.alt;
        prefix = [ ["a", "e", "n"] ]
        suffix = []
        mapping = { "d": "d.alt" }
        builder.rules.append( (prefix, suffix, mapping) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of three-element tuples, each consisting of a prefix
            sequence, a suffix sequence, and a dictionary of single
            substitutions.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 8)
        self.rules = []  # (prefix, suffix, mapping)

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.rules == other.rules

    def build(self):
        """Build and return the ``otTables.Lookup`` for this reverse
        chaining contextual substitution lookup.

        Each rule becomes its own ``ReverseChainSingleSubst`` subtable.
        """
        subtables = []
        for prefix, suffix, mapping in self.rules:
            subtable = ot.ReverseChainSingleSubst()
            subtable.Format = 1
            self.setBacktrackCoverage_(prefix, subtable)
            self.setLookAheadCoverage_(suffix, subtable)
            subtable.Coverage = buildCoverage(mapping.keys(), self.glyphMap)
            subtable.GlyphCount = len(mapping)
            # Substitutes are listed in coverage (glyph ID) order.
            subtable.Substitute = [mapping[g] for g in subtable.Coverage.glyphs]
            subtables.append(subtable)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Nothing to do here, each substitution is in its own subtable.
        pass
class AnySubstBuilder(LookupBuilder):
    """A temporary builder for Single, Multiple, or Ligature substitution lookup.

    Users are expected to manually add substitutions to the ``mapping``
    attribute after the object has been initialized, e.g.::

        # sub x by y;
        builder.mapping[("x",)] = ("y",)
        # sub a by b c;
        builder.mapping[("a",)] = ("b", "c")
        # sub f i by f_i;
        builder.mapping[("f", "i")] = ("f_i",)

    Then call `promote_lookup_type()` to convert this builder into the
    appropriate type of substitution lookup builder. This would promote single
    substitutions to either multiple or ligature substitutions, depending on the
    rest of the rules in the mapping.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: An ordered dictionary mapping a tuple of glyph names to another
            tuple of glyph names.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        # Lookup type 0 is a placeholder; promote_lookup_type() produces the
        # real GSUB1/2/4 builder(s).
        LookupBuilder.__init__(self, font, location, "GSUB", 0)
        self.mapping = OrderedDict()

    def _add_to_single_subst(self, builder, key, value):
        """Copy one (key, value) rule into a SingleSubstBuilder's mapping.

        Subtable-break sentinel keys are stored as-is; regular keys are
        unwrapped from their one-element tuple.
        """
        if key[0] != self.SUBTABLE_BREAK_:
            key = key[0]
        builder.mapping[key] = value[0]

    def _add_to_multiple_subst(self, builder, key, value):
        """Copy one (key, value) rule into a MultipleSubstBuilder's mapping.

        The full value tuple is kept; only the key is unwrapped.
        """
        if key[0] != self.SUBTABLE_BREAK_:
            key = key[0]
        builder.mapping[key] = value

    def _add_to_ligature_subst(self, builder, key, value):
        """Copy one (key, value) rule into a LigatureSubstBuilder's ligatures.

        The key stays a tuple of components; the value is unwrapped to the
        single ligature glyph.
        """
        builder.ligatures[key] = value[0]

    def can_add_mapping(self, mapping) -> bool:
        """Return True if ``mapping`` can share this builder without mixing
        incompatible rule kinds (multiple vs. ligature substitutions)."""
        if mapping is None:
            return True
        # single sub rules can be treated as (degenerate) liga-or-multi sub
        # rules, but multi and liga sub rules themselves have incompatible
        # representations. It is uncommon that these are in the same set of
        # rules, but it happens.
        is_multi = any(len(v) > 1 for v in mapping.values())
        is_liga = any(len(k) > 1 for k in mapping.keys())
        has_existing_multi = False
        has_existing_liga = False
        for k, v in self.mapping.items():
            if k[0] == self.SUBTABLE_BREAK_:
                # Break sentinels carry no rule shape information.
                continue
            if len(k) > 1:
                has_existing_liga = True
            if len(v) > 1:
                has_existing_multi = True
        can_reuse = not (has_existing_multi and is_liga) and not (
            has_existing_liga and is_multi
        )
        return can_reuse

    def promote_lookup_type(self, is_named_lookup):
        """Convert this temporary builder into concrete GSUB builder(s).

        Args:
            is_named_lookup: True if this builder backs a named lookup,
                which must not be split into multiple lookups.

        Returns:
            A list of concrete builders, or ``None`` for a named lookup
            whose mixed rules cannot be promoted to a single lookup type.
        """
        # https://github.com/fonttools/fonttools/issues/612
        # A multiple substitution may have a single destination, in which case
        # it will look just like a single substitution. So if there are both
        # multiple and single substitutions, upgrade all the single ones to
        # multiple substitutions. Similarly, a ligature substitution may have a
        # single source glyph, so if there are both ligature and single
        # substitutions, upgrade all the single ones to ligature substitutions.
        builder_classes = []
        for key, value in self.mapping.items():
            if key[0] == self.SUBTABLE_BREAK_:
                builder_classes.append(None)
            elif len(key) == 1 and len(value) == 1:
                builder_classes.append(SingleSubstBuilder)
            elif len(key) == 1 and len(value) != 1:
                builder_classes.append(MultipleSubstBuilder)
            elif len(key) > 1 and len(value) == 1:
                builder_classes.append(LigatureSubstBuilder)
            else:
                assert False, "Should not happen"
        has_multiple = any(b is MultipleSubstBuilder for b in builder_classes)
        has_ligature = any(b is LigatureSubstBuilder for b in builder_classes)

        # If we have mixed single and multiple substitutions,
        # upgrade all single substitutions to multiple substitutions.
        to_multiple = has_multiple and not has_ligature

        # If we have mixed single and ligature substitutions,
        # upgrade all single substitutions to ligature substitutions.
        to_ligature = has_ligature and not has_multiple

        # If we have only single substitutions, we can keep them as is.
        to_single = not has_ligature and not has_multiple

        ret = []
        if to_single:
            builder = SingleSubstBuilder(self.font, self.location)
            for key, value in self.mapping.items():
                self._add_to_single_subst(builder, key, value)
            ret = [builder]
        elif to_multiple:
            builder = MultipleSubstBuilder(self.font, self.location)
            for key, value in self.mapping.items():
                self._add_to_multiple_subst(builder, key, value)
            ret = [builder]
        elif to_ligature:
            builder = LigatureSubstBuilder(self.font, self.location)
            for key, value in self.mapping.items():
                self._add_to_ligature_subst(builder, key, value)
            ret = [builder]
        elif is_named_lookup:
            # This is a named lookup with mixed substitutions that can’t be promoted,
            # since we can’t split it into multiple lookups, we return None here to
            # signal that to the caller
            return None
        else:
            # Mixed multiple and ligature rules in an anonymous lookup: emit
            # a run of builders, starting a new one whenever the rule kind
            # changes.
            # NOTE(review): builder_classes contains None for subtable-break
            # entries; if one reached this branch it would be called as a
            # constructor below — confirm breaks cannot occur in the mixed
            # case.
            curr_builder = None
            for builder_class, (key, value) in zip(
                builder_classes, self.mapping.items()
            ):
                if curr_builder is None or type(curr_builder) is not builder_class:
                    curr_builder = builder_class(self.font, self.location)
                    ret.append(curr_builder)
                if builder_class is SingleSubstBuilder:
                    self._add_to_single_subst(curr_builder, key, value)
                elif builder_class is MultipleSubstBuilder:
                    self._add_to_multiple_subst(curr_builder, key, value)
                elif builder_class is LigatureSubstBuilder:
                    self._add_to_ligature_subst(curr_builder, key, value)
                else:
                    assert False, "Should not happen"

        # Carry the shared lookup attributes over to the promoted builder(s).
        for builder in ret:
            builder.extension = self.extension
            builder.lookupflag = self.lookupflag
            builder.markFilterSet = self.markFilterSet
        return ret

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.mapping == other.mapping

    def build(self):
        # This builder is temporary and must be promoted via
        # promote_lookup_type() before building; it never builds itself.
        assert False

    def getAlternateGlyphs(self):
        # One-to-one rules are the only ones that act as single substitutions.
        return {
            key[0]: value
            for key, value in self.mapping.items()
            if len(key) == 1 and len(value) == 1
        }

    def add_subtable_break(self, location):
        # Sentinel entry: keys starting with the break marker split subtables.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class SingleSubstBuilder(LookupBuilder):
    """Builds a Single Substitution (GSUB1) lookup.

    Substitutions are added directly to the ``mapping`` attribute after
    construction, e.g.::

        # sub x by y;
        builder.mapping["x"] = "y"

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: A dictionary mapping a single glyph name to another glyph name.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 1)
        self.mapping = OrderedDict()

    def equals(self, other):
        if not LookupBuilder.equals(self, other):
            return False
        return self.mapping == other.mapping

    def build(self):
        """Build and return the ``otTables.Lookup`` for this single
        substitution lookup."""
        subtables = self.build_subst_subtables(self.mapping, buildSingleSubstSubtable)
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        # Every single substitution is trivially an alternate set of one.
        result = {}
        for glyph, replacement in self.mapping.items():
            result[glyph] = [replacement]
        return result

    def add_subtable_break(self, location):
        # Sentinel entry: keys starting with the break marker split subtables.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ClassPairPosSubtableBuilder(object):
    """Builds class-based Pair Positioning (GPOS2 format 2) subtables.

    Note that this does *not* build a GPOS2 ``otTables.Lookup`` directly,
    but builds a list of ``otTables.PairPos`` subtables. It is used by the
    :class:`PairPosBuilder` below.

    Attributes:
        builder (PairPosBuilder): A pair positioning lookup builder.
    """

    def __init__(self, builder):
        self.builder_ = builder
        # Class definitions for the currently-open subtable; None until the
        # first pair arrives.
        self.classDef1_, self.classDef2_ = None, None
        self.values_ = {}  # (glyphclass1, glyphclass2) --> (value1, value2)
        # Set by addSubtableBreak(); forces the next addPair() to flush.
        self.forceSubtableBreak_ = False
        self.subtables_ = []  # finished otTables.PairPos subtables

    def addPair(self, gc1, value1, gc2, value2):
        """Add a pair positioning rule.

        Args:
            gc1: A set of glyph names for the "left" glyph
            value1: An ``otTables.ValueRecord`` object for the left glyph's
                positioning.
            gc2: A set of glyph names for the "right" glyph
            value2: An ``otTables.ValueRecord`` object for the right glyph's
                positioning.
        """
        # The pair can join the open subtable only if no explicit break is
        # pending and both glyph classes fit the open class definitions.
        mergeable = (
            not self.forceSubtableBreak_
            and self.classDef1_ is not None
            and self.classDef1_.canAdd(gc1)
            and self.classDef2_ is not None
            and self.classDef2_.canAdd(gc2)
        )
        if not mergeable:
            # Close the open subtable and start a fresh one.
            self.flush_()
            self.classDef1_ = ClassDefBuilder(useClass0=True)
            self.classDef2_ = ClassDefBuilder(useClass0=False)
            self.values_ = {}
        self.classDef1_.add(gc1)
        self.classDef2_.add(gc2)
        self.values_[(gc1, gc2)] = (value1, value2)

    def addSubtableBreak(self):
        """Add an explicit subtable break at this point."""
        self.forceSubtableBreak_ = True

    def subtables(self):
        """Return the list of ``otTables.PairPos`` subtables constructed."""
        self.flush_()
        return self.subtables_

    def flush_(self):
        # Convert the accumulated pairs of the open subtable into an
        # otTables.PairPos and append it; no-op if nothing is open.
        if self.classDef1_ is None or self.classDef2_ is None:
            return
        st = buildPairPosClassesSubtable(self.values_, self.builder_.glyphMap)
        if st.Coverage is None:
            # Empty subtable; drop it silently.
            return
        self.subtables_.append(st)
        self.forceSubtableBreak_ = False
class PairPosBuilder(LookupBuilder):
    """Builds a Pair Positioning (GPOS2) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        pairs: An array of class-based pair positioning tuples. Usually
            manipulated with the :meth:`addClassPair` method below.
        glyphPairs: A dictionary mapping a tuple of glyph names to a tuple
            of ``otTables.ValueRecord`` objects. Usually manipulated with the
            :meth:`addGlyphPair` method below.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 2)
        self.pairs = []  # [(gc1, value1, gc2, value2)*]
        self.glyphPairs = {}  # (glyph1, glyph2) --> (value1, value2)
        self.locations = {}  # (gc1, gc2) --> (filepath, line, column)

    def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2):
        """Add a class pair positioning rule to the current lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this rule. Unused.
            glyphclass1: A set of glyph names for the "left" glyph in the pair.
            value1: A ``otTables.ValueRecord`` for positioning the left glyph.
            glyphclass2: A set of glyph names for the "right" glyph in the pair.
            value2: A ``otTables.ValueRecord`` for positioning the right glyph.
        """
        self.pairs.append((glyphclass1, value1, glyphclass2, value2))

    def addGlyphPair(self, location, glyph1, value1, glyph2, value2):
        """Add a glyph pair positioning rule to the current lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this rule.
            glyph1: A glyph name for the "left" glyph in the pair.
            value1: A ``otTables.ValueRecord`` for positioning the left glyph.
            glyph2: A glyph name for the "right" glyph in the pair.
            value2: A ``otTables.ValueRecord`` for positioning the right glyph.
        """
        key = (glyph1, glyph2)
        oldValue = self.glyphPairs.get(key, None)
        if oldValue is not None:
            # the Feature File spec explicitly allows specific pairs generated
            # by an 'enum' rule to be overridden by preceding single pairs
            otherLoc = self.locations[key]
            log.debug(
                "Already defined position for pair %s %s at %s; "
                "choosing the first value",
                glyph1,
                glyph2,
                otherLoc,
            )
        else:
            self.glyphPairs[key] = (value1, value2)
            self.locations[key] = location

    def add_subtable_break(self, location):
        # Sentinel tuple recognized by build() below.
        self.pairs.append(
            (
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
            )
        )

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.glyphPairs == other.glyphPairs
            and self.pairs == other.pairs
        )

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the pair positioning
            lookup.
        """
        # (Removed an unused local ``builders = {}`` left over from an
        # earlier refactoring.)
        builder = ClassPairPosSubtableBuilder(self)
        for glyphclass1, value1, glyphclass2, value2 in self.pairs:
            if glyphclass1 is self.SUBTABLE_BREAK_:
                builder.addSubtableBreak()
                continue
            builder.addPair(glyphclass1, value1, glyphclass2, value2)
        subtables = []
        if self.glyphPairs:
            subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
        subtables.extend(builder.subtables())
        lookup = self.buildLookup_(subtables)

        # Compact the lookup
        # This is a good moment to do it because the compaction should create
        # smaller subtables, which may prevent overflows from happening.
        # Keep reading the value from the ENV until ufo2ft switches to the config system
        level = self.font.cfg.get(
            "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
            default=_compression_level_from_env(),
        )
        if level != 0:
            log.info("Compacting GPOS...")
            compact_lookup(self.font, level, lookup)

        return lookup
class SinglePosBuilder(LookupBuilder):
    """Builds a Single Positioning (GPOS1) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: A dictionary mapping a glyph name to a ``otTables.ValueRecord``
            objects. Usually manipulated with the :meth:`add_pos` method below.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 1)
        self.locations = {}  # glyph -> (filename, line, column)
        self.mapping = {}  # glyph -> ot.ValueRecord

    def add_pos(self, location, glyph, otValueRecord):
        """Add a single positioning rule.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this lookup.
            glyph: A glyph name.
            otValueRecord: A ``otTables.ValueRecord`` used to position the
                glyph, or ``None`` for an empty value record.

        Raises:
            OpenTypeLibError: If a different value record was already
                registered for this glyph.
        """
        if otValueRecord is None:
            otValueRecord = ValueRecord()
        if not self.can_add(glyph, otValueRecord):
            otherLoc = self.locations[glyph]
            raise OpenTypeLibError(
                'Already defined different position for glyph "%s" at %s'
                % (glyph, otherLoc),
                location,
            )
        # Only truthy (non-empty) value records are stored in the mapping;
        # the location is always remembered for duplicate diagnostics.
        if otValueRecord:
            self.mapping[glyph] = otValueRecord
        self.locations[glyph] = location

    def can_add(self, glyph, value):
        """Return True if ``value`` doesn't conflict with an existing rule
        for ``glyph`` (either no rule yet, or an equal one)."""
        assert isinstance(value, ValueRecord)
        curValue = self.mapping.get(glyph)
        return curValue is None or curValue == value

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.mapping == other.mapping

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the single positioning
            lookup.
        """
        subtables = buildSinglePos(self.mapping, self.glyphMap)
        return self.buildLookup_(subtables)
# GSUB
def buildSingleSubstSubtable(mapping):
    """Builds a single substitution (GSUB1) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SingleSubstBuilder` instead.

    Args:
        mapping: A dictionary mapping input glyph names to output glyph names.

    Returns:
        An ``otTables.SingleSubst`` object, or ``None`` if the mapping dictionary
        is empty.
    """
    if not mapping:
        return None
    subtable = ot.SingleSubst()
    subtable.mapping = dict(mapping)
    return subtable
def buildMultipleSubstSubtable(mapping):
    """Builds a multiple substitution (GSUB2) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.MultipleSubstBuilder` instead.

    Example::

        # sub uni06C0 by uni06D5.fina hamza.above
        # sub uni06C2 by uni06C1.fina hamza.above;

        subtable = buildMultipleSubstSubtable({
            "uni06C0": [ "uni06D5.fina", "hamza.above"],
            "uni06C2": [ "uni06D1.fina", "hamza.above"]
        })

    Args:
        mapping: A dictionary mapping input glyph names to a list of output
            glyph names.

    Returns:
        An ``otTables.MultipleSubst`` object or ``None`` if the mapping dictionary
        is empty.
    """
    if not mapping:
        return None
    subtable = ot.MultipleSubst()
    subtable.mapping = dict(mapping)
    return subtable
def buildAlternateSubstSubtable(mapping):
    """Builds an alternate substitution (GSUB3) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.AlternateSubstBuilder` instead.

    Args:
        mapping: A dictionary mapping input glyph names to a list of output
            glyph names.

    Returns:
        An ``otTables.AlternateSubst`` object or ``None`` if the mapping dictionary
        is empty.
    """
    if not mapping:
        return None
    subtable = ot.AlternateSubst()
    subtable.alternates = dict(mapping)
    return subtable
def buildLigatureSubstSubtable(mapping):
    """Builds a ligature substitution (GSUB4) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.LigatureSubstBuilder` instead.

    Example::

        # sub f f i by f_f_i;
        # sub f i by f_i;

        subtable = buildLigatureSubstSubtable({
            ("f", "f", "i"): "f_f_i",
            ("f", "i"): "f_i",
        })

    Args:
        mapping: A dictionary mapping tuples of glyph names to output
            glyph names.

    Returns:
        An ``otTables.LigatureSubst`` object or ``None`` if the mapping dictionary
        is empty.
    """
    if not mapping:
        return None
    self = ot.LigatureSubst()
    # The following single line can replace the rest of this function
    # with fontTools >= 3.1:
    # self.ligatures = dict(mapping)

    # BUGFIX: the previous code sorted with ``key=self._getLigatureSortKey``,
    # but ``self`` here is a plain ``ot.LigatureSubst`` with no such method,
    # so any non-empty mapping raised AttributeError. Within a ligature set,
    # longer component sequences must precede shorter ones so that e.g.
    # "f f i" is matched before "f i"; we sort by descending component count,
    # then by the components themselves for a deterministic order.
    def _ligature_sort_key(components):
        return (-len(components), components)

    self.ligatures = {}
    for components in sorted(mapping.keys(), key=_ligature_sort_key):
        ligature = ot.Ligature()
        # The first component is implied by the coverage/first-glyph index.
        ligature.Component = components[1:]
        ligature.CompCount = len(ligature.Component) + 1
        ligature.LigGlyph = mapping[components]
        firstGlyph = components[0]
        self.ligatures.setdefault(firstGlyph, []).append(ligature)
    return self
# GPOS
def buildAnchor(x, y, point=None, deviceX=None, deviceY=None):
    """Builds an Anchor table.

    This determines the appropriate anchor format based on the passed parameters.

    Args:
        x (int): X coordinate.
        y (int): Y coordinate.
        point (int): Index of glyph contour point, if provided.
        deviceX (``otTables.Device``): X coordinate device table, if provided.
        deviceY (``otTables.Device``): Y coordinate device table, if provided.

    Returns:
        An ``otTables.Anchor`` object.
    """
    anchor = ot.Anchor()
    anchor.XCoordinate = x
    anchor.YCoordinate = y
    anchor.Format = 1
    if point is not None:
        # Contour-point anchors are format 2.
        anchor.AnchorPoint = point
        anchor.Format = 2
    if deviceX is not None or deviceY is not None:
        # Device tables (format 3) cannot be combined with a contour point.
        assert (
            anchor.Format == 1
        ), "Either point, or both of deviceX/deviceY, must be None."
        anchor.XDeviceTable = deviceX
        anchor.YDeviceTable = deviceY
        anchor.Format = 3
    return anchor
def buildBaseArray(bases, numMarkClasses, glyphMap):
    """Builds a base array record.

    As part of building mark-to-base positioning rules, you will need to define
    a ``BaseArray`` record, which "defines for each base glyph an array of
    anchors, one for each mark class." This function builds the base array
    subtable.

    Example::

        bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}}
        basearray = buildBaseArray(bases, 2, font.getReverseGlyphMap())

    Args:
        bases (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being dictionaries mapping mark class ID
            to the appropriate ``otTables.Anchor`` object used for attaching marks
            of that class.
        numMarkClasses (int): The total number of mark classes for which anchors
            are defined.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.BaseArray`` object.
    """
    array = ot.BaseArray()
    # One BaseRecord per base glyph, in glyph ID order; each record carries
    # one anchor slot per mark class (None where no anchor is defined).
    array.BaseRecord = [
        buildBaseRecord(
            [bases[glyph].get(markClass) for markClass in range(numMarkClasses)]
        )
        for glyph in sorted(bases, key=glyphMap.__getitem__)
    ]
    array.BaseCount = len(array.BaseRecord)
    return array
def buildBaseRecord(anchors):
    # Wraps a list of ``otTables.Anchor`` objects (or ``None``), one per
    # mark class, in an ``otTables.BaseRecord``.
    record = ot.BaseRecord()
    record.BaseAnchor = anchors
    return record
def buildComponentRecord(anchors):
    """Builds a component record.

    As part of building mark-to-ligature positioning rules, you will need to
    define ``ComponentRecord`` objects, which contain "an array of offsets...
    to the Anchor tables that define all the attachment points used to attach
    marks to the component." This function builds the component record.

    Args:
        anchors: A list of ``otTables.Anchor`` objects or ``None``.

    Returns:
        A ``otTables.ComponentRecord`` object, or ``None`` if no anchors are
        supplied.
    """
    if not anchors:
        return None
    record = ot.ComponentRecord()
    record.LigatureAnchor = anchors
    return record
def buildCursivePosSubtable(attach, glyphMap):
    """Builds a cursive positioning (GPOS3) subtable.

    Cursive positioning lookups are made up of a coverage table of glyphs,
    and a set of ``EntryExitRecord`` records containing the anchors for
    each glyph. This function builds the cursive positioning subtable.

    Example::

        subtable = buildCursivePosSubtable({
            "AlifIni": (None, buildAnchor(0, 50)),
            "BehMed": (buildAnchor(500,250), buildAnchor(0,50)),
            # ...
        }, font.getReverseGlyphMap())

    Args:
        attach (dict): A mapping between glyph names and a tuple of two
            ``otTables.Anchor`` objects representing entry and exit anchors.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.CursivePos`` object, or ``None`` if the attachment
        dictionary was empty.
    """
    if not attach:
        return None
    subtable = ot.CursivePos()
    subtable.Format = 1
    subtable.Coverage = buildCoverage(attach.keys(), glyphMap)
    records = []
    # One record per covered glyph, in coverage (glyph ID) order.
    for glyph in subtable.Coverage.glyphs:
        record = ot.EntryExitRecord()
        record.EntryAnchor, record.ExitAnchor = attach[glyph]
        records.append(record)
    subtable.EntryExitRecord = records
    subtable.EntryExitCount = len(records)
    return subtable
def buildDevice(deltas):
    """Builds a Device record as part of a ValueRecord or Anchor.

    Device tables specify size-specific adjustments to value records
    and anchors to reflect changes based on the resolution of the output.
    For example, one could specify that an anchor's Y position should be
    increased by 1 pixel when displayed at 8 pixels per em. This routine
    builds device records.

    Args:
        deltas: A dictionary mapping pixels-per-em sizes to the delta
            adjustment in pixels when the font is displayed at that size.

    Returns:
        An ``otTables.Device`` object if any deltas were supplied, or
        ``None`` otherwise.
    """
    if not deltas:
        return None
    device = ot.Device()
    startSize = min(deltas)
    endSize = max(deltas)
    assert 0 <= startSize <= endSize
    device.StartSize = startSize
    device.EndSize = endSize
    # One delta per ppem size in the covered range; unlisted sizes get 0.
    device.DeltaValue = [deltas.get(ppem, 0) for ppem in range(startSize, endSize + 1)]
    lo = min(device.DeltaValue)
    hi = max(device.DeltaValue)
    assert -129 < lo and hi < 128
    # Pick the narrowest encoding that can represent every delta:
    # format 1 packs 2-bit values, format 2 packs 4-bit, format 3 packs 8-bit.
    if -3 < lo and hi < 2:
        device.DeltaFormat = 1
    elif -9 < lo and hi < 8:
        device.DeltaFormat = 2
    else:
        device.DeltaFormat = 3
    return device
def buildLigatureArray(ligs, numMarkClasses, glyphMap):
    """Builds a LigatureArray subtable.

    As part of building a mark-to-ligature lookup, you will need to define
    the set of anchors (for each mark class) on each component of the ligature
    where marks can be attached. For example, for an Arabic divine name ligature
    (lam lam heh), you may want to specify mark attachment positioning for
    superior marks (fatha, etc.) and inferior marks (kasra, etc.) on each glyph
    of the ligature. This routine builds the ligature array record.

    Example::

        buildLigatureArray({
            "lam-lam-heh": [
                { 0: superiorAnchor1, 1: inferiorAnchor1 }, # attach points for lam1
                { 0: superiorAnchor2, 1: inferiorAnchor2 }, # attach points for lam2
                { 0: superiorAnchor3, 1: inferiorAnchor3 }, # attach points for heh
            ]
        }, 2, font.getReverseGlyphMap())

    Args:
        ligs (dict): A mapping of ligature names to an array of dictionaries:
            for each component glyph in the ligature, a dictionary mapping
            mark class IDs to anchors.
        numMarkClasses (int): The number of mark classes.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.LigatureArray`` object.
    """
    array = ot.LigatureArray()
    array.LigatureAttach = []
    for lig in sorted(ligs, key=glyphMap.__getitem__):
        # One anchor list per component; each list has one slot per mark
        # class (None where the component defines no anchor for that class).
        componentAnchors = [
            [component.get(markClass) for markClass in range(numMarkClasses)]
            for component in ligs[lig]
        ]
        array.LigatureAttach.append(buildLigatureAttach(componentAnchors))
    array.LigatureCount = len(array.LigatureAttach)
    return array
def buildLigatureAttach(components):
    # [[Anchor, Anchor], [Anchor, Anchor, Anchor]] --> LigatureAttach
    attach = ot.LigatureAttach()
    attach.ComponentRecord = [buildComponentRecord(anchors) for anchors in components]
    attach.ComponentCount = len(attach.ComponentRecord)
    return attach
def buildMarkArray(marks, glyphMap):
    """Builds a mark array subtable.

    As part of building mark-to-* positioning rules, you will need to define
    a MarkArray subtable, which "defines the class and the anchor point
    for a mark glyph." This function builds the mark array subtable.

    Example::

        mark = {
            "acute": (0, buildAnchor(300,712)),
            # ...
        }
        markarray = buildMarkArray(marks, font.getReverseGlyphMap())

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.MarkArray`` object.
    """
    array = ot.MarkArray()
    # marks[glyph] is a (markClass, anchor) pair, which is exactly the
    # argument list buildMarkRecord() expects; records go in glyph ID order.
    array.MarkRecord = [
        buildMarkRecord(*marks[glyph])
        for glyph in sorted(marks, key=glyphMap.__getitem__)
    ]
    array.MarkCount = len(array.MarkRecord)
    return array
@deprecateFunction(
    "use buildMarkBasePosSubtable() instead", category=DeprecationWarning
)
def buildMarkBasePos(marks, bases, glyphMap):
    """Build a list of MarkBasePos (GPOS4) subtables.

    .. deprecated:: 4.58.0
        Use :func:`buildMarkBasePosSubtable` instead.
    """
    # Backward-compatibility shim: wraps the single subtable built by
    # buildMarkBasePosSubtable() in a one-element list.
    return [buildMarkBasePosSubtable(marks, bases, glyphMap)]
def buildMarkBasePosSubtable(marks, bases, glyphMap):
    """Build a single MarkBasePos (GPOS4) subtable.

    This builds a mark-to-base lookup subtable containing all of the referenced
    marks and bases.

    Example::

        # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ...

        marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)}
        bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}}
        markbaseposes = [buildMarkBasePosSubtable(marks, bases, font.getReverseGlyphMap())]

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        bases (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being dictionaries mapping mark class ID
            to the appropriate ``otTables.Anchor`` object used for attaching marks
            of that class. (See :func:`buildBaseArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.MarkBasePos`` object.
    """
    subtable = ot.MarkBasePos()
    subtable.Format = 1
    subtable.MarkCoverage = buildCoverage(marks, glyphMap)
    subtable.MarkArray = buildMarkArray(marks, glyphMap)
    # Highest mark class ID in use, plus one (class IDs are zero-based).
    subtable.ClassCount = 1 + max(markClass for markClass, _ in marks.values())
    subtable.BaseCoverage = buildCoverage(bases, glyphMap)
    subtable.BaseArray = buildBaseArray(bases, subtable.ClassCount, glyphMap)
    return subtable
@deprecateFunction("use buildMarkLigPosSubtable() instead", category=DeprecationWarning)
def buildMarkLigPos(marks, ligs, glyphMap):
    """Build a list of MarkLigPos (GPOS5) subtables.

    .. deprecated:: 4.58.0
        Use :func:`buildMarkLigPosSubtable` instead.
    """
    # Backward-compatibility shim: wraps the single subtable built by
    # buildMarkLigPosSubtable() in a one-element list.
    return [buildMarkLigPosSubtable(marks, ligs, glyphMap)]
def buildMarkLigPosSubtable(marks, ligs, glyphMap):
    """Build a single MarkLigPos (GPOS5) subtable.

    This builds a mark-to-ligature lookup subtable containing all of the
    referenced marks and ligatures.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.MarkLigPosBuilder` instead.

    Example::

        # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ...

        marks = {
            "acute": (0, a1),
            "grave": (0, a1),
            "cedilla": (1, a2)
        }
        ligs = {
            "f_i": [
                { 0: a3, 1: a5 }, # f
                { 0: a4, 1: a5 } # i
            ],
            # "c_t": [{...}, {...}]
        }
        markligpos = buildMarkLigPosSubtable(marks, ligs,
            font.getReverseGlyphMap())

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        ligs (dict): A mapping of ligature names to an array of dictionaries:
            for each component glyph in the ligature, a dictionary mapping
            mark class IDs to anchors. (See :func:`buildLigatureArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.MarkLigPos`` object.
    """
    self = ot.MarkLigPos()
    self.Format = 1
    self.MarkCoverage = buildCoverage(marks, glyphMap)
    self.MarkArray = buildMarkArray(marks, glyphMap)
    # Highest mark class ID in use, plus one (class IDs are zero-based);
    # raises ValueError if ``marks`` is empty, as before.
    self.ClassCount = max(mc for mc, _ in marks.values()) + 1
    self.LigatureCoverage = buildCoverage(ligs, glyphMap)
    self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap)
    return self
def buildMarkRecord(classID, anchor):
    # (mark class ID, otTables.Anchor) --> otTables.MarkRecord
    assert isinstance(classID, int)
    assert isinstance(anchor, ot.Anchor)
    record = ot.MarkRecord()
    record.Class = classID
    record.MarkAnchor = anchor
    return record
def buildMark2Record(anchors):
    # [otTables.Anchor, otTables.Anchor, ...] --> otTables.Mark2Record
    record = ot.Mark2Record()
    record.Mark2Anchor = anchors
    return record
def _getValueFormat(f, values, i):
    # Helper for buildPairPos{Glyphs|Classes}Subtable: return the explicit
    # format ``f`` when given; otherwise OR together the formats of the
    # ``i``-th value record of every (non-None) pair in ``values``.
    if f is not None:
        return f
    mask = 0
    for pair in values:
        if pair is None:
            continue
        record = pair[i]
        if record is not None:
            mask |= record.getFormat()
    return mask
def buildPairPosClassesSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None):
    """Builds a class pair adjustment (GPOS2 format 2) subtable.

    Kerning tables are generally expressed as pair positioning tables using
    class-based pair adjustments. This routine builds format 2 PairPos
    subtables.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.ClassPairPosSubtableBuilder`
    instead, as this takes care of ensuring that the supplied pairs can be
    formed into non-overlapping classes and emitting individual subtables
    whenever the non-overlapping requirement means that a new subtable is
    required.

    Example::

        pairs = {}

        pairs[(
            [ "K", "X" ],
            [ "W", "V" ]
        )] = ( buildValue(xAdvance=+5), buildValue() )
        # pairs[(... , ...)] = (..., ...)

        pairpos = buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of lists of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.
        valueFormat1: Force the "left" value records to the given format.
        valueFormat2: Force the "right" value records to the given format.

    Returns:
        A ``otTables.PairPos`` object.
    """
    coveredGlyphs = set()
    # Class 0 may carry real glyphs on the first-glyph side (coverage
    # disambiguates), but not on the second-glyph side.
    builder1 = ClassDefBuilder(useClass0=True)
    builder2 = ClassDefBuilder(useClass0=False)
    for firstClass, secondClass in sorted(pairs):
        coveredGlyphs.update(firstClass)
        builder1.add(firstClass)
        builder2.add(secondClass)
    subtable = ot.PairPos()
    subtable.Format = 2
    fmt1 = subtable.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0)
    fmt2 = subtable.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1)
    subtable.Coverage = buildCoverage(coveredGlyphs, glyphMap)
    subtable.ClassDef1 = builder1.build()
    subtable.ClassDef2 = builder2.build()
    classes2 = builder2.classes()
    subtable.Class1Record = []
    for class1 in builder1.classes():
        rec1 = ot.Class1Record()
        rec1.Class2Record = []
        for class2 in classes2:
            # Classes without an explicit adjustment get empty records.
            val1, val2 = pairs.get((class1, class2), (None, None))
            rec2 = ot.Class2Record()
            rec2.Value1 = ValueRecord(src=val1, valueFormat=fmt1) if fmt1 else None
            rec2.Value2 = ValueRecord(src=val2, valueFormat=fmt2) if fmt2 else None
            rec1.Class2Record.append(rec2)
        subtable.Class1Record.append(rec1)
    subtable.Class1Count = len(subtable.Class1Record)
    subtable.Class2Count = len(classes2)
    return subtable
def buildPairPosGlyphs(pairs, glyphMap):
    """Builds a list of glyph-based pair adjustment (GPOS2 format 1) subtables.

    This organises a list of pair positioning adjustments into subtables based
    on common value record formats.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder`
    instead.

    Example::

        pairs = {
            ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ),
            ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ),
            # ...
        }

        subtables = buildPairPosGlyphs(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.PairPos`` objects.
    """
    # Group the pairs by the pair of value record formats they use, so that
    # each emitted subtable has a single (formatA, formatB) combination.
    byFormat = {}  # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)}
    for glyphPair, (valA, valB) in pairs.items():
        formatA = 0 if valA is None else valA.getFormat()
        formatB = 0 if valB is None else valB.getFormat()
        byFormat.setdefault((formatA, formatB), {})[glyphPair] = (valA, valB)
    return [
        buildPairPosGlyphsSubtable(subset, glyphMap, formatA, formatB)
        for (formatA, formatB), subset in sorted(byFormat.items())
    ]
def buildPairPosGlyphsSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None):
    """Builds a single glyph-based pair adjustment (GPOS2 format 1) subtable.

    This builds a PairPos subtable from a dictionary of glyph pairs and
    their positioning adjustments. See also :func:`buildPairPosGlyphs`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder` instead.

    Example::

        pairs = {
            ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ),
            ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ),
            # ...
        }

        pairpos = buildPairPosGlyphsSubtable(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.
        valueFormat1: Force the "left" value records to the given format.
        valueFormat2: Force the "right" value records to the given format.

    Returns:
        A ``otTables.PairPos`` object.
    """
    subtable = ot.PairPos()
    subtable.Format = 1
    fmt1 = subtable.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0)
    fmt2 = subtable.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1)
    # Bucket the adjustments by first glyph; each bucket becomes a PairSet.
    bySecond = {}
    for (first, second), (val1, val2) in pairs.items():
        bySecond.setdefault(first, []).append((second, val1, val2))
    subtable.Coverage = buildCoverage({first for first, _ in pairs}, glyphMap)
    subtable.PairSet = []
    for first in subtable.Coverage.glyphs:
        pairSet = ot.PairSet()
        pairSet.PairValueRecord = []
        # Records within a set are ordered by the second glyph's ID.
        for second, val1, val2 in sorted(
            bySecond[first], key=lambda entry: glyphMap[entry[0]]
        ):
            record = ot.PairValueRecord()
            record.SecondGlyph = second
            record.Value1 = ValueRecord(src=val1, valueFormat=fmt1) if fmt1 else None
            record.Value2 = ValueRecord(src=val2, valueFormat=fmt2) if fmt2 else None
            pairSet.PairValueRecord.append(record)
        pairSet.PairValueCount = len(pairSet.PairValueRecord)
        subtable.PairSet.append(pairSet)
    subtable.PairSetCount = len(subtable.PairSet)
    return subtable
def buildSinglePos(mapping, glyphMap):
    """Builds a list of single adjustment (GPOS1) subtables.

    This builds a list of SinglePos subtables from a dictionary of glyph
    names and their positioning adjustments. The format of the subtables are
    determined to optimize the size of the resulting subtables.
    See also :func:`buildSinglePosSubtable`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead.

    Example::

        mapping = {
            "V": buildValue({ "xAdvance" : +5 }),
            # ...
        }

        subtables = buildSinglePos(mapping, font.getReverseGlyphMap())

    Args:
        mapping (dict): A mapping between glyphnames and
            ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.SinglePos`` objects.
    """
    result, handled = [], set()
    # In SinglePos format 1, the covered glyphs all share the same ValueRecord.
    # In format 2, each glyph has its own ValueRecord, but these records
    # all have the same properties (eg., all have an X but no Y placement).
    coverages, masks, values = {}, {}, {}
    for glyph, value in mapping.items():
        # key is a hashable summary of the record: (valueFormat, (name, value),
        # ...) -- see _getSinglePosValueKey.  key[0] is the valueFormat mask.
        key = _getSinglePosValueKey(value)
        coverages.setdefault(key, []).append(glyph)
        masks.setdefault(key[0], []).append(key)
        values[key] = value
    # If a ValueRecord is shared between multiple glyphs, we generate
    # a SinglePos format 1 subtable; that is the most compact form.
    for key, glyphs in coverages.items():
        # 5 ushorts is the length of introducing another sublookup
        if len(glyphs) * _getSinglePosValueSize(key) > 5:
            format1Mapping = {g: values[key] for g in glyphs}
            result.append(buildSinglePosSubtable(format1Mapping, glyphMap))
            handled.add(key)
    # In the remaining ValueRecords, look for those whose valueFormat
    # (the set of used properties) is shared between multiple records.
    # These will get encoded in format 2.
    for valueFormat, keys in masks.items():
        f2 = [k for k in keys if k not in handled]
        if len(f2) > 1:
            format2Mapping = {}
            for k in f2:
                format2Mapping.update((g, values[k]) for g in coverages[k])
            result.append(buildSinglePosSubtable(format2Mapping, glyphMap))
            handled.update(f2)
    # The remaining ValueRecords are only used by a few glyphs, normally
    # one. We encode these in format 1 again.
    for key, glyphs in coverages.items():
        if key not in handled:
            for g in glyphs:
                st = buildSinglePosSubtable({g: values[key]}, glyphMap)
                result.append(st)
    # When the OpenType layout engine traverses the subtables, it will
    # stop after the first matching subtable.  Therefore, we sort the
    # resulting subtables by decreasing coverage size; this increases
    # the chance that the layout engine can do an early exit. (Of course,
    # this would only be true if all glyphs were equally frequent, which
    # is not really the case; but we do not know their distribution).
    # If two subtables cover the same number of glyphs, we sort them
    # by glyph ID so that our output is deterministic.
    result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap))
    return result
def buildSinglePosSubtable(values, glyphMap):
    """Builds a single adjustment (GPOS1) subtable.

    This builds a single SinglePos subtable from a dictionary of glyph
    names and their positioning adjustments. The format of the subtable is
    determined to optimize the size of the output.
    See also :func:`buildSinglePos`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead.

    Example::

        values = {
            "V": buildValue({ "xAdvance" : +5 }),
            # ...
        }

        subtable = buildSinglePosSubtable(values, font.getReverseGlyphMap())

    Args:
        values (dict): A mapping between glyphnames and
            ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.SinglePos`` object.
    """
    self = ot.SinglePos()
    self.Coverage = buildCoverage(values.keys(), glyphMap)
    # The subtable's value format is the union of the formats of all records.
    valueFormat = self.ValueFormat = reduce(
        int.__or__, [v.getFormat() for v in values.values()], 0
    )
    valueRecords = [
        ValueRecord(src=values[g], valueFormat=valueFormat)
        for g in self.Coverage.glyphs
    ]
    if all(v == valueRecords[0] for v in valueRecords):
        # All covered glyphs share one identical record: format 1 stores it
        # just once.
        self.Format = 1
        if self.ValueFormat != 0:
            self.Value = valueRecords[0]
        else:
            # An all-zero value format means no adjustment at all.
            self.Value = None
    else:
        # Otherwise store one record per covered glyph (format 2).
        self.Format = 2
        self.Value = valueRecords
        self.ValueCount = len(self.Value)
    return self
def _getSinglePosTableKey(subtable, glyphMap):
    # Sort key for SinglePos subtables: larger coverage first, ties broken
    # by the glyph ID of the first covered glyph (for determinism).
    assert isinstance(subtable, ot.SinglePos), subtable
    coveredGlyphs = subtable.Coverage.glyphs
    return (-len(coveredGlyphs), glyphMap[coveredGlyphs[0]])
def _getSinglePosValueKey(valueRecord):
    # otBase.ValueRecord --> hashable key, e.g. (2, ("YPlacement", 12)).
    # The first element is the combined valueFormat mask; the rest are the
    # record's (name, value) pairs in sorted order, with Device tables
    # replaced by hashable _DeviceTuple stand-ins.
    assert isinstance(valueRecord, ValueRecord), valueRecord
    valueFormat = 0
    entries = []
    for name, value in valueRecord.__dict__.items():
        if isinstance(value, ot.Device):
            entries.append((name, _makeDeviceTuple(value)))
        else:
            entries.append((name, value))
        valueFormat |= valueRecordFormatDict[name][0]
    entries.sort()
    return (valueFormat, *entries)
# Hashable stand-in for an otTables.Device, so device tables can take part
# in dictionary keys (see _getSinglePosValueKey).
_DeviceTuple = namedtuple("_DeviceTuple", "DeltaFormat StartSize EndSize DeltaValue")


def _makeDeviceTuple(device):
    # otTables.Device --> tuple, for making device tables unique
    # The 0x8000 bit marks a VariationIndex table, whose DeltaValue is not
    # a per-size delta list; represent it as an empty tuple.
    deltas = () if device.DeltaFormat & 0x8000 else tuple(device.DeltaValue)
    return _DeviceTuple(device.DeltaFormat, device.StartSize, device.EndSize, deltas)


def _getSinglePosValueSize(valueKey):
    # Returns how many ushorts this valueKey (short form of ValueRecord,
    # as produced by _getSinglePosValueKey) takes up.
    total = 0
    for _, entry in valueKey[1:]:
        # A device table costs its delta array plus a three-ushort header;
        # every plain value costs a single ushort.
        total += len(entry.DeltaValue) + 3 if isinstance(entry, _DeviceTuple) else 1
    return total
def buildValue(value):
    """Builds a positioning value record.

    Value records are used to specify coordinates and adjustments for
    positioning and attaching glyphs. Many of the positioning functions
    in this library take ``otTables.ValueRecord`` objects as arguments.
    This function builds value records from dictionaries.

    Args:
        value (dict): A dictionary with zero or more of the following keys:
            - ``xPlacement``
            - ``yPlacement``
            - ``xAdvance``
            - ``yAdvance``
            - ``xPlaDevice``
            - ``yPlaDevice``
            - ``xAdvDevice``
            - ``yAdvDevice``

    Returns:
        An ``otTables.ValueRecord`` object.
    """
    record = ValueRecord()
    for attribute, amount in value.items():
        setattr(record, attribute, amount)
    return record
# GDEF
def buildAttachList(attachPoints, glyphMap):
    """Builds an AttachList subtable.

    A GDEF table may contain an Attachment Point List table (AttachList)
    which stores the contour indices of attachment points for glyphs with
    attachment points. This routine builds AttachList subtables.

    Args:
        attachPoints (dict): A mapping between glyph names and a list of
            contour indices.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.AttachList`` object if attachment points are supplied,
        or ``None`` otherwise.
    """
    if not attachPoints:
        return None
    attachList = ot.AttachList()
    attachList.Coverage = buildCoverage(attachPoints.keys(), glyphMap)
    # AttachPoint records parallel the coverage table's glyph order.
    attachList.AttachPoint = [
        buildAttachPoint(attachPoints[glyph]) for glyph in attachList.Coverage.glyphs
    ]
    attachList.GlyphCount = len(attachList.AttachPoint)
    return attachList
def buildAttachPoint(points):
    # [4, 23, 41] --> otTables.AttachPoint
    # Only used by buildAttachList above.
    if not points:
        return None
    attachPoint = ot.AttachPoint()
    # Deduplicate and store in ascending order, as the format requires.
    attachPoint.PointIndex = sorted(set(points))
    attachPoint.PointCount = len(attachPoint.PointIndex)
    return attachPoint
def buildCaretValueForCoord(coord):
    # 500 --> otTables.CaretValue, format 1
    # (500, DeviceTable) --> otTables.CaretValue, format 3
    caret = ot.CaretValue()
    if isinstance(coord, tuple):
        caret.Format = 3
        caret.Coordinate, caret.DeviceTable = coord
    else:
        caret.Format = 1
        caret.Coordinate = coord
    return caret
def buildCaretValueForPoint(point):
    # 4 --> otTables.CaretValue, format 2
    caret = ot.CaretValue()
    caret.Format = 2
    caret.CaretValuePoint = point
    return caret
def buildLigCaretList(coords, points, glyphMap):
    """Builds a ligature caret list table.

    Ligatures appear as a single glyph representing multiple characters; however
    when, for example, editing text containing a ``f_i`` ligature, the user may
    want to place the cursor between the ``f`` and the ``i``. The ligature caret
    list in the GDEF table specifies the position to display the "caret" (the
    character insertion indicator, typically a flashing vertical bar) "inside"
    the ligature to represent an insertion point. The insertion positions may
    be specified either by coordinate or by contour point.

    Example::

        coords = {
            "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600.
        }
        points = {
            "c_t": [28] # c|t cursor appears at coordinate of contour point 28.
        }
        ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap())

    Args:
        coords: A mapping between glyph names and a list of coordinates for
            the insertion point of each ligature component after the first one.
        points: A mapping between glyph names and a list of contour points for
            the insertion point of each ligature component after the first one.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.LigCaretList`` object if any carets are present, or
        ``None`` otherwise.
    """
    glyphs = set(coords.keys()) if coords else set()
    if points:
        glyphs.update(points.keys())
    # Build one LigGlyph per glyph, dropping glyphs that end up caret-less.
    carets = {}
    for glyph in glyphs:
        ligGlyph = buildLigGlyph(coords.get(glyph), points.get(glyph))
        if ligGlyph is not None:
            carets[glyph] = ligGlyph
    if not carets:
        return None
    caretList = ot.LigCaretList()
    caretList.Coverage = buildCoverage(carets.keys(), glyphMap)
    caretList.LigGlyph = [carets[glyph] for glyph in caretList.Coverage.glyphs]
    caretList.LigGlyphCount = len(caretList.LigGlyph)
    return caretList
def buildLigGlyph(coords, points):
    # ([500], [4]) --> otTables.LigGlyph; None for empty coords/points
    carets = []
    if coords:
        # Coordinates may be plain numbers or (coordinate, DeviceTable)
        # pairs; sort by the numeric coordinate in either case.
        def caretSortKey(coord):
            return coord[0] if isinstance(coord, tuple) else coord

        for coord in sorted(coords, key=caretSortKey):
            carets.append(buildCaretValueForCoord(coord))
    if points:
        for point in sorted(points):
            carets.append(buildCaretValueForPoint(point))
    if not carets:
        return None
    ligGlyph = ot.LigGlyph()
    ligGlyph.CaretValue = carets
    ligGlyph.CaretCount = len(carets)
    return ligGlyph
def buildMarkGlyphSetsDef(markSets, glyphMap):
    """Builds a mark glyph sets definition table.

    OpenType Layout lookups may choose to use mark filtering sets to consider
    or ignore particular combinations of marks. These sets are specified by
    setting a flag on the lookup, but the mark filtering sets are defined in
    the ``GDEF`` table. This routine builds the subtable containing the mark
    glyph set definitions.

    Example::

        set0 = {"acute", "grave"}
        set1 = {"caron", "grave"}

        markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap())

    Args:
        markSets: A list of sets of glyphnames.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.MarkGlyphSetsDef`` object, or ``None`` if no mark sets
        were supplied.
    """
    if not markSets:
        return None
    self = ot.MarkGlyphSetsDef()
    self.MarkSetTableFormat = 1
    # Each mark set is stored as a coverage table; set membership is tested
    # by coverage lookup at layout time.
    self.Coverage = [buildCoverage(m, glyphMap) for m in markSets]
    self.MarkSetCount = len(self.Coverage)
    return self
class ClassDefBuilder(object):
    """Helper for building ClassDef tables."""

    def __init__(self, useClass0):
        # Glyph classes added so far, each normalized to a tuple of names.
        self.classes_ = set()
        # Maps each glyph name to the class (tuple) that contains it.
        self.glyphs_ = {}
        # Whether class ID 0 may be assigned to a real glyph class.
        self.useClass0_ = useClass0

    @staticmethod
    def _asTuple(glyphs):
        # Canonicalize a glyph class to a tuple; sets are sorted first so
        # that equal classes always normalize to the same tuple.
        if isinstance(glyphs, (set, frozenset)):
            glyphs = sorted(glyphs)
        return tuple(glyphs)

    def canAdd(self, glyphs):
        """Return True if ``glyphs`` can be added without overlapping an
        existing, different class."""
        glyphs = self._asTuple(glyphs)
        if glyphs in self.classes_:
            return True
        return not any(glyph in self.glyphs_ for glyph in glyphs)

    def add(self, glyphs):
        """Add a glyph class; raises OpenTypeLibError if any of its glyphs
        already belongs to a different class."""
        glyphs = self._asTuple(glyphs)
        if glyphs in self.classes_:
            return
        self.classes_.add(glyphs)
        for glyph in glyphs:
            if glyph in self.glyphs_:
                raise OpenTypeLibError(
                    f"Glyph {glyph} is already present in class.", None
                )
            self.glyphs_[glyph] = glyphs

    def classes(self):
        # In ClassDef1 tables, class id #0 does not need to be encoded
        # because zero is the default. Therefore, we use id #0 for the
        # glyph class that has the largest number of members. However,
        # in other tables than ClassDef1, 0 means "every other glyph"
        # so we should not use that ID for any real glyph classes;
        # we implement this by inserting an empty set at position 0.
        #
        # TODO: Instead of counting the number of glyphs in each class,
        # we should determine the encoded size. If the glyphs in a large
        # class form a contiguous range, the encoding is actually quite
        # compact, whereas a non-contiguous set might need a lot of bytes
        # in the output file. We don't get this right with the key below.
        ordered = sorted(self.classes_, key=lambda c: (-len(c), c))
        if not self.useClass0_:
            ordered.insert(0, frozenset())
        return ordered

    def build(self):
        """Return an ``otTables.ClassDef`` mapping glyphs to class IDs."""
        glyphClasses = {}
        for classID, glyphs in enumerate(self.classes()):
            # Class 0 is implicit in the encoding and is never written out.
            if classID == 0:
                continue
            glyphClasses.update((glyph, classID) for glyph in glyphs)
        classDef = ot.ClassDef()
        classDef.classDefs = glyphClasses
        return classDef
# Extreme representable values in 16.16 fixed-point, used as open-ended
# range sentinels for STAT AxisValue records (see buildStatTable: omitted
# rangeMinValue/rangeMaxValue map to -Infinity/+Infinity respectively).
AXIS_VALUE_NEGATIVE_INFINITY = fixedToFloat(-0x80000000, 16)
AXIS_VALUE_POSITIVE_INFINITY = fixedToFloat(0x7FFFFFFF, 16)
STATName = Union[int, str, Dict[str, str]]
"""A raw name ID, English name, or multilingual name."""
def buildStatTable(
    ttFont: TTFont,
    axes,
    locations=None,
    elidedFallbackName: Union[STATName, STATNameStatement] = 2,
    windowsNames: bool = True,
    macNames: bool = True,
) -> None:
    """Add a 'STAT' table to 'ttFont'.

    'axes' is a list of dicts, one per design axis. Each dict must have a
    'tag' item (maps to AxisTag) and a 'name' item, which can be a name ID
    (int), a string, or a dictionary containing multilingual names (see the
    addMultilingualName() name table method); it maps to AxisNameID. An
    optional 'ordering' item maps to AxisOrdering; when omitted, the
    position in the axes list is used instead.

    An axis dict may also carry a 'values' list describing the AxisValue
    records tied to that axis. Each value dict must have a 'name' item
    (same forms as the axis name; maps to ValueNameID) and may have a
    'flags' item (maps to the AxisValue Flags field, 0 when omitted). The
    AxisValue format is inferred from the remaining keys:

    * a 'value' item alone -> Format 1
    * 'value' plus 'linkedValue' -> Format 3
    * a 'nominalValue' item -> Format 2; optional 'rangeMinValue' and
      'rangeMaxValue' items default to -Infinity and +Infinity.

    Example::

        axes = [
            dict(
                tag="wght",
                name="Weight",
                ordering=0,  # optional
                values=[
                    dict(value=100, name='Thin'),
                    dict(value=300, name='Light'),
                    dict(value=400, name='Regular', flags=0x2),
                    dict(value=900, name='Black'),
                ],
            )
        ]

    Format 4 AxisValue tables name a location defined by multiple axes and
    cannot be expressed through 'axes'; supply them via the optional
    'locations' argument: a list of dicts, each with a 'name' item (same
    forms as above), an optional 'flags' item (defaulting to 0x0), and a
    'location' dict mapping axis tags to coordinates (these become the
    AxisIndex and Value fields of the AxisValueRecords).

    Example::

        locations = [
            dict(name='Regular ABCD', location=dict(wght=300, ABCD=100)),
            dict(name='Bold ABCD XYZ', location=dict(wght=600, ABCD=200)),
        ]

    The optional 'elidedFallbackName' argument can be a name ID (int), a
    string, a dictionary containing multilingual names, or a list of
    STATNameStatements; it maps to ElidedFallbackNameID.

    The 'ttFont' argument must be a TTFont instance that already has a
    'name' table. If a 'STAT' table already exists, it will be overwritten
    by the newly created one.
    """
    ttFont["STAT"] = ttLib.newTable("STAT")
    statTable = ttFont["STAT"].table = ot.STAT()
    statTable.ElidedFallbackNameID = _addName(
        ttFont, elidedFallbackName, windows=windowsNames, mac=macNames
    )

    # Per-axis records plus their Format 1/2/3 AxisValue tables.
    axisRecords, axisValues = _buildAxisRecords(
        axes, ttFont, windowsNames=windowsNames, macNames=macNames
    )
    if locations:
        # Format 4 AxisValue records require table version 1.2; they are
        # stored ahead of the single-axis AxisValues.
        statTable.Version = 0x00010002
        multiAxisValues = _buildAxisValuesFormat4(
            locations, axes, ttFont, windowsNames=windowsNames, macNames=macNames
        )
        axisValues = multiAxisValues + axisValues
    else:
        statTable.Version = 0x00010001
    ttFont["name"].names.sort()

    # Store the AxisRecords.
    axisRecordArray = ot.AxisRecordArray()
    axisRecordArray.Axis = axisRecords
    # XXX these should not be hard-coded but computed automatically
    statTable.DesignAxisRecordSize = 8
    statTable.DesignAxisRecord = axisRecordArray
    statTable.DesignAxisCount = len(axisRecords)

    # Store the AxisValue records, if there are any.
    statTable.AxisValueCount = 0
    statTable.AxisValueArray = None
    if axisValues:
        axisValueArray = ot.AxisValueArray()
        axisValueArray.AxisValue = axisValues
        statTable.AxisValueArray = axisValueArray
        statTable.AxisValueCount = len(axisValues)
def _buildAxisRecords(axes, ttFont, windowsNames=True, macNames=True):
    """Translate the 'axes' dicts into AxisRecords plus the single-axis
    (Format 1/2/3) AxisValue tables. Returns (axisRecords, axisValues)."""
    axisRecords = []
    axisValues = []
    for index, axisDict in enumerate(axes):
        record = ot.AxisRecord()
        record.AxisTag = axisDict["tag"]
        # Axis names are allocated in the font-specific name ID range (>= 256).
        record.AxisNameID = _addName(
            ttFont, axisDict["name"], 256, windows=windowsNames, mac=macNames
        )
        record.AxisOrdering = axisDict.get("ordering", index)
        axisRecords.append(record)

        for valueDict in axisDict.get("values", ()):
            value = ot.AxisValue()
            value.AxisIndex = index
            value.Flags = valueDict.get("flags", 0)
            value.ValueNameID = _addName(
                ttFont, valueDict["name"], windows=windowsNames, mac=macNames
            )
            if "value" in valueDict:
                value.Value = valueDict["value"]
                if "linkedValue" in valueDict:
                    # Format 3: a value with a style-linked counterpart.
                    value.Format = 3
                    value.LinkedValue = valueDict["linkedValue"]
                else:
                    value.Format = 1
            elif "nominalValue" in valueDict:
                # Format 2: nominal value with an (optionally open) range.
                value.Format = 2
                value.NominalValue = valueDict["nominalValue"]
                value.RangeMinValue = valueDict.get(
                    "rangeMinValue", AXIS_VALUE_NEGATIVE_INFINITY
                )
                value.RangeMaxValue = valueDict.get(
                    "rangeMaxValue", AXIS_VALUE_POSITIVE_INFINITY
                )
            else:
                raise ValueError("Can't determine format for AxisValue")
            axisValues.append(value)
    return axisRecords, axisValues
def _buildAxisValuesFormat4(locations, axes, ttFont, windowsNames=True, macNames=True):
    """Translate the 'locations' dicts into Format 4 AxisValue tables."""
    axisTagToIndex = {axisDict["tag"]: index for index, axisDict in enumerate(axes)}

    axisValues = []
    for locationDict in locations:
        value = ot.AxisValue()
        value.Format = 4
        value.ValueNameID = _addName(
            ttFont, locationDict["name"], windows=windowsNames, mac=macNames
        )
        value.Flags = locationDict.get("flags", 0)

        # One AxisValueRecord per axis coordinate, ordered by axis index.
        records = []
        for tag, coord in locationDict["location"].items():
            record = ot.AxisValueRecord()
            record.AxisIndex = axisTagToIndex[tag]
            record.Value = coord
            records.append(record)
        records.sort(key=lambda record: record.AxisIndex)

        value.AxisCount = len(records)
        value.AxisValueRecord = records
        axisValues.append(value)
    return axisValues
def _addName(
    ttFont: TTFont,
    value: Union[STATName, STATNameStatement],
    minNameID: int = 0,
    windows: bool = True,
    mac: bool = True,
) -> int:
    """Resolve 'value' to a name ID in ttFont's 'name' table.

    An int is returned unchanged (it is already a name ID); a str becomes
    an English entry; a dict goes through addMultilingualName(); a list
    must contain STATNameStatements, which are stored verbatim under a
    freshly allocated name ID. Raises TypeError for anything else.
    """
    nameTable = ttFont["name"]
    if isinstance(value, int):
        # Already a nameID
        return value
    if isinstance(value, list):
        nameID = nameTable._findUnusedNameID()
        for nameRecord in value:
            if not isinstance(nameRecord, STATNameStatement):
                raise TypeError("value must be a list of STATNameStatements")
            nameTable.setName(
                nameRecord.string,
                nameID,
                nameRecord.platformID,
                nameRecord.platEncID,
                nameRecord.langID,
            )
        return nameID
    if isinstance(value, str):
        names = dict(en=value)
    elif isinstance(value, dict):
        names = value
    else:
        raise TypeError("value must be int, str, dict or list")
    return nameTable.addMultilingualName(
        names, ttFont=ttFont, windows=windows, mac=mac, minNameID=minNameID
    )
def buildMathTable(
    ttFont,
    constants=None,
    italicsCorrections=None,
    topAccentAttachments=None,
    extendedShapes=None,
    mathKerns=None,
    minConnectorOverlap=0,
    vertGlyphVariants=None,
    horizGlyphVariants=None,
    vertGlyphAssembly=None,
    horizGlyphAssembly=None,
):
    """
    Add a 'MATH' table to 'ttFont'.

    'constants' is a dictionary of math constants: keys are the constant
    names from the MATH table specification (with capital first letter),
    values are numbers.

    'italicsCorrections' maps glyph names to italic corrections (numbers).

    'topAccentAttachments' maps glyph names to top accent horizontal
    positions (numbers).

    'extendedShapes' is a set of extended shape glyph names.

    'mathKerns' maps glyph names to dictionaries keyed by side name
    ('TopRight', 'TopLeft', 'BottomRight', 'BottomLeft'); each side maps
    to a tuple of two lists: the correction heights and the kern values
    (both numbers).

    'minConnectorOverlap' is the minimum connector overlap as a number.

    'vertGlyphVariants' / 'horizGlyphVariants' map glyph names to lists of
    (variant glyph name, full advance height/width) tuples.

    'vertGlyphAssembly' / 'horizGlyphAssembly' map glyph names to tuples
    of (assembly parts, italics correction), where each part is a tuple of
    (glyph name, flags, start connector length, end connector length,
    full advance height/width).

    Where a number is expected, an integer or a float can be used; floats
    will be rounded.

    Example::

        constants = {
            "ScriptPercentScaleDown": 70,
            "ScriptScriptPercentScaleDown": 50,
            "DelimitedSubFormulaMinHeight": 24,
            "DisplayOperatorMinHeight": 60,
            ...
        }
        italicsCorrections = {
            "fitalic-math": 100,
            "fbolditalic-math": 120,
            ...
        }
        topAccentAttachments = {
            "circumflexcomb": 500,
            "acutecomb": 400,
            "A": 300,
            "B": 340,
            ...
        }
        extendedShapes = {"parenleft", "parenright", ...}
        mathKerns = {
            "A": {
                "TopRight": ([-50, -100], [10, 20, 30]),
                "TopLeft": ([50, 100], [10, 20, 30]),
                ...
            },
            ...
        }
        vertGlyphVariants = {
            "parenleft": [("parenleft", 700), ("parenleft.size1", 1000), ...],
            "parenright": [("parenright", 700), ("parenright.size1", 1000), ...],
            ...
        }
        vertGlyphAssembly = {
            "braceleft": [
                (
                    ("braceleft.bottom", 0, 0, 200, 500),
                    ("braceleft.extender", 1, 200, 200, 200),
                    ("braceleft.middle", 0, 100, 100, 700),
                    ("braceleft.extender", 1, 200, 200, 200),
                    ("braceleft.top", 0, 200, 0, 500),
                ),
                100,
            ],
            ...
        }
    """
    glyphMap = ttFont.getReverseGlyphMap()

    mathTable = ttLib.newTable("MATH")
    ttFont["MATH"] = mathTable
    mathTable.table = table = ot.MATH()

    table.Version = 0x00010000
    table.populateDefaults()
    table.MathConstants = _buildMathConstants(constants)
    table.MathGlyphInfo = _buildMathGlyphInfo(
        glyphMap,
        italicsCorrections,
        topAccentAttachments,
        extendedShapes,
        mathKerns,
    )
    table.MathVariants = _buildMathVariants(
        glyphMap,
        minConnectorOverlap,
        vertGlyphVariants,
        horizGlyphVariants,
        vertGlyphAssembly,
        horizGlyphAssembly,
    )
def _buildMathConstants(constants):
if not constants:
return None
mathConstants = ot.MathConstants()
for conv in mathConstants.getConverters():
value = otRound(constants.get(conv.name, 0))
if conv.tableClass:
assert issubclass(conv.tableClass, ot.MathValueRecord)
value = _mathValueRecord(value)
setattr(mathConstants, conv.name, value)
return mathConstants
def _buildMathGlyphInfo(
glyphMap,
italicsCorrections,
topAccentAttachments,
extendedShapes,
mathKerns,
):
if not any([extendedShapes, italicsCorrections, topAccentAttachments, mathKerns]):
return None
info = ot.MathGlyphInfo()
info.populateDefaults()
if italicsCorrections:
coverage = buildCoverage(italicsCorrections.keys(), glyphMap)
info.MathItalicsCorrectionInfo = ot.MathItalicsCorrectionInfo()
info.MathItalicsCorrectionInfo.Coverage = coverage
info.MathItalicsCorrectionInfo.ItalicsCorrectionCount = len(coverage.glyphs)
info.MathItalicsCorrectionInfo.ItalicsCorrection = [
_mathValueRecord(italicsCorrections[n]) for n in coverage.glyphs
]
if topAccentAttachments:
coverage = buildCoverage(topAccentAttachments.keys(), glyphMap)
info.MathTopAccentAttachment = ot.MathTopAccentAttachment()
info.MathTopAccentAttachment.TopAccentCoverage = coverage
info.MathTopAccentAttachment.TopAccentAttachmentCount = len(coverage.glyphs)
info.MathTopAccentAttachment.TopAccentAttachment = [
_mathValueRecord(topAccentAttachments[n]) for n in coverage.glyphs
]
if extendedShapes:
info.ExtendedShapeCoverage = buildCoverage(extendedShapes, glyphMap)
if mathKerns:
coverage = buildCoverage(mathKerns.keys(), glyphMap)
info.MathKernInfo = ot.MathKernInfo()
info.MathKernInfo.MathKernCoverage = coverage
info.MathKernInfo.MathKernCount = len(coverage.glyphs)
info.MathKernInfo.MathKernInfoRecords = []
for glyph in coverage.glyphs:
record = ot.MathKernInfoRecord()
for side in {"TopRight", "TopLeft", "BottomRight", "BottomLeft"}:
if side in mathKerns[glyph]:
correctionHeights, kernValues = mathKerns[glyph][side]
assert len(correctionHeights) == len(kernValues) - 1
kern = ot.MathKern()
kern.HeightCount = len(correctionHeights)
kern.CorrectionHeight = [
_mathValueRecord(h) for h in correctionHeights
]
kern.KernValue = [_mathValueRecord(v) for v in kernValues]
setattr(record, f"{side}MathKern", kern)
info.MathKernInfo.MathKernInfoRecords.append(record)
return info
def _buildMathVariants(
glyphMap,
minConnectorOverlap,
vertGlyphVariants,
horizGlyphVariants,
vertGlyphAssembly,
horizGlyphAssembly,
):
if not any(
[vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly]
):
return None
variants = ot.MathVariants()
variants.populateDefaults()
variants.MinConnectorOverlap = minConnectorOverlap
if vertGlyphVariants or vertGlyphAssembly:
variants.VertGlyphCoverage, variants.VertGlyphConstruction = (
_buildMathGlyphConstruction(
glyphMap,
vertGlyphVariants,
vertGlyphAssembly,
)
)
if horizGlyphVariants or horizGlyphAssembly:
variants.HorizGlyphCoverage, variants.HorizGlyphConstruction = (
_buildMathGlyphConstruction(
glyphMap,
horizGlyphVariants,
horizGlyphAssembly,
)
)
return variants
def _buildMathGlyphConstruction(glyphMap, variants, assemblies):
    """Build (coverage, [MathGlyphConstruction]) for every glyph that has
    size variants and/or an assembly, in coverage order."""
    covered = set()
    if variants:
        covered.update(variants.keys())
    if assemblies:
        covered.update(assemblies.keys())
    coverage = buildCoverage(covered, glyphMap)

    constructions = []
    for glyphName in coverage.glyphs:
        construction = ot.MathGlyphConstruction()
        construction.populateDefaults()

        if variants and glyphName in variants:
            glyphVariants = variants[glyphName]
            construction.VariantCount = len(glyphVariants)
            construction.MathGlyphVariantRecord = records = []
            for variantName, advance in glyphVariants:
                record = ot.MathGlyphVariantRecord()
                record.VariantGlyph = variantName
                # Advances may be floats; the table stores integers.
                record.AdvanceMeasurement = otRound(advance)
                records.append(record)

        if assemblies and glyphName in assemblies:
            parts, italicsCorrection = assemblies[glyphName]
            assembly = construction.GlyphAssembly = ot.GlyphAssembly()
            assembly.ItalicsCorrection = _mathValueRecord(italicsCorrection)
            assembly.PartCount = len(parts)
            assembly.PartRecords = partRecords = []
            for partName, flags, start, end, advance in parts:
                record = ot.GlyphPartRecord()
                record.glyph = partName
                record.PartFlags = int(flags)
                record.StartConnectorLength = otRound(start)
                record.EndConnectorLength = otRound(end)
                record.FullAdvance = otRound(advance)
                partRecords.append(record)

        constructions.append(construction)
    return coverage, constructions
def _mathValueRecord(value):
    """Wrap a number in a MathValueRecord (value rounded, no device table)."""
    record = ot.MathValueRecord()
    record.Value = otRound(value)
    return record
# Public API of the maxContextCalc module: only the font-level entry point.
__all__ = ["maxCtxFont"]
def maxCtxFont(font):
    """Calculate the usMaxContext value for an entire font.

    Scans every GSUB and GPOS lookup subtable and keeps the largest
    context length found; fonts without layout data yield 0.
    """
    result = 0
    for tableTag in ("GSUB", "GPOS"):
        if tableTag not in font:
            continue
        lookupList = font[tableTag].table.LookupList
        if not lookupList:
            continue
        for lookup in lookupList.Lookup:
            for subtable in lookup.SubTable:
                result = maxCtxSubtable(result, tableTag, lookup.LookupType, subtable)
    return result
def maxCtxSubtable(maxCtx, tag, lookupType, st):
    """Calculate usMaxContext based on a single lookup table (and an existing
    max value).
    """
    # Single positioning; single / multiple / alternate substitution:
    # one glyph of context.
    if (tag == "GPOS" and lookupType == 1) or (
        tag == "GSUB" and lookupType in (1, 2, 3)
    ):
        return max(maxCtx, 1)
    # Pair positioning: two glyphs.
    if tag == "GPOS" and lookupType == 2:
        return max(maxCtx, 2)
    # Ligature substitution: the longest component sequence.
    if tag == "GSUB" and lookupType == 4:
        for ligatures in st.ligatures.values():
            for ligature in ligatures:
                maxCtx = max(maxCtx, ligature.CompCount)
        return maxCtx
    # Contextual lookups.
    if (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5):
        return maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub")
    # Chained contextual lookups.
    if (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6):
        return maxCtxContextualSubtable(
            maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain"
        )
    # Extension lookups: recurse into the wrapped subtable.
    if (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7):
        return maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
    # Reverse chaining contextual single substitution.
    if tag == "GSUB" and lookupType == 8:
        return maxCtxContextualRule(maxCtx, st, "Reverse")
    return maxCtx
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""):
    """Calculate usMaxContext based on a contextual feature subtable."""
    if st.Format == 3:
        # Format 3 stores the glyph counts directly on the subtable.
        return maxCtxContextualRule(maxCtx, st, chain)

    # Formats 1 and 2 differ only in the attribute names of their rule sets.
    if st.Format == 1:
        setTemplate, ruleTemplate = "%s%sRuleSet", "%s%sRule"
    elif st.Format == 2:
        setTemplate, ruleTemplate = "%s%sClassSet", "%s%sClassRule"
    else:
        return maxCtx

    for ruleset in getattr(st, setTemplate % (chain, ruleType)):
        if ruleset is None:
            continue
        for rule in getattr(ruleset, ruleTemplate % (chain, ruleType)):
            if rule is None:
                continue
            maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
    return maxCtx
def maxCtxContextualRule(maxCtx, st, chain):
    """Calculate usMaxContext based on a contextual feature rule."""
    if chain == "Reverse":
        # Reverse chaining matches one input glyph plus its lookahead.
        return max(maxCtx, 1 + st.LookAheadGlyphCount)
    if chain:
        return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
    return max(maxCtx, st.GlyphCount)
"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects.
The Pen Protocol
A Pen is a kind of object that standardizes the way how to "draw" outlines:
it is a middle man between an outline and a drawing. In other words:
it is an abstraction for drawing outlines, making sure that outline objects
don't need to know the details about how and where they're being drawn, and
that drawings don't need to know the details of how outlines are stored.
The most basic pattern is this::
outline.draw(pen) # 'outline' draws itself onto 'pen'
Pens can be used to render outlines to the screen, but also to construct
new outlines. Eg. an outline object can be both a drawable object (it has a
draw() method) as well as a pen itself: you *build* an outline using pen
methods.
The AbstractPen class defines the Pen protocol. It implements almost
nothing (only no-op closePath() and endPath() methods), but is useful
for documentation purposes. Subclassing it basically tells the reader:
"this class implements the Pen protocol.". An example of an AbstractPen
subclass is :py:class:`fontTools.pens.transformPen.TransformPen`.
The BasePen class is a base implementation useful for pens that actually
draw (for example a pen renders outlines using a native graphics engine).
BasePen contains a lot of base functionality, making it very easy to build
a pen that fully conforms to the pen protocol. Note that if you subclass
BasePen, you *don't* override moveTo(), lineTo(), etc., but _moveTo(),
_lineTo(), etc. See the BasePen doc string for details. Examples of
BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and
fontTools.pens.cocoaPen.CocoaPen.
Coordinates are usually expressed as (x, y) tuples, but generally any
sequence of length 2 will do.
"""
from typing import Tuple, Dict
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.transform import DecomposedTransform, Identity
# Public API of this module.
__all__ = [
    "AbstractPen",
    "NullPen",
    "BasePen",
    "PenError",
    "decomposeSuperBezierSegment",
    "decomposeQuadraticSegment",
]
class PenError(Exception):
    """Base exception for errors raised during penning (drawing with a pen)."""
class OpenContourError(PenError):
    """Error concerning an open contour.

    NOTE(review): raised by code outside this file; confirm the exact
    condition at the call sites.
    """

    pass
class AbstractPen:
    """Abstract class defining the Pen protocol.

    It implements almost nothing (only no-op closePath() and endPath()
    methods); subclass it to declare "this class implements the Pen
    protocol".
    """

    def moveTo(self, pt: Tuple[float, float]) -> None:
        """Begin a new sub path, set the current point to 'pt'. You must
        end each sub path with a call to pen.closePath() or pen.endPath().
        """
        raise NotImplementedError

    def lineTo(self, pt: Tuple[float, float]) -> None:
        """Draw a straight line from the current point to 'pt'."""
        raise NotImplementedError

    def curveTo(self, *points: Tuple[float, float]) -> None:
        """Draw a cubic bezier with an arbitrary number of control points.

        The last point specified is on-curve, all others are off-curve
        (control) points. If the number of control points is > 2, the
        segment is split into multiple bezier segments. This works
        like this:

        Let n be the number of control points (which is the number of
        arguments to this call minus 1). If n==2, a plain vanilla cubic
        bezier is drawn. If n==1, we fall back to a quadratic segment and
        if n==0 we draw a straight line. It gets interesting when n>2:
        n-1 PostScript-style cubic segments will be drawn as if it were
        one curve. See decomposeSuperBezierSegment().

        The conversion algorithm used for n>2 is inspired by NURB
        splines, and is conceptually equivalent to the TrueType "implied
        points" principle. See also decomposeQuadraticSegment().
        """
        raise NotImplementedError

    def qCurveTo(self, *points: Tuple[float, float]) -> None:
        """Draw a whole string of quadratic curve segments.

        The last point specified is on-curve, all others are off-curve
        points.

        This method implements TrueType-style curves, breaking up curves
        using 'implied points': between each two consecutive off-curve points,
        there is one implied point exactly in the middle between them. See
        also decomposeQuadraticSegment().

        The last argument (normally the on-curve point) may be None.
        This is to support contours that have NO on-curve points (a rarely
        seen feature of TrueType outlines).
        """
        raise NotImplementedError

    def closePath(self) -> None:
        """Close the current sub path. You must call either pen.closePath()
        or pen.endPath() after each sub path.
        """
        pass

    def endPath(self) -> None:
        """End the current sub path, but don't close it. You must call
        either pen.closePath() or pen.endPath() after each sub path.
        """
        pass

    def addComponent(
        self,
        glyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
    ) -> None:
        """Add a sub glyph. The 'transformation' argument must be a 6-tuple
        containing an affine transformation, or a Transform object from the
        fontTools.misc.transform module. More precisely: it should be a
        sequence containing 6 numbers.
        """
        raise NotImplementedError

    def addVarComponent(
        self,
        glyphName: str,
        transformation: DecomposedTransform,
        location: Dict[str, float],
    ) -> None:
        """Add a VarComponent sub glyph. The 'transformation' argument
        must be a DecomposedTransform from the fontTools.misc.transform module,
        and the 'location' argument must be a dictionary mapping axis tags
        to their locations.
        """
        # GlyphSet decomposes for us
        raise AttributeError
class NullPen(AbstractPen):
    """A pen that does nothing: every pen-protocol method is accepted and
    ignored."""

    def moveTo(self, pt):
        pass

    def lineTo(self, pt):
        pass

    def curveTo(self, *points):
        pass

    def qCurveTo(self, *points):
        pass

    def closePath(self):
        pass

    def endPath(self):
        pass

    def addComponent(self, glyphName, transformation):
        pass

    def addVarComponent(self, glyphName, transformation, location):
        pass
class LoggingPen(LogMixin, AbstractPen):
    """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)"""

    pass
# Subclasses KeyError, so pre-existing ``except KeyError`` handlers still
# catch it — NOTE(review): presumably for backward compatibility; confirm.
class MissingComponentError(KeyError):
    """Indicates a component pointing to a non-existent glyph in the glyphset."""
class DecomposingPen(LoggingPen):
    """Implements a 'addComponent' method that decomposes components
    (i.e. draws them onto self as simple contours).

    It can also be used as a mixin class (e.g. see ContourRecordingPen).

    You must override moveTo, lineTo, curveTo and qCurveTo. You may
    additionally override closePath, endPath and addComponent.

    By default a warning message is logged when a base glyph is missing;
    set the class variable ``skipMissingComponents`` to False if you want
    all instances of a sub-class to raise a :class:`MissingComponentError`
    exception by default.
    """

    skipMissingComponents = True
    # alias error for convenience
    MissingComponentError = MissingComponentError

    def __init__(
        self,
        glyphSet,
        *args,
        skipMissingComponents=None,
        reverseFlipped=False,
        **kwargs,
    ):
        """Takes a 'glyphSet' argument (dict), in which the glyphs that are
        referenced as components are looked up by their name.

        If the optional 'reverseFlipped' argument is True, components whose
        transformation matrix has a negative determinant will be decomposed
        with a reversed path direction to compensate for the flip.

        The optional 'skipMissingComponents' argument can be set to
        True/False to override the homonymous class attribute for a given
        pen instance.
        """
        super(DecomposingPen, self).__init__(*args, **kwargs)
        self.glyphSet = glyphSet
        if skipMissingComponents is None:
            self.skipMissingComponents = self.__class__.skipMissingComponents
        else:
            self.skipMissingComponents = skipMissingComponents
        self.reverseFlipped = reverseFlipped

    def addComponent(self, glyphName, transformation):
        """Transform the points of the base glyph and draw it onto self."""
        from fontTools.pens.transformPen import TransformPen

        try:
            glyph = self.glyphSet[glyphName]
        except KeyError:
            if not self.skipMissingComponents:
                raise MissingComponentError(glyphName)
            self.log.warning("glyph '%s' is missing from glyphSet; skipped" % glyphName)
            return
        target = self if transformation == Identity else TransformPen(self, transformation)
        if self.reverseFlipped:
            # A negative determinant means the transformation flips the
            # component, which reverses its contour direction; wrap the
            # pen to reverse it back.
            a, b, c, d = transformation[:4]
            if a * d - b * c < 0:
                from fontTools.pens.reverseContourPen import ReverseContourPen

                target = ReverseContourPen(target)
        glyph.draw(target)

    def addVarComponent(self, glyphName, transformation, location):
        # GlyphSet decomposes for us
        raise AttributeError
class BasePen(DecomposingPen):
    """Base class for drawing pens. You must override _moveTo, _lineTo and
    _curveToOne. You may additionally override _closePath, _endPath,
    addComponent, addVarComponent, and/or _qCurveToOne. You should not
    override any other methods.

    The public pen-protocol methods maintain the current point and
    decompose multi-control-point segments before delegating to the
    private ``_xxxTo`` hooks.
    """

    def __init__(self, glyphSet=None):
        super(BasePen, self).__init__(glyphSet)
        # The current on-curve point; None between sub paths.
        self.__currentPoint = None

    # must override

    def _moveTo(self, pt):
        raise NotImplementedError

    def _lineTo(self, pt):
        raise NotImplementedError

    def _curveToOne(self, pt1, pt2, pt3):
        raise NotImplementedError

    # may override

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def _qCurveToOne(self, pt1, pt2):
        """This method implements the basic quadratic curve type. The
        default implementation delegates the work to the cubic curve
        function. Optionally override with a native implementation.
        """
        # Elevate the quadratic to an equivalent cubic: each cubic control
        # point lies two thirds of the way from an end point towards the
        # single quadratic control point.
        pt0x, pt0y = self.__currentPoint
        pt1x, pt1y = pt1
        pt2x, pt2y = pt2
        mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
        mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
        mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
        mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
        self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)

    # don't override

    def _getCurrentPoint(self):
        """Return the current point. This is not part of the public
        interface, yet is useful for subclasses.
        """
        return self.__currentPoint

    def closePath(self):
        self._closePath()
        self.__currentPoint = None

    def endPath(self):
        self._endPath()
        self.__currentPoint = None

    def moveTo(self, pt):
        self._moveTo(pt)
        self.__currentPoint = pt

    def lineTo(self, pt):
        self._lineTo(pt)
        self.__currentPoint = pt

    def curveTo(self, *points):
        n = len(points) - 1  # 'n' is the number of control points
        assert n >= 0
        if n == 2:
            # The common case, we have exactly two BCP's, so this is a standard
            # cubic bezier. Even though decomposeSuperBezierSegment() handles
            # this case just fine, we special-case it anyway since it's so
            # common.
            self._curveToOne(*points)
            self.__currentPoint = points[-1]
        elif n > 2:
            # n is the number of control points; split curve into n-1 cubic
            # bezier segments. The algorithm used here is inspired by NURB
            # splines and the TrueType "implied point" principle, and ensures
            # the smoothest possible connection between two curve segments,
            # with no disruption in the curvature. It is practical since it
            # allows one to construct multiple bezier segments with a much
            # smaller amount of points.
            _curveToOne = self._curveToOne
            for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
                _curveToOne(pt1, pt2, pt3)
                self.__currentPoint = pt3
        elif n == 1:
            # One control point: this is really a quadratic segment.
            self.qCurveTo(*points)
        elif n == 0:
            # No control points at all: degenerate to a straight line.
            self.lineTo(points[0])
        else:
            raise AssertionError("can't get there from here")

    def qCurveTo(self, *points):
        n = len(points) - 1  # 'n' is the number of control points
        assert n >= 0
        if points[-1] is None:
            # Special case for TrueType quadratics: it is possible to
            # define a contour with NO on-curve points. BasePen supports
            # this by allowing the final argument (the expected on-curve
            # point) to be None. We simulate the feature by making the implied
            # on-curve point between the last and the first off-curve points
            # explicit.
            x, y = points[-2]  # last off-curve point
            nx, ny = points[0]  # first off-curve point
            impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
            self.__currentPoint = impliedStartPoint
            # Start the sub path at the implied point before drawing.
            self._moveTo(impliedStartPoint)
            points = points[:-1] + (impliedStartPoint,)
        if n > 0:
            # Split the string of points into discrete quadratic curve
            # segments. Between any two consecutive off-curve points
            # there's an implied on-curve point exactly in the middle.
            # This is where the segment splits.
            _qCurveToOne = self._qCurveToOne
            for pt1, pt2 in decomposeQuadraticSegment(points):
                _qCurveToOne(pt1, pt2)
                self.__currentPoint = pt2
        else:
            self.lineTo(points[0])
def decomposeSuperBezierSegment(points):
    """Split the SuperBezier described by 'points' into a list of regular
    bezier segments.

    The 'points' argument must be a sequence with length 3 or greater of
    (x, y) coordinates: the last point is the destination on-curve point,
    the rest are off-curve points. The start point should not be supplied.

    Returns a list of (pt1, pt2, pt3) tuples, each specifying a regular
    curveto-style bezier segment.
    """
    numControls = len(points) - 1
    assert numControls > 1
    segments = []
    start = points[0]
    firstHandle = None
    for i in range(2, numControls + 1):
        # Interpolate between consecutive control points; fewer divisions
        # are needed near the ends of the point string.
        nDivisions = min(i, 3, numControls - i + 2)
        prev = points[i - 2]
        cur = points[i - 1]
        for j in range(1, nDivisions):
            t = j / nDivisions
            interpolated = (
                prev[0] + t * (cur[0] - prev[0]),
                prev[1] + t * (cur[1] - prev[1]),
            )
            if firstHandle is None:
                firstHandle = interpolated
            else:
                # The implied on-curve point sits midway between the two
                # interpolated handles; it closes the current segment.
                onCurve = (
                    0.5 * (firstHandle[0] + interpolated[0]),
                    0.5 * (firstHandle[1] + interpolated[1]),
                )
                segments.append((start, firstHandle, onCurve))
                start, firstHandle = interpolated, None
    segments.append((start, points[-2], points[-1]))
    return segments
def decomposeQuadraticSegment(points):
    """Split the quadratic curve segment described by 'points' into a list
    of "atomic" quadratic segments.

    The 'points' argument must be a sequence with length 2 or greater of
    (x, y) coordinates: the last point is the destination on-curve point,
    the rest are off-curve points. The start point should not be supplied.

    Returns a list of (pt1, pt2) tuples, each specifying a plain quadratic
    bezier segment.
    """
    numControls = len(points) - 1
    assert numControls > 0
    # Between each pair of consecutive off-curve points lies an implied
    # on-curve point exactly halfway between them; split there.
    segments = [
        (
            points[i],
            (
                0.5 * (points[i][0] + points[i + 1][0]),
                0.5 * (points[i][1] + points[i + 1][1]),
            ),
        )
        for i in range(numControls - 1)
    ]
    segments.append((points[-2], points[-1]))
    return segments
class _TestPen(BasePen):
    """Test class that prints PostScript to stdout."""

    def _moveTo(self, pt):
        print("%s %s moveto" % (pt[0], pt[1]))

    def _lineTo(self, pt):
        print("%s %s lineto" % (pt[0], pt[1]))

    def _curveToOne(self, bcp1, bcp2, pt):
        # PostScript operand order: both control points first, then the
        # destination point.
        print(
            "%s %s %s %s %s %s curveto"
            % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
        )

    def _closePath(self):
        print("closepath")
if __name__ == "__main__":
    # Smoke test: draw a simple contour and print its PostScript to stdout.
    pen = _TestPen(None)
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
    pen.closePath()
    pen = _TestPen(None)
    # testing the "no on-curve point" scenario
    pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
    pen.closePath()
from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect
from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
from fontTools.pens.basePen import BasePen
__all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
    """Pen to calculate the "control bounds" of a shape. This is the
    bounding box of all control points, so may be larger than the
    actual bounding box if there are curves that don't have points
    on their extremes.

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax).

    If ``ignoreSinglePoints`` is True, single points are ignored.
    """

    def __init__(self, glyphSet, ignoreSinglePoints=False):
        BasePen.__init__(self, glyphSet)
        self.ignoreSinglePoints = ignoreSinglePoints
        self.init()

    def init(self):
        # Reset accumulated state so the pen can be reused.
        self.bounds = None
        self._start = None

    def _moveTo(self, pt):
        # Defer adding the start point until a segment follows, so that
        # lone points can be skipped when ignoreSinglePoints is set.
        self._start = pt
        if not self.ignoreSinglePoints:
            self._addMoveTo()

    def _addMoveTo(self):
        start = self._start
        if start is None:
            return
        self._start = None
        if self.bounds is None:
            # First point seen: the box collapses to it.
            x, y = start
            self.bounds = (x, y, x, y)
        else:
            self.bounds = updateBounds(self.bounds, start)

    def _lineTo(self, pt):
        self._addMoveTo()
        self.bounds = updateBounds(self.bounds, pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp1, bcp2, pt):
            box = updateBounds(box, point)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp, pt):
            box = updateBounds(box, point)
        self.bounds = box
class BoundsPen(ControlBoundsPen):
    """Pen to calculate the bounds of a shape. It calculates the
    correct bounds even when the shape contains curves that don't
    have points on their extremes. This is somewhat slower to compute
    than the "control bounds".

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax)
    """

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        # Only pay for the exact cubic extrema when a control point falls
        # outside the box built from on-curve points.
        if not (pointInRect(bcp1, box) and pointInRect(bcp2, box)):
            curveBox = calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt)
            box = unionRect(box, curveBox)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        # Same idea for quadratics, with a single control point.
        if not pointInRect(bcp, box):
            curveBox = calcQuadraticBounds(self._getCurrentPoint(), bcp, pt)
            box = unionRect(box, curveBox)
        self.bounds = box
# --- venv\Lib\site-packages\fontTools\pens\cairoPen.py ---
"""Pen to draw to a Cairo graphics library context."""
from fontTools.pens.basePen import BasePen
__all__ = ["CairoPen"]
class CairoPen(BasePen):
    """Pen to draw to a Cairo graphics library context."""

    def __init__(self, glyphSet, context):
        BasePen.__init__(self, glyphSet)
        # The Cairo drawing context every pen method forwards to.
        self.context = context

    def _moveTo(self, p):
        x, y = p
        self.context.move_to(x, y)

    def _lineTo(self, p):
        x, y = p
        self.context.line_to(x, y)

    def _curveToOne(self, p1, p2, p3):
        x1, y1 = p1
        x2, y2 = p2
        x3, y3 = p3
        self.context.curve_to(x1, y1, x2, y2, x3, y3)

    def _closePath(self):
        self.context.close_path()
# --- venv\Lib\site-packages\fontTools\pens\cocoaPen.py ---
from fontTools.pens.basePen import BasePen
__all__ = ["CocoaPen"]
class CocoaPen(BasePen):
    """Pen that accumulates the drawn outline into an AppKit ``NSBezierPath``."""

    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        if path is not None:
            self.path = path
        else:
            # Import lazily so this module can be imported on non-macOS systems.
            from AppKit import NSBezierPath

            self.path = NSBezierPath.bezierPath()

    def _moveTo(self, p):
        self.path.moveToPoint_(p)

    def _lineTo(self, p):
        self.path.lineToPoint_(p)

    def _curveToOne(self, p1, p2, p3):
        # NSBezierPath wants the destination first, then the two control points.
        self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)

    def _closePath(self):
        self.path.closePath()
# --- venv\Lib\site-packages\fontTools\pens\cu2quPen.py ---
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic
from fontTools.pens.basePen import decomposeSuperBezierSegment
from fontTools.pens.filterPen import FilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.pointPen import BasePointToSegmentPen
from fontTools.pens.pointPen import ReverseContourPointPen
class Cu2QuPen(FilterPen):
    """A filter pen to convert cubic bezier curves to quadratic b-splines
    using the FontTools SegmentPen protocol.

    Args:
        other_pen: another SegmentPen used to draw the transformed outline.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        reverse_direction: flip the contours' direction but keep starting point.
        stats: a dictionary counting the point numbers of quadratic segments.
        all_quadratic: if True (default), only quadratic b-splines are generated.
            if False, quadratic curves or cubic curves are generated depending
            on which one is more economical.
    """

    def __init__(
        self,
        other_pen,
        max_err,
        reverse_direction=False,
        stats=None,
        all_quadratic=True,
    ):
        target = ReverseContourPen(other_pen) if reverse_direction else other_pen
        super().__init__(target)
        self.max_err = max_err
        self.stats = stats
        self.all_quadratic = all_quadratic

    def _convert_curve(self, pt1, pt2, pt3):
        # Approximate one cubic (anchored at the current point) and emit it.
        cubic = (self.current_pt, pt1, pt2, pt3)
        result = curve_to_quadratic(cubic, self.max_err, self.all_quadratic)
        if self.stats is not None:
            key = str(len(result) - 2)
            self.stats[key] = self.stats.get(key, 0) + 1
        if not self.all_quadratic and len(result) == 4:
            # cu2qu decided keeping the cubic was more economical.
            super().curveTo(*result[1:])
        else:
            assert self.all_quadratic or len(result) == 3
            self.qCurveTo(*result[1:])

    def curveTo(self, *points):
        count = len(points)
        if count == 3:
            # this is the most common case, so we special-case it
            self._convert_curve(*points)
        elif count > 3:
            # "super" bezier: break into plain cubics first.
            for segment in decomposeSuperBezierSegment(points):
                self._convert_curve(*segment)
        else:
            self.qCurveTo(*points)
class Cu2QuPointPen(BasePointToSegmentPen):
    """A filter pen to convert cubic bezier curves to quadratic b-splines
    using the FontTools PointPen protocol.

    Args:
        other_point_pen: another PointPen used to draw the transformed outline.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        reverse_direction: reverse the winding direction of all contours.
        stats: a dictionary counting the point numbers of quadratic segments.
        all_quadratic: if True (default), only quadratic b-splines are generated.
            if False, quadratic curves or cubic curves are generated depending
            on which one is more economical.
    """

    # Minimum point count per segment type, paired with the comparison
    # operator used to validate it in _drawPoints ("qcurve" accepts any
    # count >= 2, the others must match exactly).
    __points_required = {
        "move": (1, operator.eq),
        "line": (1, operator.eq),
        "qcurve": (2, operator.ge),
        "curve": (3, operator.eq),
    }

    def __init__(
        self,
        other_point_pen,
        max_err,
        reverse_direction=False,
        stats=None,
        all_quadratic=True,
    ):
        BasePointToSegmentPen.__init__(self)
        if reverse_direction:
            self.pen = ReverseContourPointPen(other_point_pen)
        else:
            self.pen = other_point_pen
        self.max_err = max_err
        self.stats = stats
        self.all_quadratic = all_quadratic

    def _flushContour(self, segments):
        # Convert each cubic "curve" segment to quadratic(s); other segment
        # types pass through unchanged.
        assert len(segments) >= 1
        closed = segments[0][0] != "move"
        new_segments = []
        prev_points = segments[-1][1]
        prev_on_curve = prev_points[-1][0]
        for segment_type, points in segments:
            if segment_type == "curve":
                for sub_points in self._split_super_bezier_segments(points):
                    on_curve, smooth, name, kwargs = sub_points[-1]
                    bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
                    cubic = [prev_on_curve, bcp1, bcp2, on_curve]
                    quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic)
                    if self.stats is not None:
                        # Tally by number of off-curve points in the result.
                        n = str(len(quad) - 2)
                        self.stats[n] = self.stats.get(n, 0) + 1
                    new_points = [(pt, False, None, {}) for pt in quad[1:-1]]
                    new_points.append((on_curve, smooth, name, kwargs))
                    if self.all_quadratic or len(new_points) == 2:
                        new_segments.append(["qcurve", new_points])
                    else:
                        new_segments.append(["curve", new_points])
                    prev_on_curve = sub_points[-1][0]
            else:
                new_segments.append([segment_type, points])
                prev_on_curve = points[-1][0]
        if closed:
            # the BasePointToSegmentPen.endPath method that calls _flushContour
            # rotates the point list of closed contours so that they end with
            # the first on-curve point. We restore the original starting point.
            new_segments = new_segments[-1:] + new_segments[:-1]
        self._drawPoints(new_segments)

    def _split_super_bezier_segments(self, points):
        # Break a "super" bezier (more than 2 control points) into plain
        # cubic sub-segments; a regular cubic is returned as-is.
        sub_segments = []
        # n is the number of control points
        n = len(points) - 1
        if n == 2:
            # a simple bezier curve segment
            sub_segments.append(points)
        elif n > 2:
            # a "super" bezier; decompose it
            on_curve, smooth, name, kwargs = points[-1]
            num_sub_segments = n - 1
            for i, sub_points in enumerate(
                decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
            ):
                new_segment = []
                for point in sub_points[:-1]:
                    new_segment.append((point, False, None, {}))
                if i == (num_sub_segments - 1):
                    # the last on-curve keeps its original attributes
                    new_segment.append((on_curve, smooth, name, kwargs))
                else:
                    # on-curves of sub-segments are always "smooth"
                    new_segment.append((sub_points[-1], True, None, {}))
                sub_segments.append(new_segment)
        else:
            raise AssertionError("expected 2 control points, found: %d" % n)
        return sub_segments

    def _drawPoints(self, segments):
        # Replay the converted segments onto the wrapped point pen.
        pen = self.pen
        pen.beginPath()
        last_offcurves = []
        points_required = self.__points_required
        for i, (segment_type, points) in enumerate(segments):
            if segment_type in points_required:
                n, op = points_required[segment_type]
                assert op(len(points), n), (
                    f"illegal {segment_type!r} segment point count: "
                    f"expected {n}, got {len(points)}"
                )
                offcurves = points[:-1]
                if i == 0:
                    # any off-curve points preceding the first on-curve
                    # will be appended at the end of the contour
                    last_offcurves = offcurves
                else:
                    for pt, smooth, name, kwargs in offcurves:
                        pen.addPoint(pt, None, smooth, name, **kwargs)
                pt, smooth, name, kwargs = points[-1]
                if pt is None:
                    assert segment_type == "qcurve"
                    # special quadratic contour with no on-curve points:
                    # we need to skip the "None" point. See also the Pen
                    # protocol's qCurveTo() method and fontTools.pens.basePen
                    pass
                else:
                    pen.addPoint(pt, segment_type, smooth, name, **kwargs)
            else:
                raise AssertionError("unexpected segment type: %r" % segment_type)
        for pt, smooth, name, kwargs in last_offcurves:
            pen.addPoint(pt, None, smooth, name, **kwargs)
        pen.endPath()

    def addComponent(self, baseGlyphName, transformation):
        # Components are forwarded untouched; must not occur mid-contour.
        assert self.currentPath is None
        self.pen.addComponent(baseGlyphName, transformation)
class Cu2QuMultiPen:
    """A filter multi-pen to convert cubic bezier curves to quadratic b-splines
    in a interpolation-compatible manner, using the FontTools SegmentPen protocol.

    Args:

        other_pens: list of SegmentPens used to draw the transformed outlines.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        reverse_direction: flip the contours' direction but keep starting point.

    This pen does not follow the normal SegmentPen protocol. Instead, its
    moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are
    arguments that would normally be passed to a SegmentPen, one item for
    each of the pens in other_pens.
    """

    # TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce
    # Remove start_pts and _add_moveTO

    def __init__(self, other_pens, max_err, reverse_direction=False):
        if reverse_direction:
            other_pens = [
                ReverseContourPen(pen, outputImpliedClosingLine=True)
                for pen in other_pens
            ]
        self.pens = other_pens
        self.max_err = max_err
        # start_pts: pending moveTo arguments not yet flushed to the pens.
        # current_pts: last on-curve point per pen; None when no contour is open.
        self.start_pts = None
        self.current_pts = None

    def _check_contour_is_open(self):
        # Guard: segment methods are only legal after a moveTo.
        if self.current_pts is None:
            raise AssertionError("moveTo is required")

    def _check_contour_is_closed(self):
        # Guard: moveTo/addComponent are only legal between contours.
        if self.current_pts is not None:
            raise AssertionError("closePath or endPath is required")

    def _add_moveTo(self):
        # Flush the deferred moveTo to every wrapped pen, once.
        if self.start_pts is not None:
            for pt, pen in zip(self.start_pts, self.pens):
                pen.moveTo(*pt)
            self.start_pts = None

    def moveTo(self, pts):
        self._check_contour_is_closed()
        self.start_pts = self.current_pts = pts
        self._add_moveTo()

    def lineTo(self, pts):
        self._check_contour_is_open()
        self._add_moveTo()
        for pt, pen in zip(pts, self.pens):
            pen.lineTo(*pt)
        self.current_pts = pts

    def qCurveTo(self, pointsList):
        self._check_contour_is_open()
        # A single-point "qcurve" degenerates to a line.
        if len(pointsList[0]) == 1:
            self.lineTo([(points[0],) for points in pointsList])
            return
        self._add_moveTo()
        current_pts = []
        for points, pen in zip(pointsList, self.pens):
            pen.qCurveTo(*points)
            current_pts.append((points[-1],))
        self.current_pts = current_pts

    def _curves_to_quadratic(self, pointsList):
        # Convert all masters' cubics together so the results interpolate.
        curves = []
        for current_pt, points in zip(self.current_pts, pointsList):
            curves.append(current_pt + points)
        quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves))
        pointsList = []
        for quadratic in quadratics:
            pointsList.append(quadratic[1:])
        self.qCurveTo(pointsList)

    def curveTo(self, pointsList):
        self._check_contour_is_open()
        self._curves_to_quadratic(pointsList)

    def closePath(self):
        self._check_contour_is_open()
        # Only emit closePath if the moveTo was actually flushed.
        if self.start_pts is None:
            for pen in self.pens:
                pen.closePath()
        self.current_pts = self.start_pts = None

    def endPath(self):
        self._check_contour_is_open()
        # Only emit endPath if the moveTo was actually flushed.
        if self.start_pts is None:
            for pen in self.pens:
                pen.endPath()
        self.current_pts = self.start_pts = None

    def addComponent(self, glyphName, transformations):
        self._check_contour_is_closed()
        for trans, pen in zip(transformations, self.pens):
            pen.addComponent(glyphName, trans)
from __future__ import annotations
from fontTools.pens.basePen import AbstractPen, DecomposingPen
from fontTools.pens.pointPen import AbstractPointPen, DecomposingPointPen
from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object):
    """Mixin that forwards component references, unmodified, to ``self._outPen``."""

    def addComponent(self, glyphName, transformation, **kwargs):
        self._outPen.addComponent(glyphName, transformation, **kwargs)
class FilterPen(_PassThruComponentsMixin, AbstractPen):
    """Base class for pens that apply some transformation to the coordinates
    they receive and pass them to another pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPen
    >>> rec = RecordingPen()
    >>> pen = FilterPen(rec)
    >>> v = iter(rec.value)

    >>> pen.moveTo((0, 0))
    >>> next(v)
    ('moveTo', ((0, 0),))

    >>> pen.lineTo((1, 1))
    >>> next(v)
    ('lineTo', ((1, 1),))

    >>> pen.curveTo((2, 2), (3, 3), (4, 4))
    >>> next(v)
    ('curveTo', ((2, 2), (3, 3), (4, 4)))

    >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8))
    >>> next(v)
    ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8)))

    >>> pen.closePath()
    >>> next(v)
    ('closePath', ())

    >>> pen.moveTo((9, 9))
    >>> next(v)
    ('moveTo', ((9, 9),))

    >>> pen.endPath()
    >>> next(v)
    ('endPath', ())

    >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0))
    >>> next(v)
    ('addComponent', ('foo', (1, 0, 0, 1, 0, 0)))
    """

    def __init__(self, outPen):
        self._outPen = outPen
        # Tracks the last on-curve point seen, for subclasses that need it.
        self.current_pt = None

    def moveTo(self, pt):
        self._outPen.moveTo(pt)
        self.current_pt = pt

    def lineTo(self, pt):
        self._outPen.lineTo(pt)
        self.current_pt = pt

    def curveTo(self, *points):
        self._outPen.curveTo(*points)
        self.current_pt = points[-1]

    def qCurveTo(self, *points):
        self._outPen.qCurveTo(*points)
        self.current_pt = points[-1]

    def closePath(self):
        self._outPen.closePath()
        self.current_pt = None

    def endPath(self):
        self._outPen.endPath()
        self.current_pt = None
class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
    """A "buffered" filter pen that accumulates contour data, passes
    it through a ``filterContour`` method when the contour is closed or ended,
    and finally draws the result with the output pen.

    Components are passed through unchanged.
    """

    def __init__(self, outPen):
        super().__init__()
        self._outPen = outPen

    def closePath(self):
        super().closePath()
        self._flushContour()

    def endPath(self):
        super().endPath()
        self._flushContour()

    def _flushContour(self):
        # Filter the buffered contour; a None result means it was modified
        # in place, anything else replaces the buffer before replaying.
        filtered = self.filterContour(self.value)
        if filtered is not None:
            self.value = filtered
        self.replay(self._outPen)
        self.value = []

    def filterContour(self, contour):
        """Subclasses must override this to perform the filtering.

        The contour is a list of pen (operator, operands) tuples.
        Operators are strings corresponding to the AbstractPen methods:
        "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and
        "endPath". The operands are the positional arguments that are
        passed to each method.

        If the method doesn't return a value (i.e. returns None), it's
        assumed that the argument was modified in-place.
        Otherwise, the return value is drawn with the output pen.
        """
        return  # or return contour
class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
    """Baseclass for point pens that apply some transformation to the
    coordinates they receive and pass them to another point pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> rec = RecordingPointPen()
    >>> pen = FilterPointPen(rec)
    >>> v = iter(rec.value)
    >>> pen.beginPath(identifier="abc")
    >>> next(v)
    ('beginPath', (), {'identifier': 'abc'})
    >>> pen.addPoint((1, 2), "line", False)
    >>> next(v)
    ('addPoint', ((1, 2), 'line', False, None), {})
    >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001")
    >>> next(v)
    ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'})
    >>> pen.endPath()
    >>> next(v)
    ('endPath', (), {})
    """

    def __init__(self, outPen):
        self._outPen = outPen

    def beginPath(self, **kwargs):
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._outPen.endPath()

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)
class _DecomposingFilterPenMixin:
    """Mixin class that decomposes components as regular contours.

    Shared by both DecomposingFilterPen and DecomposingFilterPointPen.

    Takes two required parameters, another (segment or point) pen 'outPen' to draw
    with, and a 'glyphSet' dict of drawable glyph objects to draw components from.

    The 'skipMissingComponents' and 'reverseFlipped' optional arguments work the
    same as in the DecomposingPen/DecomposingPointPen. Both are False by default.

    In addition, the decomposing filter pens also take the following two options:

    'include' is an optional set of component base glyph names to consider for
    decomposition; the default include=None means decompose all components no matter
    the base glyph name).

    'decomposeNested' (bool) controls whether to recurse decomposition into nested
    components of components (this only matters when 'include' was also provided);
    if False, only decompose top-level components included in the set, but not
    also their children.
    """

    # raises MissingComponentError if base glyph is not found in glyphSet
    skipMissingComponents = False

    def __init__(
        self,
        outPen,
        glyphSet,
        skipMissingComponents=None,
        reverseFlipped=False,
        include: set[str] | None = None,
        decomposeNested: bool = True,
    ):
        # Cooperative __init__: forwards to DecomposingPen/DecomposingPointPen
        # and FilterPen/FilterPointPen via the MRO.
        super().__init__(
            outPen=outPen,
            glyphSet=glyphSet,
            skipMissingComponents=skipMissingComponents,
            reverseFlipped=reverseFlipped,
        )
        self.include = include
        self.decomposeNested = decomposeNested

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        # only decompose the component if it's included in the set
        if self.include is None or baseGlyphName in self.include:
            # if we're decomposing nested components, temporarily set include to None
            include_bak = self.include
            if self.decomposeNested and self.include:
                self.include = None
            try:
                super().addComponent(baseGlyphName, transformation, **kwargs)
            finally:
                # restore the original filter even if decomposition raised
                if self.include != include_bak:
                    self.include = include_bak
        else:
            # excluded component: pass it through to the output pen unchanged
            _PassThruComponentsMixin.addComponent(
                self, baseGlyphName, transformation, **kwargs
            )
class DecomposingFilterPen(_DecomposingFilterPenMixin, DecomposingPen, FilterPen):
    """Filter pen that draws components as regular contours."""

    pass
class DecomposingFilterPointPen(
    _DecomposingFilterPenMixin, DecomposingPointPen, FilterPointPen
):
    """Filter point pen that draws components as regular contours."""

    pass
# -*- coding: utf-8 -*-
"""Pen to rasterize paths with FreeType."""
__all__ = ["FreeTypePen"]
import os
import ctypes
import platform
import subprocess
import collections
import math
import freetype
from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox
from freetype.ft_types import FT_Pos
from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline
from freetype.ft_enums import (
FT_OUTLINE_NONE,
FT_OUTLINE_EVEN_ODD_FILL,
FT_PIXEL_MODE_GRAY,
FT_CURVE_TAG_ON,
FT_CURVE_TAG_CONIC,
FT_CURVE_TAG_CUBIC,
)
from freetype.ft_errors import FT_Exception
from fontTools.pens.basePen import BasePen, PenError
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform
# One closed path: parallel sequences of (x, y) points and FreeType curve tags.
Contour = collections.namedtuple("Contour", ("points", "tags"))
class FreeTypePen(BasePen):
"""Pen to rasterize paths with FreeType. Requires `freetype-py` module.
Constructs ``FT_Outline`` from the paths, and renders it within a bitmap
buffer.
For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed.
For ``image()``, `Pillow` is required. Each module is lazily loaded when the
corresponding method is called.
Args:
glyphSet: a dictionary of drawable glyph objects keyed by name
used to resolve component references in composite glyphs.
Examples:
If `numpy` and `matplotlib` is available, the following code will
show the glyph image of `fi` in a new window::
from fontTools.ttLib import TTFont
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.misc.transform import Offset
pen = FreeTypePen(None)
font = TTFont('SourceSansPro-Regular.otf')
glyph = font.getGlyphSet()['fi']
glyph.draw(pen)
width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent
height = ascender - descender
pen.show(width=width, height=height, transform=Offset(0, -descender))
Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen::
import uharfbuzz as hb
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Offset
en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと'
for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in (
(en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}),
(en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}),
(ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}),
(ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}),
(ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True})
):
blob = hb.Blob.from_file_path(font_path)
face = hb.Face(blob)
font = hb.Font(face)
buf = hb.Buffer()
buf.direction = direction
buf.add_str(text)
buf.guess_segment_properties()
hb.shape(font, buf, features)
x, y = 0, 0
pen = FreeTypePen(None)
for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
gid = info.codepoint
transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset))
font.draw_glyph_with_pen(gid, transformed)
x += pos.x_advance
y += pos.y_advance
offset, width, height = None, None, None
if direction in ('ltr', 'rtl'):
offset = (0, -typo_descender)
width = x
height = typo_ascender - typo_descender
else:
offset = (-vhea_descender, -y)
width = vhea_ascender - vhea_descender
height = -y
pen.show(width=width, height=height, transform=Offset(*offset), contain=contain)
For Jupyter Notebook, the rendered image will be displayed in a cell if
you replace ``show()`` with ``image()`` in the examples.
"""
def __init__(self, glyphSet):
BasePen.__init__(self, glyphSet)
self.contours = []
def outline(self, transform=None, evenOdd=False):
"""Converts the current contours to ``FT_Outline``.
Args:
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
"""
transform = transform or Transform()
if not hasattr(transform, "transformPoint"):
transform = Transform(*transform)
n_contours = len(self.contours)
n_points = sum((len(contour.points) for contour in self.contours))
points = []
for contour in self.contours:
for point in contour.points:
point = transform.transformPoint(point)
points.append(
FT_Vector(
FT_Pos(otRound(point[0] * 64)), FT_Pos(otRound(point[1] * 64))
)
)
tags = []
for contour in self.contours:
for tag in contour.tags:
tags.append(tag)
contours = []
contours_sum = 0
for contour in self.contours:
contours_sum += len(contour.points)
contours.append(contours_sum - 1)
flags = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE
return FT_Outline(
(ctypes.c_short)(n_contours),
(ctypes.c_short)(n_points),
(FT_Vector * n_points)(*points),
(ctypes.c_ubyte * n_points)(*tags),
(ctypes.c_short * n_contours)(*contours),
(ctypes.c_int)(flags),
)
def buffer(
self, width=None, height=None, transform=None, contain=False, evenOdd=False
):
"""Renders the current contours within a bitmap buffer.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
Returns:
A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes``
object of the resulted bitmap and ``size`` is a 2-tuple of its
dimension.
Notes:
The image size should always be given explicitly if you need to get
a proper glyph image. When ``width`` and ``height`` are omitted, it
forcifully fits to the bounding box and the side bearings get
cropped. If you pass ``0`` to both ``width`` and ``height`` and set
``contain`` to ``True``, it expands to the bounding box while
maintaining the origin of the contours, meaning that LSB will be
maintained but RSB won’t. The difference between the two becomes
more obvious when rotate or skew transformation is applied.
Example:
.. code-block:: pycon
>>>
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> buf, size = pen.buffer(width=500, height=1000)
>> type(buf), len(buf), size
(, 500000, (500, 1000))
"""
transform = transform or Transform()
if not hasattr(transform, "transformPoint"):
transform = Transform(*transform)
contain_x, contain_y = contain or width is None, contain or height is None
if contain_x or contain_y:
dx, dy = transform.dx, transform.dy
bbox = self.bbox
p1, p2, p3, p4 = (
transform.transformPoint((bbox[0], bbox[1])),
transform.transformPoint((bbox[2], bbox[1])),
transform.transformPoint((bbox[0], bbox[3])),
transform.transformPoint((bbox[2], bbox[3])),
)
px, py = (p1[0], p2[0], p3[0], p4[0]), (p1[1], p2[1], p3[1], p4[1])
if contain_x:
if width is None:
dx = dx - min(*px)
width = max(*px) - min(*px)
else:
dx = dx - min(min(*px), 0.0)
width = max(width, max(*px) - min(min(*px), 0.0))
if contain_y:
if height is None:
dy = dy - min(*py)
height = max(*py) - min(*py)
else:
dy = dy - min(min(*py), 0.0)
height = max(height, max(*py) - min(min(*py), 0.0))
transform = Transform(*transform[:4], dx, dy)
width, height = math.ceil(width), math.ceil(height)
buf = ctypes.create_string_buffer(width * height)
bitmap = FT_Bitmap(
(ctypes.c_int)(height),
(ctypes.c_int)(width),
(ctypes.c_int)(width),
(ctypes.POINTER(ctypes.c_ubyte))(buf),
(ctypes.c_short)(256),
(ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY),
(ctypes.c_char)(0),
(ctypes.c_void_p)(None),
)
outline = self.outline(transform=transform, evenOdd=evenOdd)
err = FT_Outline_Get_Bitmap(
freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap)
)
if err != 0:
raise FT_Exception(err)
return buf.raw, (width, height)
def array(
self, width=None, height=None, transform=None, contain=False, evenOdd=False
):
"""Returns the rendered contours as a numpy array. Requires `numpy`.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
Returns:
A ``numpy.ndarray`` object with a shape of ``(height, width)``.
Each element takes a value in the range of ``[0.0, 1.0]``.
Notes:
The image size should always be given explicitly if you need to get
a proper glyph image. When ``width`` and ``height`` are omitted, it
forcifully fits to the bounding box and the side bearings get
cropped. If you pass ``0`` to both ``width`` and ``height`` and set
``contain`` to ``True``, it expands to the bounding box while
maintaining the origin of the contours, meaning that LSB will be
maintained but RSB won’t. The difference between the two becomes
more obvious when rotate or skew transformation is applied.
Example:
.. code-block:: pycon
>>>
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> arr = pen.array(width=500, height=1000)
>> type(a), a.shape
(, (1000, 500))
"""
import numpy as np
buf, size = self.buffer(
width=width,
height=height,
transform=transform,
contain=contain,
evenOdd=evenOdd,
)
return np.frombuffer(buf, "B").reshape((size[1], size[0])) / 255.0
def show(
    self, width=None, height=None, transform=None, contain=False, evenOdd=False
):
    """Plot the rendered contours with ``matplotlib.pyplot``.

    Requires `numpy` and `matplotlib`.

    Args:
        width: Image width of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        height: Image height of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        transform: An optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module. The bitmap size is not affected by this matrix.
        contain: If ``True``, the image size will be automatically expanded
            so that it fits to the bounding box of the paths. Useful for
            rendering glyphs with negative sidebearings without clipping.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.

    Notes:
        Always give the image size explicitly when you need a proper glyph
        image; when ``width`` and ``height`` are omitted the bitmap is
        forcefully fitted to the bounding box and the side bearings get
        cropped. Passing ``0`` for both, together with ``contain=True``,
        expands to the bounding box while keeping the contour origin, so
        the LSB is preserved but the RSB is not. The difference becomes
        more obvious when a rotate or skew transformation is applied.

    Example:
        .. code-block:: pycon

            >>> pen = FreeTypePen(None)
            >>> glyph.draw(pen)
            >>> pen.show(width=500, height=1000)
    """
    from matplotlib import pyplot as plt

    # Delegate rendering to array() so all options behave exactly as
    # documented there; the result is a float array in [0, 1].
    rendered = self.array(
        width=width,
        height=height,
        transform=transform,
        contain=contain,
        evenOdd=evenOdd,
    )
    plt.imshow(rendered, cmap="gray_r", vmin=0, vmax=1)
    plt.show()
def image(
    self, width=None, height=None, transform=None, contain=False, evenOdd=False
):
    """Return the rendered contours as a ``PIL.Image``. Requires `Pillow`.

    Can be used to display a glyph image in Jupyter Notebook.

    Args:
        width: Image width of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        height: Image height of the bitmap in pixels. If omitted, it
            automatically fits to the bounding box of the contours.
        transform: An optional 6-tuple containing an affine transformation,
            or a ``Transform`` object from the ``fontTools.misc.transform``
            module. The bitmap size is not affected by this matrix.
        contain: If ``True``, the image size will be automatically expanded
            so that it fits to the bounding box of the paths. Useful for
            rendering glyphs with negative sidebearings without clipping.
        evenOdd: Pass ``True`` for even-odd fill instead of non-zero.

    Returns:
        A ``PIL.Image`` object, filled in black, with the alpha channel
        taken from the rendered bitmap.

    Notes:
        Always give the image size explicitly when you need a proper glyph
        image; when ``width`` and ``height`` are omitted the bitmap is
        forcefully fitted to the bounding box and the side bearings get
        cropped. Passing ``0`` for both, together with ``contain=True``,
        expands to the bounding box while keeping the contour origin, so
        the LSB is preserved but the RSB is not.

    Example:
        .. code-block:: pycon

            >>> pen = FreeTypePen(None)
            >>> glyph.draw(pen)
            >>> img = pen.image(width=500, height=1000)
    """
    from PIL import Image

    bitmap, dims = self.buffer(
        width=width,
        height=height,
        transform=transform,
        contain=contain,
        evenOdd=evenOdd,
    )
    # Black glyph image; the rendered coverage becomes the alpha channel.
    result = Image.new("L", dims, 0)
    result.putalpha(Image.frombuffer("L", dims, bitmap))
    return result
@property
def bbox(self):
    """Exact bounding box of the outline.

    Returns:
        A tuple of ``(xMin, yMin, xMax, yMax)``.
    """
    box = FT_BBox()
    outline = self.outline()
    FT_Outline_Get_BBox(ctypes.byref(outline), ctypes.byref(box))
    # FreeType reports 26.6 fixed-point coordinates; convert to floats.
    return tuple(v / 64.0 for v in (box.xMin, box.yMin, box.xMax, box.yMax))
@property
def cbox(self):
    """The outline's 'control box' (bounds of all points, on- and off-curve).

    Returns:
        A tuple of ``(xMin, yMin, xMax, yMax)``.
    """
    box = FT_BBox()
    outline = self.outline()
    FT_Outline_Get_CBox(ctypes.byref(outline), ctypes.byref(box))
    # FreeType reports 26.6 fixed-point coordinates; convert to floats.
    return tuple(v / 64.0 for v in (box.xMin, box.yMin, box.xMax, box.yMax))
def _moveTo(self, pt):
    # Each moveTo opens a fresh contour whose first point is on-curve.
    new_contour = Contour([], [])
    new_contour.points.append(pt)
    new_contour.tags.append(FT_CURVE_TAG_ON)
    self.contours.append(new_contour)
def _lineTo(self, pt):
    # A line segment needs an open contour with at least a start point.
    if not self.contours or not self.contours[-1].points:
        raise PenError("Contour missing required initial moveTo")
    current = self.contours[-1]
    current.points.append(pt)
    current.tags.append(FT_CURVE_TAG_ON)
def _curveToOne(self, p1, p2, p3):
    # Cubic segment: two off-curve control points, then the on-curve end.
    if not self.contours or not self.contours[-1].points:
        raise PenError("Contour missing required initial moveTo")
    current = self.contours[-1]
    current.points.extend((p1, p2, p3))
    current.tags.extend((FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_ON))
def _qCurveToOne(self, p1, p2):
    # Quadratic segment: one conic off-curve point, then the on-curve end.
    if not self.contours or not self.contours[-1].points:
        raise PenError("Contour missing required initial moveTo")
    current = self.contours[-1]
    current.points.extend((p1, p2))
    current.tags.extend((FT_CURVE_TAG_CONIC, FT_CURVE_TAG_ON))
# Modified from https://github.com/adobe-type-tools/psautohint/blob/08b346865710ed3c172f1eb581d6ef243b203f99/python/psautohint/ufoFont.py#L800-L838
import hashlib
from fontTools.pens.basePen import MissingComponentError
from fontTools.pens.pointPen import AbstractPointPen
class HashPointPen(AbstractPointPen):
    """
    This pen can be used to check if a glyph's contents (outlines plus
    components) have changed.

    Components are added as the original outline plus each composite's
    transformation.

    Example: You have some TrueType hinting code for a glyph which you want to
    compile. The hinting code specifies a hash value computed with HashPointPen
    that was valid for the glyph's outlines at the time the hinting code was
    written. Now you can calculate the hash for the glyph's current outlines to
    check if the outlines have changed, which would probably make the hinting
    code invalid.

    > glyph = ufo[name]
    > hash_pen = HashPointPen(glyph.width, ufo)
    > glyph.drawPoints(hash_pen)
    > ttdata = glyph.lib.get("public.truetype.instructions", None)
    > stored_hash = ttdata.get("id", None)  # The hash is stored in the "id" key
    > if stored_hash is None or stored_hash != hash_pen.hash:
    >   logger.error(f"Glyph hash mismatch, glyph '{name}' will have no instructions in font.")
    > else:
    >   # The hash values are identical, the outline has not changed.
    >   # Compile the hinting code ...
    >   pass

    If you want to compare a glyph from a source format which supports floating point
    coordinates and transformations against a glyph from a format which has restrictions
    on the precision of floats, e.g. UFO vs. TTF, you must use an appropriate rounding
    function to make the values comparable. For TTF fonts with composites, this
    construct can be used to make the transform values conform to F2Dot14:

    > ttf_hash_pen = HashPointPen(ttf_glyph_width, ttFont.getGlyphSet())
    > ttf_round_pen = RoundingPointPen(ttf_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14))
    > ufo_hash_pen = HashPointPen(ufo_glyph.width, ufo)
    > ttf_glyph.drawPoints(ttf_round_pen, ttFont["glyf"])
    > ufo_round_pen = RoundingPointPen(ufo_hash_pen, transformRoundFunc=partial(floatToFixedToFloat, precisionBits=14))
    > ufo_glyph.drawPoints(ufo_round_pen)
    > assert ttf_hash_pen.hash == ufo_hash_pen.hash
    """

    def __init__(self, glyphWidth=0, glyphSet=None):
        # glyphSet is only needed to resolve component base glyphs.
        self.glyphset = glyphSet
        # The serialization starts with the advance width.
        self.data = ["w%s" % round(glyphWidth, 9)]

    @property
    def hash(self):
        """Serialized glyph data, compacted with SHA-512 when long."""
        serialized = "".join(self.data)
        return (
            hashlib.sha512(serialized.encode("ascii")).hexdigest()
            if len(serialized) >= 128
            else serialized
        )

    def beginPath(self, identifier=None, **kwargs):
        pass

    def endPath(self):
        # Contour separator.
        self.data.append("|")

    def addPoint(
        self,
        pt,
        segmentType=None,
        smooth=False,
        name=None,
        identifier=None,
        **kwargs,
    ):
        # Off-curve points serialize as "o"; on-curve points use the first
        # letter of their segment type ("m", "l", "c", "q").
        pt_type = "o" if segmentType is None else segmentType[0]
        self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}")

    def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
        # The component contributes its base glyph's point data bracketed
        # together with its transformation.
        serialized_transform = "".join([f"{t:+}" for t in transformation])
        self.data.append("[")
        try:
            self.glyphset[baseGlyphName].drawPoints(self)
        except KeyError:
            raise MissingComponentError(baseGlyphName)
        self.data.append(f"({serialized_transform})]")
"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing
for shapes.
"""
from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import solveQuadratic, solveCubic
__all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
    """This pen implements "point inside" testing: to test whether
    a given point lies inside the shape (black) or outside (white).
    Instances of this class can be recycled, as long as the
    setTestPoint() method is used to set the new point to test.

    :Example:
        .. code-block::

            pen = PointInsidePen(glyphSet, (100, 200))
            outline.draw(pen)
            isInside = pen.getResult()

    Both the even-odd algorithm and the non-zero-winding-rule
    algorithm are implemented. The latter is the default, specify
    True for the evenOdd argument of __init__ or setTestPoint
    to use the even-odd algorithm.
    """

    # This class implements the classical "shoot a ray from the test point
    # to infinity and count how many times it intersects the outline" (as well
    # as the non-zero variant, where the counter is incremented if the outline
    # intersects the ray in one direction and decremented if it intersects in
    # the other direction).
    # I found an amazingly clear explanation of the subtleties involved in
    # implementing this correctly for polygons here:
    # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
    # I extended the principles outlined on that page to curves.

    def __init__(self, glyphSet, testPoint, evenOdd=False):
        # glyphSet: passed through to BasePen (used to resolve components).
        # testPoint: (x, y) pair to test; evenOdd: fill-rule selector.
        BasePen.__init__(self, glyphSet)
        self.setTestPoint(testPoint, evenOdd)

    def setTestPoint(self, testPoint, evenOdd=False):
        """Set the point to test. Call this _before_ the outline gets drawn."""
        self.testPoint = testPoint
        self.evenOdd = evenOdd
        self.firstPoint = None
        self.intersectionCount = 0

    def getWinding(self):
        """Return the accumulated signed crossing count for the test ray."""
        if self.firstPoint is not None:
            # always make sure the sub paths are closed; the algorithm only works
            # for closed paths.
            self.closePath()
        return self.intersectionCount

    def getResult(self):
        """After the shape has been drawn, getResult() returns True if the test
        point lies within the (black) shape, and False if it doesn't.
        """
        winding = self.getWinding()
        if self.evenOdd:
            result = winding % 2
        else:  # non-zero
            result = self.intersectionCount != 0
        # Normalize to a strict bool.
        return not not result

    def _addIntersection(self, goingUp):
        # Non-zero winding: direction matters; even-odd: only parity matters.
        if self.evenOdd or goingUp:
            self.intersectionCount += 1
        else:
            self.intersectionCount -= 1

    def _moveTo(self, point):
        if self.firstPoint is not None:
            # always make sure the sub paths are closed; the algorithm only works
            # for closed paths.
            self.closePath()
        self.firstPoint = point

    def _lineTo(self, point):
        # Count crossings of this segment with a horizontal ray cast from
        # the test point towards +x.
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = point

        if x1 < x and x2 < x:
            # Entirely left of the test point: the ray cannot hit it.
            return
        if y1 < y and y2 < y:
            # Entirely below the ray.
            return
        if y1 >= y and y2 >= y:
            # Entirely at or above the ray; the half-open convention
            # (strictly-below on one side) keeps shared endpoints from
            # being counted twice.
            return

        dx = x2 - x1
        dy = y2 - y1
        t = (y - y1) / dy
        ix = dx * t + x1
        if ix < x:
            # The crossing happens left of the test point.
            return
        self._addIntersection(y2 > y1)

    def _curveToOne(self, bcp1, bcp2, point):
        # Same ray test as _lineTo, extended to a cubic Bezier segment.
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = bcp1
        x3, y3 = bcp2
        x4, y4 = point

        # Cheap rejection using the control polygon's extent.
        if x1 < x and x2 < x and x3 < x and x4 < x:
            return
        if y1 < y and y2 < y and y3 < y and y4 < y:
            return
        if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
            return

        # Power-basis coefficients of y(t); solve y(t) == y for t.
        dy = y1
        cy = (y2 - dy) * 3.0
        by = (y3 - y2) * 3.0 - cy
        ay = y4 - dy - cy - by
        solutions = sorted(solveCubic(ay, by, cy, dy - y))
        solutions = [t for t in solutions if -0.0 <= t <= 1.0]
        if not solutions:
            return

        # Power-basis coefficients of x(t), to locate each crossing.
        dx = x1
        cx = (x2 - dx) * 3.0
        bx = (x3 - x2) * 3.0 - cx
        ax = x4 - dx - cx - bx

        above = y1 >= y
        lastT = None
        for t in solutions:
            if t == lastT:
                # Skip duplicate roots.
                continue
            lastT = t
            t2 = t * t
            t3 = t2 * t

            # First derivative y'(t) gives the crossing direction; when it
            # vanishes (the ray is tangent), fall back to higher derivatives
            # to decide the incoming/outgoing directions separately.
            direction = 3 * ay * t2 + 2 * by * t + cy
            incomingGoingUp = outgoingGoingUp = direction > 0.0
            if direction == 0.0:
                direction = 6 * ay * t + 2 * by
                outgoingGoingUp = direction > 0.0
                incomingGoingUp = not outgoingGoingUp
                if direction == 0.0:
                    direction = ay
                    incomingGoingUp = outgoingGoingUp = direction > 0.0

            xt = ax * t3 + bx * t2 + cx * t + dx
            if xt < x:
                # Crossing left of the test point.
                continue

            if t in (0.0, -0.0):
                # Segment start: count according to the outgoing direction
                # so a crossing shared with the previous segment is counted
                # exactly once.
                if not outgoingGoingUp:
                    self._addIntersection(outgoingGoingUp)
            elif t == 1.0:
                # Segment end: mirror bookkeeping using the incoming direction.
                if incomingGoingUp:
                    self._addIntersection(incomingGoingUp)
            else:
                if incomingGoingUp == outgoingGoingUp:
                    self._addIntersection(outgoingGoingUp)
                # else:
                #   we're not really intersecting, merely touching

    def _qCurveToOne_unfinished(self, bcp, point):
        # XXX need to finish this, for now doing it through a cubic
        # (BasePen implements _qCurveTo in terms of a cubic) will
        # have to do.
        # NOTE(review): this method is unfinished and unused; it references
        # ZERO_MINUS_EPSILON / ONE_PLUS_EPSILON, which are not defined in
        # this module.
        x, y = self.testPoint
        x1, y1 = self._getCurrentPoint()
        x2, y2 = bcp
        x3, y3 = point
        c = y1
        b = (y2 - c) * 2.0
        a = y3 - c - b
        solutions = sorted(solveQuadratic(a, b, c - y))
        solutions = [
            t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
        ]
        if not solutions:
            return
        # XXX

    def _closePath(self):
        # Close the sub path with an explicit line when the pen did not
        # return to the starting point.
        if self._getCurrentPoint() != self.firstPoint:
            self.lineTo(self.firstPoint)
        self.firstPoint = None

    def _endPath(self):
        """Insideness is not defined for open contours."""
        raise NotImplementedError
# venv\Lib\site-packages\fontTools\pens\pointPen.py
"""
=========
PointPens
=========
Where **SegmentPens** have an intuitive approach to drawing
(if you're familiar with postscript anyway), the **PointPen**
is geared towards accessing all the data in the contours of
the glyph. A PointPen has a very simple interface, it just
steps through all the points in a call from glyph.drawPoints().
This allows the caller to provide more data for each point.
For instance, whether or not a point is smooth, and its name.
"""
from __future__ import annotations
import math
from typing import Any, Dict, List, Optional, Tuple
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.transform import DecomposedTransform, Identity
from fontTools.pens.basePen import AbstractPen, MissingComponentError, PenError
# Public protocol classes and adapters exported by this module.
__all__ = [
    "AbstractPointPen",
    "BasePointToSegmentPen",
    "PointToSegmentPen",
    "SegmentToPointPen",
    "GuessSmoothPointPen",
    "ReverseContourPointPen",
]

# Some type aliases to make it easier below
Point = Tuple[float, float]  # an (x, y) coordinate pair
PointName = Optional[str]
# [(pt, smooth, name, kwargs)]
SegmentPointList = List[Tuple[Optional[Point], bool, PointName, Any]]
SegmentType = Optional[str]  # e.g. "move"/"line"/"curve"/"qcurve"; None for off-curve
SegmentList = List[Tuple[SegmentType, SegmentPointList]]
class AbstractPointPen:
    """Baseclass for all PointPens.

    Subclasses implement the PointPen protocol: a contour is delivered as a
    ``beginPath()`` call, a series of ``addPoint()`` calls, and an
    ``endPath()`` call; components arrive via ``addComponent()`` or
    ``addVarComponent()``.
    """

    def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
        """Start a new sub path."""
        raise NotImplementedError

    def endPath(self) -> None:
        """End the current sub path."""
        raise NotImplementedError

    def addPoint(
        self,
        pt: Tuple[float, float],
        segmentType: Optional[str] = None,
        smooth: bool = False,
        name: Optional[str] = None,
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Add a point to the current sub path."""
        raise NotImplementedError

    def addComponent(
        self,
        baseGlyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Add a sub glyph."""
        raise NotImplementedError

    def addVarComponent(
        self,
        glyphName: str,
        transformation: DecomposedTransform,
        location: Dict[str, float],
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Add a VarComponent sub glyph. The 'transformation' argument
        must be a DecomposedTransform from the fontTools.misc.transform module,
        and the 'location' argument must be a dictionary mapping axis tags
        to their locations.
        """
        # ttGlyphSet decomposes for us
        raise AttributeError
class BasePointToSegmentPen(AbstractPointPen):
    """
    Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    it as points, do use this base implementation as it properly takes
    care of all the edge cases.
    """

    def __init__(self) -> None:
        # Buffer of (pt, segmentType, smooth, name, kwargs) tuples for the
        # contour currently being built; None when no path has begun.
        self.currentPath = None

    def beginPath(self, identifier=None, **kwargs):
        if self.currentPath is not None:
            raise PenError("Path already begun.")
        self.currentPath = []

    def _flushContour(self, segments: SegmentList) -> None:
        """Override this method.

        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.

        The segments list contains tuples of length 2:
            (segmentType, points)

        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.

        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
            (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.

        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.

        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self) -> None:
        """Group the buffered points into segments and flush them."""
        if self.currentPath is None:
            raise PenError("Path not begun.")
        points = self.currentPath
        self.currentPath = None
        if not points:
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments: SegmentList = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                points = points[firstOnCurve + 1 :] + points[: firstOnCurve + 1]

        # Each run of off-curve points up to and including the next on-curve
        # point becomes one segment of that on-curve point's type.
        currentSegment: SegmentPointList = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []

        self._flushContour(segments)

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        if self.currentPath is None:
            raise PenError("Path not begun")
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PointToSegmentPen(BasePointToSegmentPen):
    """
    Adapter class that converts the PointPen protocol to the
    (Segment)Pen protocol.

    NOTE: The segment pen does not support and will drop point names, identifiers
    and kwargs.
    """

    def __init__(self, segmentPen, outputImpliedClosingLine: bool = False) -> None:
        # segmentPen: the SegmentPen receiving the converted drawing calls.
        # outputImpliedClosingLine: when True, always emit the final lineTo
        # of a closed contour even though closePath() implies it.
        BasePointToSegmentPen.__init__(self)
        self.pen = segmentPen
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _flushContour(self, segments):
        """Translate one contour's segment list into SegmentPen calls."""
        if not segments:
            raise PenError("Must have at least one segment.")
        pen = self.pen
        if segments[0][0] == "move":
            # It's an open path.
            closed = False
            points = segments[0][1]
            if len(points) != 1:
                raise PenError(f"Illegal move segment point count: {len(points)}")
            movePt, _, _, _ = points[0]
            del segments[0]
        else:
            # It's a closed path, do a moveTo to the last
            # point of the last segment.
            closed = True
            segmentType, points = segments[-1]
            movePt, _, _, _ = points[-1]
        if movePt is None:
            # quad special case: a contour with no on-curve points contains
            # one "qcurve" segment that ends with a point that's None. We
            # must not output a moveTo() in that case.
            pass
        else:
            pen.moveTo(movePt)
        outputImpliedClosingLine = self.outputImpliedClosingLine
        nSegments = len(segments)
        lastPt = movePt
        for i in range(nSegments):
            segmentType, points = segments[i]
            points = [pt for pt, _, _, _ in points]
            if segmentType == "line":
                if len(points) != 1:
                    raise PenError(f"Illegal line segment point count: {len(points)}")
                pt = points[0]
                # For closed contours, a 'lineTo' is always implied from the last oncurve
                # point to the starting point, thus we can omit it when the last and
                # starting point don't overlap.
                # However, when the last oncurve point is a "line" segment and has same
                # coordinates as the starting point of a closed contour, we need to output
                # the closing 'lineTo' explicitly (regardless of the value of the
                # 'outputImpliedClosingLine' option) in order to disambiguate this case from
                # the implied closing 'lineTo', otherwise the duplicate point would be lost.
                # See https://github.com/googlefonts/fontmake/issues/572.
                if (
                    i + 1 != nSegments
                    or outputImpliedClosingLine
                    or not closed
                    or pt == lastPt
                ):
                    pen.lineTo(pt)
                    lastPt = pt
            elif segmentType == "curve":
                pen.curveTo(*points)
                lastPt = points[-1]
            elif segmentType == "qcurve":
                pen.qCurveTo(*points)
                lastPt = points[-1]
            else:
                raise PenError(f"Illegal segmentType: {segmentType}")
        if closed:
            pen.closePath()
        else:
            pen.endPath()

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        """Forward a component; `identifier` and extra kwargs are dropped."""
        del identifier  # unused
        del kwargs  # unused
        self.pen.addComponent(glyphName, transform)
class SegmentToPointPen(AbstractPen):
    """
    Adapter class that converts the (Segment)Pen protocol to the
    PointPen protocol.
    """

    def __init__(self, pointPen, guessSmooth=True) -> None:
        # With guessSmooth, wrap the target pen so that smooth (tangent)
        # points are detected geometrically and flagged.
        if guessSmooth:
            self.pen = GuessSmoothPointPen(pointPen)
        else:
            self.pen = pointPen
        # Buffer of (pt, segmentType) pairs for the contour in progress;
        # None when no contour has been started.
        self.contour: Optional[List[Tuple[Point, SegmentType]]] = None

    def _flushContour(self) -> None:
        # Replay the buffered contour through the PointPen protocol.
        pen = self.pen
        pen.beginPath()
        for pt, segmentType in self.contour:
            pen.addPoint(pt, segmentType=segmentType)
        pen.endPath()

    def moveTo(self, pt):
        self.contour = []
        self.contour.append((pt, "move"))

    def lineTo(self, pt):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self.contour.append((pt, "line"))

    def curveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        # All but the last point are off-curve control points.
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        self.contour.append((pts[-1], "curve"))

    def qCurveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if pts[-1] is None:
            # TrueType special case: a contour with no on-curve points
            # (qCurveTo ending in None) opens the contour itself, with no
            # preceding moveTo.
            self.contour = []
        else:
            if self.contour is None:
                raise PenError("Contour missing required initial moveTo")
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        if pts[-1] is not None:
            self.contour.append((pts[-1], "qcurve"))

    def closePath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
            # The contour returns to its start point: fold the last point's
            # segment type onto the first point and drop the duplicate.
            self.contour[0] = self.contour[-1]
            del self.contour[-1]
        else:
            # There's an implied line at the end, replace "move" with "line"
            # for the first point
            pt, tp = self.contour[0]
            if tp == "move":
                self.contour[0] = pt, "line"
        self._flushContour()
        self.contour = None

    def endPath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self._flushContour()
        self.contour = None

    def addComponent(self, glyphName, transform):
        if self.contour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform)
class GuessSmoothPointPen(AbstractPointPen):
    """
    Filtering PointPen that tries to determine whether an on-curve point
    should be "smooth", ie. that it's a "tangent" point or a "curve" point.

    Args:
        outPen: the wrapped PointPen that receives the (re-flagged) points.
        error: maximum angular difference, in radians, between the incoming
            and outgoing directions at a point for it to count as smooth.
    """

    def __init__(self, outPen, error=0.05):
        self._outPen = outPen
        self._error = error
        # Buffered points of the current contour; None when no path begun.
        self._points = None

    def _flushContour(self):
        """Mark geometrically collinear on-curve points as smooth, then
        forward the whole contour to the output pen."""
        if self._points is None:
            raise PenError("Path not begun")
        points = self._points
        nPoints = len(points)
        if not nPoints:
            return
        if points[0][1] == "move":
            # Open path: the first and last points have only one neighbor
            # each, so they can never be smooth.
            indices = range(1, nPoints - 1)
        elif nPoints > 1:
            # Closed path. To avoid having to mod the contour index, we
            # simply abuse Python's negative index feature, and start at -1
            indices = range(-1, nPoints - 1)
        else:
            # closed path containing 1 point (!), ignore.
            indices = []
        for i in indices:
            pt, segmentType, _, name, kwargs = points[i]
            if segmentType is None:
                # Off-curve points are never flagged smooth.
                continue
            prevIdx = i - 1
            nextIdx = i + 1
            if points[prevIdx][1] is not None and points[nextIdx][1] is not None:
                # Both neighbors are on-curve: a corner between straight
                # segments; leave it alone.
                continue
            # At least one of our neighbors is an off-curve point
            prevPt = points[prevIdx][0]
            nextPt = points[nextIdx][0]
            if pt != prevPt and pt != nextPt:
                dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
                dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
                a1 = math.atan2(dy1, dx1)
                a2 = math.atan2(dy2, dx2)
                # Compare the two direction angles on the circle rather than
                # as raw numbers: atan2 jumps from +pi to -pi across the
                # negative x-axis, so near-collinear tangents pointing that
                # way would show a raw difference of ~2*pi and the smooth
                # flag would be missed.
                diff = abs(a1 - a2)
                if min(diff, 2 * math.pi - diff) < self._error:
                    points[i] = pt, segmentType, True, name, kwargs
        for pt, segmentType, smooth, name, kwargs in points:
            self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

    def beginPath(self, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Path already begun")
        self._points = []
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._flushContour()
        self._outPen.endPath()
        self._points = None

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        if self._points is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        # Incoming smooth flags are deliberately discarded; smoothness is
        # re-derived geometrically in _flushContour.
        self._points.append((pt, segmentType, False, name, kwargs))

    def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Components must be added before or after contours")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.addComponent(glyphName, transformation, **kwargs)

    def addVarComponent(
        self, glyphName, transformation, location, identifier=None, **kwargs
    ):
        if self._points is not None:
            raise PenError("VarComponents must be added before or after contours")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.addVarComponent(glyphName, transformation, location, **kwargs)
class ReverseContourPointPen(AbstractPointPen):
    """
    This is a PointPen that passes outline data to another PointPen, but
    reversing the winding direction of all contours. Components are simply
    passed through unchanged.

    Closed contours are reversed in such a way that the first point remains
    the first point.
    """

    def __init__(self, outputPointPen):
        self.pen = outputPointPen
        # a place to store the points for the current sub path
        self.currentContour = None

    def _flushContour(self):
        """Emit the buffered contour to the output pen, reversed."""
        pen = self.pen
        contour = self.currentContour
        if not contour:
            # Empty contour: forward it unchanged.
            pen.beginPath(identifier=self.currentContourIdentifier)
            pen.endPath()
            return

        closed = contour[0][1] != "move"
        if not closed:
            lastSegmentType = "move"
        else:
            # Remove the first point and insert it at the end. When
            # the list of points gets reversed, this point will then
            # again be at the start. In other words, the following
            # will hold:
            #   for N in range(len(originalContour)):
            #       originalContour[N] == reversedContour[-N]
            contour.append(contour.pop(0))
            # Find the first on-curve point.
            firstOnCurve = None
            for i in range(len(contour)):
                if contour[i][1] is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # There are no on-curve points, we basically have to
                # do nothing but contour.reverse().
                lastSegmentType = None
            else:
                lastSegmentType = contour[firstOnCurve][1]

        contour.reverse()
        if not closed:
            # Open paths must start with a move, so we simply dump
            # all off-curve points leading up to the first on-curve.
            while contour[0][1] is None:
                contour.pop(0)
        pen.beginPath(identifier=self.currentContourIdentifier)
        for pt, nextSegmentType, smooth, name, kwargs in contour:
            # After reversal, each on-curve point takes over the segment
            # type of the next on-curve point in the original direction.
            if nextSegmentType is not None:
                segmentType = lastSegmentType
                lastSegmentType = nextSegmentType
            else:
                segmentType = None
            pen.addPoint(
                pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs
            )
        pen.endPath()

    def beginPath(self, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Path already begun")
        self.currentContour = []
        self.currentContourIdentifier = identifier
        self.onCurve = []  # NOTE(review): not referenced elsewhere in this class

    def endPath(self):
        if self.currentContour is None:
            raise PenError("Path not begun")
        self._flushContour()
        self.currentContour = None

    def addPoint(
        self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
    ):
        if self.currentContour is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self.currentContour.append((pt, segmentType, smooth, name, kwargs))

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)
class DecomposingPointPen(LogMixin, AbstractPointPen):
    """Implements a 'addComponent' method that decomposes components
    (i.e. draws them onto self as simple contours).
    It can also be used as a mixin class (e.g. see DecomposingRecordingPointPen).

    You must override beginPath, addPoint, endPath. You may
    additionally override addVarComponent and addComponent.

    By default a warning message is logged when a base glyph is missing;
    set the class variable ``skipMissingComponents`` to False if you want
    all instances of a sub-class to raise a :class:`MissingComponentError`
    exception by default.
    """

    skipMissingComponents = True
    # alias error for convenience
    MissingComponentError = MissingComponentError

    def __init__(
        self,
        glyphSet,
        *args,
        skipMissingComponents=None,
        reverseFlipped=False,
        **kwargs,
    ):
        """Takes a 'glyphSet' argument (dict), in which the glyphs that are referenced
        as components are looked up by their name.

        If the optional 'reverseFlipped' argument is True, components whose transformation
        matrix has a negative determinant will be decomposed with a reversed path direction
        to compensate for the flip.

        The optional 'skipMissingComponents' argument can be set to True/False to
        override the homonymous class attribute for a given pen instance.
        """
        super().__init__(*args, **kwargs)
        self.glyphSet = glyphSet
        # Per-instance override falls back to the class-level default.
        if skipMissingComponents is None:
            skipMissingComponents = type(self).skipMissingComponents
        self.skipMissingComponents = skipMissingComponents
        self.reverseFlipped = reverseFlipped

    def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
        """Transform the points of the base glyph and draw it onto self.

        The `identifier` parameter and any extra kwargs are ignored.
        """
        from fontTools.pens.transformPen import TransformPointPen

        try:
            baseGlyph = self.glyphSet[baseGlyphName]
        except KeyError:
            if not self.skipMissingComponents:
                raise MissingComponentError(baseGlyphName)
            self.log.warning(
                "glyph '%s' is missing from glyphSet; skipped" % baseGlyphName
            )
            return
        target = self
        if transformation != Identity:
            target = TransformPointPen(target, transformation)
        if self.reverseFlipped:
            # if the transformation has a negative determinant, it will
            # reverse the contour direction of the component
            a, b, c, d = transformation[:4]
            if a * d - b * c < 0:
                target = ReverseContourPointPen(target)
        baseGlyph.drawPoints(target)
# Copyright 2016 Google Inc. All Rights Reserved.
# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fontTools.qu2cu import quadratic_to_curves
from fontTools.pens.filterPen import ContourFilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen
import math
class Qu2CuPen(ContourFilterPen):
    """A filter pen to convert quadratic bezier splines to cubic curves
    using the FontTools SegmentPen protocol.

    Args:
        other_pen: another SegmentPen used to draw the transformed outline.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        all_cubic: if True, convert all quadratic segments, not only multi-
            off-curve ones, and emit cubics exclusively.
        reverse_direction: flip the contours' direction but keep starting point.
        stats: a dictionary counting the point numbers of cubic segments.
    """

    def __init__(
        self,
        other_pen,
        max_err,
        all_cubic=False,
        reverse_direction=False,
        stats=None,
    ):
        if reverse_direction:
            other_pen = ReverseContourPen(other_pen)
        super().__init__(other_pen)
        self.all_cubic = all_cubic
        self.max_err = max_err
        self.stats = stats

    def _quadratics_to_curve(self, q):
        # q: a run of quadratic splines, each given as (startPt, *args);
        # yields the converted ("curveTo"/"qCurveTo", args) operations.
        curves = quadratic_to_curves(q, self.max_err, all_cubic=self.all_cubic)
        if self.stats is not None:
            for curve in curves:
                n = str(len(curve) - 2)
                self.stats[n] = self.stats.get(n, 0) + 1
        for curve in curves:
            # A 4-point result is a cubic; anything else stays quadratic.
            if len(curve) == 4:
                yield ("curveTo", curve[1:])
            else:
                yield ("qCurveTo", curve[1:])

    def filterContour(self, contour):
        # First pass: batch consecutive convertible qCurveTo operations and
        # replace each batch with its cubic (or re-optimized quadratic) form.
        quadratics = []
        currentPt = None
        newContour = []
        for op, args in contour:
            if op == "qCurveTo" and (
                self.all_cubic or (len(args) > 2 and args[-1] is not None)
            ):
                if args[-1] is None:
                    raise NotImplementedError(
                        "oncurve-less contours with all_cubic not implemented"
                    )
                quadratics.append((currentPt,) + args)
            else:
                if quadratics:
                    newContour.extend(self._quadratics_to_curve(quadratics))
                    quadratics = []
                newContour.append((op, args))
            currentPt = args[-1] if args else None
        if quadratics:
            newContour.extend(self._quadratics_to_curve(quadratics))

        if not self.all_cubic:
            # Add back implicit oncurve points
            contour = newContour
            newContour = []
            for op, args in contour:
                if op == "qCurveTo" and newContour and newContour[-1][0] == "qCurveTo":
                    pt0 = newContour[-1][1][-2]
                    pt1 = newContour[-1][1][-1]
                    pt2 = args[0]
                    # If the join point pt1 is (numerically) the midpoint of
                    # its neighboring off-curves, it is implied in TrueType
                    # and can be dropped by merging the two qCurveTo ops.
                    if (
                        pt1 is not None
                        and math.isclose(pt2[0] - pt1[0], pt1[0] - pt0[0])
                        and math.isclose(pt2[1] - pt1[1], pt1[1] - pt0[1])
                    ):
                        newArgs = newContour[-1][1][:-1] + args
                        newContour[-1] = (op, newArgs)
                        continue
                newContour.append((op, args))

        return newContour
from fontTools.pens.basePen import BasePen
from reportlab.graphics.shapes import Path
__all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
    """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object."""

    def __init__(self, glyphSet, path=None):
        # If the caller doesn't supply a Path, start with an empty one.
        BasePen.__init__(self, glyphSet)
        self.path = Path() if path is None else path

    def _moveTo(self, p):
        self.path.moveTo(*p)

    def _lineTo(self, p):
        self.path.lineTo(*p)

    def _curveToOne(self, p1, p2, p3):
        # Forward the three cubic control points as flat coordinates.
        self.path.curveTo(p1[0], p1[1], p2[0], p2[1], p3[0], p3[1])

    def _closePath(self):
        self.path.closePath()
if __name__ == "__main__":
    # Command-line demo: render a single glyph from a font to a PNG image.
    import sys

    if len(sys.argv) < 3:
        # NOTE: the angle-bracketed placeholders were stripped from the
        # original usage text by markup extraction; restored here.
        print("Usage: reportLabPen.py <fontfilename> <glyphname> [<imagefilename>]")
        print(
            "  If no image file name is supplied, by default <glyphname>.png is created."
        )
        print("  example: reportLabPen.py Arial.TTF R test.png")
        print(
            "  (The file format will be PNG, regardless of the image file name supplied)"
        )
        sys.exit(0)

    from fontTools.ttLib import TTFont
    from reportlab.lib import colors

    path = sys.argv[1]
    glyphName = sys.argv[2]
    # Output file defaults to "<glyphname>.png".
    if len(sys.argv) > 3:
        imageFile = sys.argv[3]
    else:
        imageFile = "%s.png" % glyphName

    font = TTFont(path)  # it would work just as well with fontTools.t1Lib.T1Font
    gs = font.getGlyphSet()
    pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
    g = gs[glyphName]
    g.draw(pen)

    w, h = g.width, 1000

    from reportlab.graphics import renderPM
    from reportlab.graphics.shapes import Group, Drawing, scale

    # Everything is wrapped in a group to allow transformations.
    g = Group(pen.path)
    g.translate(0, 200)
    g.scale(0.3, 0.3)

    d = Drawing(w, h)
    d.add(g)

    renderPM.drawToFile(d, imageFile, fmt="PNG")
from fontTools.misc.arrayTools import pairwise
from fontTools.pens.filterPen import ContourFilterPen
__all__ = ["reversedContour", "ReverseContourPen"]
class ReverseContourPen(ContourFilterPen):
    """Filter pen that passes outline data to another pen, but reversing
    the winding direction of all contours. Components are simply passed
    through unchanged.

    Closed contours are reversed in such a way that the first point remains
    the first point.
    """

    def __init__(self, outPen, outputImpliedClosingLine=False):
        super().__init__(outPen)
        # When False (default), a lineTo that the closePath would imply
        # is dropped from the reversed output.
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def filterContour(self, contour):
        # All of the work happens in the module-level generator.
        return reversedContour(contour, self.outputImpliedClosingLine)
def reversedContour(contour, outputImpliedClosingLine=False):
    """Generator that takes a list of pen's (operator, operands) tuples,
    and yields them with the winding direction reversed.

    ``contour`` is consumed destructively (items are popped/rewritten).
    """
    if not contour:
        return  # nothing to do, stop iteration

    # valid contours must have at least a starting and ending command,
    # can't have one without the other
    assert len(contour) > 1, "invalid contour"

    # the type of the last command determines if the contour is closed
    contourType = contour.pop()[0]
    assert contourType in ("endPath", "closePath")
    closed = contourType == "closePath"

    firstType, firstPts = contour.pop(0)
    assert firstType in ("moveTo", "qCurveTo"), (
        "invalid initial segment type: %r" % firstType
    )
    firstOnCurve = firstPts[-1]
    if firstType == "qCurveTo":
        # special case for TrueType paths contaning only off-curve points
        assert firstOnCurve is None, "off-curve only paths must end with 'None'"
        assert not contour, "only one qCurveTo allowed per off-curve path"
        # keep the first off-curve, reverse the rest, keep the trailing None
        firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)

    if not contour:
        # contour contains only one segment, nothing to reverse
        if firstType == "moveTo":
            closed = False  # single-point paths can't be closed
        else:
            closed = True  # off-curve paths are closed by definition
        yield firstType, firstPts
    else:
        lastType, lastPts = contour[-1]
        lastOnCurve = lastPts[-1]
        if closed:
            # for closed paths, we keep the starting point
            yield firstType, firstPts
            if firstOnCurve != lastOnCurve:
                # emit an implied line between the last and first points
                yield "lineTo", (lastOnCurve,)
                contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))

            if len(contour) > 1:
                secondType, secondPts = contour[0]
            else:
                # contour has only two points, the second and last are the same
                secondType, secondPts = lastType, lastPts

            if not outputImpliedClosingLine:
                # if a lineTo follows the initial moveTo, after reversing it
                # will be implied by the closePath, so we don't emit one;
                # unless the lineTo and moveTo overlap, in which case we keep the
                # duplicate points
                if secondType == "lineTo" and firstPts != secondPts:
                    del contour[0]
                    if contour:
                        contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
        else:
            # for open paths, the last point will become the first
            yield firstType, (lastOnCurve,)
            contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))

        # we iterate over all segment pairs in reverse order, and yield
        # each one with the off-curve points reversed (if any), and
        # with the on-curve point of the following segment
        for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
            yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)

    yield "closePath" if closed else "endPath", ()
# Copyright (c) 2009 Type Supply LLC
# Author: Tal Leming
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from fontTools.cffLib.specializer import commandsToProgram, specializeCommands
from fontTools.misc.psCharStrings import T2CharString
from fontTools.misc.roundTools import otRound, roundFunc
from fontTools.pens.basePen import BasePen
class T2CharStringPen(BasePen):
    """Pen to draw Type 2 CharStrings.

    The 'roundTolerance' argument controls the rounding of point coordinates.
    It is defined as the maximum absolute difference between the original
    float and the rounded integer value.
    The default tolerance of 0.5 means that all floats are rounded to integer;
    a value of 0 disables rounding; values in between will only round floats
    which are close to their integral part within the tolerated range.
    """

    def __init__(
        self,
        width: float | None,
        glyphSet: Dict[str, Any] | None,
        roundTolerance: float = 0.5,
        CFF2: bool = False,
    ) -> None:
        super(T2CharStringPen, self).__init__(glyphSet)
        # Rounding policy derived from the tolerance (see class docstring).
        self.round = roundFunc(roundTolerance)
        self._CFF2 = CFF2
        self._width = width
        # Accumulated (operator, relative-operands) pairs.
        self._commands: List[Tuple[str | bytes, List[float]]] = []
        # Current point, used to compute relative deltas.
        self._p0 = (0, 0)

    def _p(self, pt: Tuple[float, float]) -> List[float]:
        # Round the incoming point, make it the new current point, and
        # return its delta from the previous current point.
        previous = self._p0
        current = (self.round(pt[0]), self.round(pt[1]))
        self._p0 = current
        return [current[0] - previous[0], current[1] - previous[1]]

    def _moveTo(self, pt: Tuple[float, float]) -> None:
        self._commands.append(("rmoveto", self._p(pt)))

    def _lineTo(self, pt: Tuple[float, float]) -> None:
        self._commands.append(("rlineto", self._p(pt)))

    def _curveToOne(
        self,
        pt1: Tuple[float, float],
        pt2: Tuple[float, float],
        pt3: Tuple[float, float],
    ) -> None:
        delta = self._p
        self._commands.append(("rrcurveto", delta(pt1) + delta(pt2) + delta(pt3)))

    def _closePath(self) -> None:
        # CharStrings close contours implicitly.
        pass

    def _endPath(self) -> None:
        pass

    def getCharString(
        self,
        private: Dict | None = None,
        globalSubrs: List | None = None,
        optimize: bool = True,
    ) -> T2CharString:
        """Assemble the recorded commands into a T2CharString."""
        commands = self._commands
        if optimize:
            # CFF2 allows a deeper argument stack than CFF.
            commands = specializeCommands(
                commands,
                generalizeFirst=False,
                maxstack=513 if self._CFF2 else 48,
            )
        program = commandsToProgram(commands)
        if self._width is not None:
            assert (
                not self._CFF2
            ), "CFF2 does not allow encoding glyph width in CharString."
            program.insert(0, otRound(self._width))
        if not self._CFF2:
            program.append("endchar")
        return T2CharString(program=program, private=private, globalSubrs=globalSubrs)
# venv\Lib\site-packages\fontTools\pens\teePen.py
"""Pen multiplexing drawing to one or more pens."""
from fontTools.pens.basePen import AbstractPen
__all__ = ["TeePen"]
class TeePen(AbstractPen):
    """Pen multiplexing drawing to one or more pens.

    Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens)."""

    def __init__(self, *pens):
        # A single positional argument is taken to be an iterable of pens.
        self.pens = pens[0] if len(pens) == 1 else pens

    def moveTo(self, p0):
        for receiver in self.pens:
            receiver.moveTo(p0)

    def lineTo(self, p1):
        for receiver in self.pens:
            receiver.lineTo(p1)

    def qCurveTo(self, *points):
        for receiver in self.pens:
            receiver.qCurveTo(*points)

    def curveTo(self, *points):
        for receiver in self.pens:
            receiver.curveTo(*points)

    def closePath(self):
        for receiver in self.pens:
            receiver.closePath()

    def endPath(self):
        for receiver in self.pens:
            receiver.endPath()

    def addComponent(self, glyphName, transformation):
        for receiver in self.pens:
            receiver.addComponent(glyphName, transformation)
if __name__ == "__main__":
    # Smoke test: draw the same outline through two debug pens at once.
    from fontTools.pens.basePen import _TestPen

    pen = TeePen(_TestPen(), _TestPen())
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25))
    pen.closePath()
from array import array
from typing import Any, Callable, Dict, Optional, Tuple
from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat
from fontTools.misc.loggingTools import LogMixin
from fontTools.pens.pointPen import AbstractPointPen
from fontTools.misc.roundTools import otRound
from fontTools.pens.basePen import LoggingPen, PenError
from fontTools.pens.transformPen import TransformPen, TransformPointPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import flagOnCurve, flagCubic
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
from fontTools.ttLib.tables._g_l_y_f import dropImpliedOnCurvePoints
import math
__all__ = ["TTGlyphPen", "TTGlyphPointPen"]
class _TTGlyphBasePen:
    """Shared machinery for TTGlyphPen and TTGlyphPointPen: accumulates
    points, contour ends, per-point flags and components, and assembles
    them into a ``Glyph``."""

    def __init__(
        self,
        glyphSet: Optional[Dict[str, Any]],
        handleOverflowingTransforms: bool = True,
    ) -> None:
        """
        Construct a new pen.

        Args:
            glyphSet (Dict[str, Any]): A glyphset object, used to resolve components.
            handleOverflowingTransforms (bool): See below.

        If ``handleOverflowingTransforms`` is True, the components' transform values
        are checked that they don't overflow the limits of a F2Dot14 number:
        -2.0 <= v < +2.0. If any transform value exceeds these, the composite
        glyph is decomposed.

        An exception to this rule is done for values that are very close to +2.0
        (both for consistency with the -2.0 case, and for the relative frequency
        these occur in real fonts). When almost +2.0 values occur (and all other
        values are within the range -2.0 <= x <= +2.0), they are clamped to the
        maximum positive value that can still be encoded as an F2Dot14: i.e.
        1.99993896484375.

        If False, no check is done and all components are translated unmodified
        into the glyf table, followed by an inevitable ``struct.error`` once an
        attempt is made to compile them.

        If both contours and components are present in a glyph, the components
        are decomposed.
        """
        self.glyphSet = glyphSet
        self.handleOverflowingTransforms = handleOverflowingTransforms
        self.init()

    def _decompose(
        self,
        glyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
    ):
        # Redraw the referenced glyph through a transforming pen so its
        # outline is inlined into this pen instead of kept as a component.
        tpen = self.transformPen(self, transformation)
        getattr(self.glyphSet[glyphName], self.drawMethod)(tpen)

    def _isClosed(self):
        """
        Check if the current path is closed.
        """
        raise NotImplementedError

    def init(self) -> None:
        # Reset all accumulated outline state so the pen can be (re)used.
        self.points = []  # (x, y) coordinate tuples
        self.endPts = []  # index of the last point of each finished contour
        self.types = []  # per-point flag values, parallel to self.points
        self.components = []  # (baseGlyphName, 6-tuple transform) pairs

    def addComponent(
        self,
        baseGlyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Add a sub glyph.
        """
        self.components.append((baseGlyphName, transformation))

    def _buildComponents(self, componentFlags):
        # Turn the queued (name, transform) pairs into GlyphComponent
        # objects, decomposing instead where required (see __init__).
        if self.handleOverflowingTransforms:
            # we can't encode transform values > 2 or < -2 in F2Dot14,
            # so we must decompose the glyph if any transform exceeds these
            overflowing = any(
                s > 2 or s < -2
                for (glyphName, transformation) in self.components
                for s in transformation[:4]
            )
        components = []
        for glyphName, transformation in self.components:
            if glyphName not in self.glyphSet:
                self.log.warning(f"skipped non-existing component '{glyphName}'")
                continue
            if self.points or (self.handleOverflowingTransforms and overflowing):
                # can't have both coordinates and components, so decompose
                self._decompose(glyphName, transformation)
                continue

            component = GlyphComponent()
            component.glyphName = glyphName
            component.x, component.y = (otRound(v) for v in transformation[4:])
            # quantize floats to F2Dot14 so we get same values as when decompiled
            # from a binary glyf table
            transformation = tuple(
                floatToFixedToFloat(v, 14) for v in transformation[:4]
            )
            if transformation != (1, 0, 0, 1):
                if self.handleOverflowingTransforms and any(
                    MAX_F2DOT14 < s <= 2 for s in transformation
                ):
                    # clamp values ~= +2.0 so we can keep the component
                    transformation = tuple(
                        MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s
                        for s in transformation
                    )
                component.transform = (transformation[:2], transformation[2:])
            component.flags = componentFlags
            components.append(component)
        return components

    def glyph(
        self,
        componentFlags: int = 0x04,
        dropImpliedOnCurves: bool = False,
        *,
        round: Callable[[float], int] = otRound,
    ) -> Glyph:
        """
        Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.

        Args:
            componentFlags: Flags to use for component glyphs. (default: 0x04)
            dropImpliedOnCurves: Whether to remove implied-oncurve points. (default: False)
            round: rounding function applied to the final coordinates.
        """
        if not self._isClosed():
            raise PenError("Didn't close last contour.")
        components = self._buildComponents(componentFlags)

        glyph = Glyph()
        glyph.coordinates = GlyphCoordinates(self.points)
        glyph.endPtsOfContours = self.endPts
        glyph.flags = array("B", self.types)
        # Reset the pen state so it can be reused for another glyph.
        self.init()

        if components:
            # If both components and contours were present, they have by now
            # been decomposed by _buildComponents.
            glyph.components = components
            glyph.numberOfContours = -1
        else:
            glyph.numberOfContours = len(glyph.endPtsOfContours)
            glyph.program = ttProgram.Program()
            glyph.program.fromBytecode(b"")
            if dropImpliedOnCurves:
                dropImpliedOnCurvePoints(glyph)
        glyph.coordinates.toInt(round=round)

        return glyph
class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
    """
    Pen used for drawing to a TrueType glyph.

    This pen can be used to construct or modify glyphs in a TrueType format
    font. After using the pen to draw, use the ``.glyph()`` method to retrieve
    a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
    """

    drawMethod = "draw"
    transformPen = TransformPen

    def __init__(
        self,
        glyphSet: Optional[Dict[str, Any]] = None,
        handleOverflowingTransforms: bool = True,
        outputImpliedClosingLine: bool = False,
    ) -> None:
        super().__init__(glyphSet, handleOverflowingTransforms)
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _addPoint(self, pt: Tuple[float, float], tp: int) -> None:
        # Append one point and its flag value in lockstep.
        self.points.append(pt)
        self.types.append(tp)

    def _popPoint(self) -> None:
        self.points.pop()
        self.types.pop()

    def _isClosed(self) -> bool:
        # Closed iff no points were added yet, or the last added point
        # already ends a contour.
        return (not self.points) or (
            self.endPts and self.endPts[-1] == len(self.points) - 1
        )

    def lineTo(self, pt: Tuple[float, float]) -> None:
        self._addPoint(pt, flagOnCurve)

    def moveTo(self, pt: Tuple[float, float]) -> None:
        if not self._isClosed():
            raise PenError('"move"-type point must begin a new contour.')
        self._addPoint(pt, flagOnCurve)

    def curveTo(self, *points) -> None:
        # Cubic segment: off-curve control points followed by one on-curve.
        assert len(points) % 2 == 1
        for pt in points[:-1]:
            self._addPoint(pt, flagCubic)

        # last point is None if there are no on-curve points
        if points[-1] is not None:
            # Use the named flag instead of the magic literal 1 (same value),
            # consistent with lineTo/moveTo above.
            self._addPoint(points[-1], flagOnCurve)

    def qCurveTo(self, *points) -> None:
        # Quadratic segment: off-curve points followed by one on-curve.
        assert len(points) >= 1
        for pt in points[:-1]:
            self._addPoint(pt, 0)  # 0 == plain off-curve point (no flags set)

        # last point is None if there are no on-curve points
        if points[-1] is not None:
            self._addPoint(points[-1], flagOnCurve)

    def closePath(self) -> None:
        endPt = len(self.points) - 1

        # ignore anchors (one-point paths)
        if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
            self._popPoint()
            return

        if not self.outputImpliedClosingLine:
            # if first and last point on this path are the same, remove last
            startPt = 0
            if self.endPts:
                startPt = self.endPts[-1] + 1
            if self.points[startPt] == self.points[endPt]:
                self._popPoint()
                endPt -= 1

        self.endPts.append(endPt)

    def endPath(self) -> None:
        # TrueType contours are always "closed"
        self.closePath()
class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
    """
    Point pen used for drawing to a TrueType glyph.

    This pen can be used to construct or modify glyphs in a TrueType format
    font. After using the pen to draw, use the ``.glyph()`` method to retrieve
    a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
    """

    drawMethod = "drawPoints"
    transformPen = TransformPointPen

    def init(self) -> None:
        super().init()
        # Index of the first point of the contour currently being drawn,
        # or None when no contour is open.
        self._currentContourStartIndex = None

    def _isClosed(self) -> bool:
        return self._currentContourStartIndex is None

    def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
        """
        Start a new sub path.
        """
        if not self._isClosed():
            raise PenError("Didn't close previous contour.")
        self._currentContourStartIndex = len(self.points)

    def endPath(self) -> None:
        """
        End the current sub path.
        """
        # TrueType contours are always "closed"
        if self._isClosed():
            raise PenError("Contour is already closed.")
        if self._currentContourStartIndex == len(self.points):
            # ignore empty contours
            self._currentContourStartIndex = None
            return

        contourStart = self.endPts[-1] + 1 if self.endPts else 0
        self.endPts.append(len(self.points) - 1)
        self._currentContourStartIndex = None

        # Resolve types for any cubic segments: addPoint stored the string
        # "curve" as a placeholder for cubic on-curve points. For each one,
        # walk backwards (wrapping to the contour's last point) and mark the
        # preceding plain off-curves (flag 0) as cubic off-curves.
        flags = self.types
        for i in range(contourStart, len(flags)):
            if flags[i] == "curve":
                j = i - 1
                if j < contourStart:
                    # wrap around to the end of this contour
                    j = len(flags) - 1
                while flags[j] == 0:
                    flags[j] = flagCubic
                    j -= 1
                flags[i] = flagOnCurve

    def addPoint(
        self,
        pt: Tuple[float, float],
        segmentType: Optional[str] = None,
        smooth: bool = False,
        name: Optional[str] = None,
        identifier: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Add a point to the current sub path.
        """
        if self._isClosed():
            raise PenError("Can't add a point to a closed contour.")
        if segmentType is None:
            self.types.append(0)  # offcurve
        elif segmentType in ("line", "move"):
            self.types.append(flagOnCurve)
        elif segmentType == "qcurve":
            self.types.append(flagOnCurve)
        elif segmentType == "curve":
            # placeholder; resolved to real flags in endPath()
            self.types.append("curve")
        else:
            raise AssertionError(segmentType)

        self.points.append(pt)
"""Benchmark the qu2cu algorithm performance."""
from .qu2cu import *
from fontTools.cu2qu import curve_to_quadratic
import random
import timeit
# Benchmark parameters: approximation tolerance (font units) and the number
# of chained cubic curves generated per run.
MAX_ERR = 0.5
NUM_CURVES = 5
def generate_curves(n):
    """Return *n* random cubic curves chained end to end.

    Each curve is a 4-tuple of (x, y) float points; consecutive curves
    share their boundary point, forming a connected spline.
    """
    coords = [
        (float(random.randint(0, 2048)), float(random.randint(0, 2048)))
        for _ in range(1 + 3 * n)
    ]
    # Curve i spans points 3i .. 3i+3 (inclusive), so neighbors share a point.
    return [tuple(coords[3 * i : 3 * i + 4]) for i in range(n)]
def setup_quadratic_to_curves():
    """Build the argument tuple for benchmarking quadratic_to_curves()."""
    # Fresh random cubics, converted to quadratic splines via cu2qu.
    quadratics = [
        curve_to_quadratic(curve, MAX_ERR) for curve in generate_curves(NUM_CURVES)
    ]
    return quadratics, MAX_ERR
def run_benchmark(module, function, setup_suffix="", repeat=25, number=1):
    """Time *function* (looked up in this module's globals) and print the
    best run in microseconds.

    A matching ``setup_<function>[_<suffix>]`` callable is invoked on every
    timed call to build fresh arguments; its return tuple is splatted into
    the benchmarked function. ``module`` is accepted for API symmetry but
    not used here.
    """
    setup_name = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_name += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    target = globals()[function]
    setup_func = globals()[setup_name]

    def timed_call():
        return target(*setup_func())

    results = timeit.repeat(timed_call, repeat=repeat, number=number)
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))
def main():
    """Run the qu2cu benchmark suite."""
    run_benchmark("qu2cu", "quadratic_to_curves")


if __name__ == "__main__":
    # Fixed seed so benchmark runs are comparable across invocations.
    random.seed(1)
    main()
# venv\Lib\site-packages\fontTools\qu2cu\cli.py
import os
import argparse
import logging
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.ttLib import TTFont
from fontTools.pens.qu2cuPen import Qu2CuPen
from fontTools.pens.ttGlyphPen import TTGlyphPen
import fontTools
logger = logging.getLogger("fontTools.qu2cu")
def _font_to_cubic(input_path, output_path=None, **kwargs):
    """Load a TTF, convert its quadratic outlines to cubics, and save it.

    Expected kwargs: ``dump_stats`` (bool), ``max_err_em`` (float, error in
    EM units), ``all_cubic`` (bool).
    """
    font = TTFont(input_path)
    logger.info("Converting curves for %s", input_path)

    stats = {} if kwargs["dump_stats"] else None
    qu2cu_kwargs = {
        "stats": stats,
        # Scale the EM-relative tolerance to font units.
        "max_err": kwargs["max_err_em"] * font["head"].unitsPerEm,
        "all_cubic": kwargs["all_cubic"],
    }

    assert "gvar" not in font, "Cannot convert variable font"
    glyphSet = font.getGlyphSet()
    glyf = font["glyf"]
    for glyphName in font.getGlyphOrder():
        ttpen = TTGlyphPen(glyphSet)
        glyphSet[glyphName].draw(Qu2CuPen(ttpen, **qu2cu_kwargs))
        glyf[glyphName] = ttpen.glyph(dropImpliedOnCurves=True)
    # Cubic outlines in glyf require glyphDataFormat 1.
    font["head"].glyphDataFormat = 1

    if kwargs["dump_stats"]:
        logger.info("Stats: %s", stats)

    logger.info("Saving %s", output_path)
    font.save(output_path)
def _main(args=None):
    """Convert an OpenType font from quadratic to cubic curves"""
    parser = argparse.ArgumentParser(prog="qu2cu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input TTF source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=0.001,
        # typo fix: "maxiumum" -> "maximum"
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-c",
        "--all-cubic",
        default=False,
        action="store_true",
        help="whether to only use cubic curves",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=("output filename for the converted TTF."),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted TTFs",
    )

    options = parser.parse_args(args)

    # Map -v occurrences to log levels: none=WARNING, -v=INFO, -vv+=DEBUG.
    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        # typo fix: "multile" -> "multiple"
        parser.error("-o/--output-file can't be used with multiple inputs")

    # Work out one output path per input file.
    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        # Default: write next to the input with a ".cubic" suffix.
        output_paths = [
            makeOutputFileName(p, overWrite=True, suffix=".cubic")
            for p in options.infiles
        ]

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        all_cubic=options.all_cubic,
    )

    for input_path, output_path in zip(options.infiles, output_paths):
        _font_to_cubic(input_path, output_path, **kwargs)
# venv\Lib\site-packages\fontTools\qu2cu\qu2cu.py
# cython: language_level=3
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
# Copyright 2023 Google Inc. All Rights Reserved.
# Copyright 2023 Behdad Esfahbod. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cython
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = cython.compiled
from fontTools.misc.bezierTools import splitCubicAtTC
from collections import namedtuple
import math
from typing import (
List,
Tuple,
Union,
)
__all__ = ["quadratic_to_curves"]
# Copied from cu2qu
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that no
    checks are made on the start and end positions of the curve; this function
    only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # First check p2 then p1, as p2 has higher error early on.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125  # curve point at t=0.5
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    # Recurse into both halves of the de Casteljau split at t=0.5.
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p1_2_3=cython.complex,
)
def elevate_quadratic(p0, p1, p2):
    """Given a quadratic bezier curve, return its degree-elevated cubic.

    The returned cubic traces exactly the same path as the quadratic.
    """
    # https://pomax.github.io/bezierinfo/#reordering
    p1_2_3 = p1 * (2 / 3)
    return (
        p0,
        (p0 * (1 / 3) + p1_2_3),
        (p2 * (1 / 3) + p1_2_3),
        p2,
    )
@cython.cfunc
@cython.locals(
    start=cython.int,
    n=cython.int,
    k=cython.int,
    prod_ratio=cython.double,
    sum_ratio=cython.double,
    ratio=cython.double,
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
def merge_curves(curves, start, n):
    """Given a cubic-Bezier spline, reconstruct one cubic-Bezier
    that has the same endpoints and tangents and approximates
    the spline.

    Returns the merged curve and the list of interior t values at which
    the original segments joined. Raises ZeroDivisionError when adjacent
    control points coincide.
    """

    # Reconstruct the t values of the cut segments
    prod_ratio = 1.0
    sum_ratio = 1.0
    ts = [1]
    for k in range(1, n):
        ck = curves[start + k]
        c_before = curves[start + k - 1]

        # |t_(k+1) - t_k| / |t_k - t_(k - 1)| = ratio
        assert ck[0] == c_before[3]
        ratio = abs(ck[1] - ck[0]) / abs(c_before[3] - c_before[2])

        prod_ratio *= ratio
        sum_ratio += prod_ratio
        ts.append(sum_ratio)

    # (t(n) - t(n - 1)) / (t_(1) - t(0)) = prod_ratio
    ts = [t / sum_ratio for t in ts[:-1]]

    p0 = curves[start][0]
    p1 = curves[start][1]
    p2 = curves[start + n - 1][2]
    p3 = curves[start + n - 1][3]

    # Build the curve by scaling the control-points.
    p1 = p0 + (p1 - p0) / (ts[0] if ts else 1)
    p2 = p3 + (p2 - p3) / ((1 - ts[-1]) if ts else 1)

    curve = (p0, p1, p2, p3)

    return curve, ts
@cython.locals(
    count=cython.int,
    num_offcurves=cython.int,
    i=cython.int,
    off1=cython.complex,
    off2=cython.complex,
    on=cython.complex,
)
def add_implicit_on_curves(p):
    """Return a copy of quadratic spline *p* with the implied on-curve
    points inserted (the midpoint between each pair of consecutive
    off-curve points)."""
    q = list(p)
    count = 0
    num_offcurves = len(p) - 2
    for i in range(1, num_offcurves):
        off1 = p[i]
        off2 = p[i + 1]
        on = off1 + (off2 - off1) * 0.5  # midpoint is the implied on-curve
        # `count` accounts for points already inserted before index i.
        q.insert(i + 1 + count, on)
        count += 1
    return q
# A point is either an (x, y) 2-tuple or a complex number (x + y*1j).
Point = Union[Tuple[float, float], complex]
@cython.locals(
    cost=cython.int,
    is_complex=cython.int,
)
def quadratic_to_curves(
    quads: List[List[Point]],
    max_err: float = 0.5,
    all_cubic: bool = False,
) -> List[Tuple[Point, ...]]:
    """Converts a connecting list of quadratic splines to a list of quadratic
    and cubic curves.

    A quadratic spline is specified as a list of points.  Either each point is
    a 2-tuple of X,Y coordinates, or each point is a complex number with
    real/imaginary components representing X,Y coordinates.

    The first and last points are on-curve points and the rest are off-curve
    points, with an implied on-curve point in the middle between every two
    consecutive off-curve points.

    Returns:
        The output is a list of tuples of points. Points are represented
        in the same format as the input, either as 2-tuples or complex numbers.

        Each tuple is either of length three, for a quadratic curve, or four,
        for a cubic curve.  Each curve's last point is the same as the next
        curve's first point.

    Args:
        quads: quadratic splines

        max_err: absolute error tolerance; defaults to 0.5

        all_cubic: if True, only cubic curves are generated; defaults to False
    """
    is_complex = type(quads[0][0]) is complex
    if not is_complex:
        # Normalize to complex numbers internally; converted back at the end.
        quads = [[complex(x, y) for (x, y) in p] for p in quads]

    # Flatten the splines into one point list `q` (with implied on-curves
    # inserted), while building `costs`: the cumulative number of points
    # that would need encoding up to each position. Implied on-curve
    # points share their cost with the preceding point.
    q = [quads[0][0]]
    costs = [1]
    cost = 1
    for p in quads:
        assert q[-1] == p[0]
        for i in range(len(p) - 2):
            cost += 1
            costs.append(cost)
            costs.append(cost)
        qq = add_implicit_on_curves(p)[1:]
        costs.pop()
        q.extend(qq)
        cost += 1
        costs.append(cost)

    curves = spline_to_curves(q, costs, max_err, all_cubic)

    if not is_complex:
        curves = [tuple((c.real, c.imag) for c in curve) for curve in curves]
    return curves
# Dynamic-programming table entry: total encoded point count, accumulated
# max error, number of quadratic segments consumed by the final curve of
# this solution, and whether that final curve is cubic. Tuple ordering makes
# `<` prefer fewer points, then smaller error.
Solution = namedtuple("Solution", ["num_points", "error", "start_index", "is_cubic"])
@cython.locals(
    i=cython.int,
    j=cython.int,
    k=cython.int,
    start=cython.int,
    i_sol_count=cython.int,
    j_sol_count=cython.int,
    this_sol_count=cython.int,
    tolerance=cython.double,
    err=cython.double,
    error=cython.double,
    i_sol_error=cython.double,
    j_sol_error=cython.double,
    all_cubic=cython.int,
    is_cubic=cython.int,
    count=cython.int,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    v=cython.complex,
    u=cython.complex,
)
def spline_to_curves(q, costs, tolerance=0.5, all_cubic=False):
    """
    q: quadratic spline with alternating on-curve / off-curve points.

    costs: cumulative list of encoding cost of q in terms of number of
      points that need to be encoded.  Implied on-curve points do not
      contribute to the cost. If all points need to be encoded, then
      costs will be range(1, len(q)+1).

    Returns a list of curves: 4-point tuples for cubics, 3-point slices
    of q for quadratic runs kept as-is.
    """

    assert len(q) >= 3, "quadratic spline requires at least 3 points"

    # Elevate quadratic segments to cubic
    elevated_quadratics = [
        elevate_quadratic(*q[i : i + 3]) for i in range(0, len(q) - 2, 2)
    ]

    # Find sharp corners; they have to be oncurves for sure.
    forced = set()
    for i in range(1, len(elevated_quadratics)):
        p0 = elevated_quadratics[i - 1][2]
        p1 = elevated_quadratics[i][0]
        p2 = elevated_quadratics[i][1]
        # Triangle inequality slack beyond `tolerance` means the two
        # tangents genuinely bend at this joint.
        if abs(p1 - p0) + abs(p2 - p1) > tolerance + abs(p2 - p0):
            forced.add(i)

    # Dynamic-Programming to find the solution with fewest number of
    # cubic curves, and within those the one with smallest error.
    sols = [Solution(0, 0, 0, False)]
    # Sentinel worse than any achievable solution.
    impossible = Solution(len(elevated_quadratics) * 3 + 1, 0, 1, False)
    start = 0
    for i in range(1, len(elevated_quadratics) + 1):
        best_sol = impossible
        for j in range(start, i):
            j_sol_count, j_sol_error = sols[j].num_points, sols[j].error

            if not all_cubic:
                # Solution with quadratics between j:i
                this_count = costs[2 * i - 1] - costs[2 * j] + 1
                i_sol_count = j_sol_count + this_count
                i_sol_error = j_sol_error
                i_sol = Solution(i_sol_count, i_sol_error, i - j, False)
                if i_sol < best_sol:
                    best_sol = i_sol

                if this_count <= 3:
                    # Can't get any better than this in the path below
                    continue

            # Fit elevated_quadratics[j:i] into one cubic
            try:
                curve, ts = merge_curves(elevated_quadratics, j, i - j)
            except ZeroDivisionError:
                continue

            # Now reconstruct the segments from the fitted curve
            reconstructed_iter = splitCubicAtTC(*curve, *ts)
            reconstructed = []
            # Knot errors
            error = 0
            for k, reconst in enumerate(reconstructed_iter):
                orig = elevated_quadratics[j + k]
                err = abs(reconst[3] - orig[3])
                error = max(error, err)
                if error > tolerance:
                    break
                reconstructed.append(reconst)
            if error > tolerance:
                # Not feasible
                continue

            # Interior errors
            for k, reconst in enumerate(reconstructed):
                orig = elevated_quadratics[j + k]
                # Difference curve; must stay within tolerance of the origin.
                p0, p1, p2, p3 = tuple(v - u for v, u in zip(reconst, orig))

                if not cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
                    error = tolerance + 1
                    break
            if error > tolerance:
                # Not feasible
                continue

            # Save best solution
            i_sol_count = j_sol_count + 3
            i_sol_error = max(j_sol_error, error)
            i_sol = Solution(i_sol_count, i_sol_error, i - j, True)
            if i_sol < best_sol:
                best_sol = i_sol

            if i_sol_count == 3:
                # Can't get any better than this
                break

        sols.append(best_sol)
        if i in forced:
            # A sharp corner: later curves may not start before it.
            start = i

    # Reconstruct solution by walking the DP table backwards.
    splits = []
    cubic = []
    i = len(sols) - 1
    while i:
        count, is_cubic = sols[i].start_index, sols[i].is_cubic
        splits.append(i)
        cubic.append(is_cubic)
        i -= count
    curves = []
    j = 0
    for i, is_cubic in reversed(list(zip(splits, cubic))):
        if is_cubic:
            curves.append(merge_curves(elevated_quadratics, j, i - j)[0])
        else:
            for k in range(j, i):
                curves.append(q[k * 2 : k * 2 + 3])
        j = i

    return curves
def main():
    """Demo driver: turn one random cubic into quadratics and back.

    Generates a random cubic curve, converts it to quadratics with cu2qu,
    reconstructs cubics with quadratic_to_curves, and prints both results.
    """
    from fontTools.cu2qu.benchmark import generate_curve
    from fontTools.cu2qu import curve_to_quadratic

    tol = 0.05
    reconstruct_tol = tol * 1
    original = generate_curve()
    quads = curve_to_quadratic(original, tol)
    print("cu2qu tolerance %g. qu2cu tolerance %g." % (tol, reconstruct_tol))
    print("One random cubic turned into %d quadratics." % len(quads))
    rebuilt = quadratic_to_curves([quads], reconstruct_tol)
    print("Those quadratics turned back into %d cubics. " % len(rebuilt))
    print("Original curve:", original)
    print("Reconstructed curve(s):", rebuilt)


if __name__ == "__main__":
    main()
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .qu2cu import *
import sys
from .cli import _main as main
# Run the qu2cu command-line interface when executed as a module
# (python -m ...); the exit status is whatever _main returns.
if __name__ == "__main__":
    sys.exit(main())
venv\Lib\site-packages\fontTools\subset\cff.py
from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.subset.util import _add_method, _uniq_sort
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 charstring decompiler that collects glyphs referenced via the
    seac-style 'endchar' accent composition into ``self.components``."""

    def __init__(self, components, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
        # Set shared with the caller; referenced base/accent glyph names
        # are accumulated here.
        self.components = components

    def op_endchar(self, index):
        args = self.popall()
        if len(args) < 4:
            return
        from fontTools.encodings.StandardEncoding import StandardEncoding

        # endchar can do seac accent building; the T2 spec says it's
        # deprecated, but recent software that shall remain nameless
        # does output it.
        adx, ady, bchar, achar = args[-4:]
        self.components.add(StandardEncoding[bchar])
        self.components.add(StandardEncoding[achar])
@_add_method(ttLib.getTableClass("CFF "))
def closure_glyphs(self, s):
    """Expand the subsetter's glyph set with glyphs referenced through
    seac-style accent composition, iterating until no new glyphs appear."""
    cff = self.cff
    assert len(cff) == 1
    font = cff[cff.keys()[0]]
    glyphSet = font.CharStrings

    pending = s.glyphs
    while pending:
        found = set()
        for name in pending:
            if name not in glyphSet:
                continue
            charstring = glyphSet[name]
            subrs = getattr(charstring.private, "Subrs", [])
            decompiler = _ClosureGlyphsT2Decompiler(
                found, subrs, charstring.globalSubrs
            )
            decompiler.execute(charstring)
        # Only newly-discovered glyphs need another round of decompilation.
        found -= s.glyphs
        s.glyphs.update(found)
        pending = found
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
    """Replace *glyphName*'s charstring with an empty program.

    For CFF (not CFF2) the glyph's advance width is preserved unless
    *ignoreWidth* is set, by re-encoding it relative to nominalWidthX.
    """
    c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
    if isCFF2 or ignoreWidth:
        # CFF2 charstrings have no widths nor 'endchar' operators
        c.setProgram([] if isCFF2 else ["endchar"])
        return

    # Pick the Private dict that holds the width defaults for this glyph.
    if getattr(font, "FDArray", None) is not None:
        private = font.FDArray[fdSelectIndex].Private
    else:
        private = font.Private
    dfltWdX = private.defaultWidthX
    nmnlWdX = private.nominalWidthX
    c.draw(NullPen())  # this will set the charstring's width
    if c.width == dfltWdX:
        # Default width: no width operand needed.
        c.program = ["endchar"]
    else:
        c.program = [c.width - nmnlWdX, "endchar"]
@_add_method(ttLib.getTableClass("CFF "))
def prune_pre_subset(self, font, options):
    """Pre-subset pruning: keep a single font in the CFF set, empty the
    '.notdef' outline if requested, and reset the (useless) Encoding."""
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]

    if options.notdef_glyph and not options.notdef_outline:
        isCFF2 = cff.major > 1
        for name in cff.keys():
            _empty_charstring(cff[name], ".notdef", isCFF2=isCFF2)

    # Clear useless Encoding
    for name in cff.keys():
        # https://github.com/fonttools/fonttools/issues/620
        cff[name].Encoding = "StandardEncoding"

    return True  # bool(cff.fontNames)
@_add_method(ttLib.getTableClass("CFF "))
def subset_glyphs(self, s):
    """Restrict every font in the CFF table to the subsetter's glyph set.

    Keeps glyphs in ``s.glyphs`` plus the ones that are merely emptied
    (``s.glyphs_emptied``, used with retain-gids), rebuilds the charstrings
    index, charset, and FDSelect accordingly, and empties the retained-gid
    placeholder glyphs.
    """
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        glyphs = s.glyphs.union(s.glyphs_emptied)

        # Load all glyphs
        # NOTE(review): getItemAndSelector is presumably called for its side
        # effect of parsing/caching each kept charstring before the index is
        # rebuilt below -- TODO confirm.
        for g in font.charset:
            if g not in glyphs:
                continue
            c, _ = cs.getItemAndSelector(g)

        if cs.charStringsAreIndexed:
            # Rebuild the charstrings INDEX, keeping only the kept glyphs'
            # items, in original charset order.
            indices = [i for i, g in enumerate(font.charset) if g in glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            # Drop lazy-loading state so the pruned items are what gets used.
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                # Keep FDSelect entries aligned with the surviving glyph
                # order; format is reset so it is recomputed on compile.
                sel = font.FDSelect
                sel.format = None
                sel.gidArray = [sel.gidArray[i] for i in indices]
            # Re-map glyph name -> new position in the pruned index.
            newCharStrings = {}
            for indicesIdx, charsetIdx in enumerate(indices):
                g = font.charset[charsetIdx]
                if g in cs.charStrings:
                    newCharStrings[g] = indicesIdx
            cs.charStrings = newCharStrings
        else:
            # Non-indexed charstrings: a plain dict filter suffices.
            cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
        font.charset = [g for g in font.charset if g in glyphs]
        font.numGlyphs = len(font.charset)

        if s.options.retain_gids:
            # Glyphs kept only to preserve gids get empty programs.
            isCFF2 = cff.major > 1
            for g in s.glyphs_emptied:
                _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)

    return True  # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(ttLib.getTableClass("CFF "))
def prune_post_subset(self, ttfFont, options):
    """Post-subset pruning: drop unused FontDictionaries, optionally
    desubroutinize, and strip hints / unused subroutines."""
    cff = self.cff
    for name in cff.keys():
        font = cff[name]
        cs = font.CharStrings

        # Drop unused FontDictionaries
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            keep = _uniq_sort(sel.gidArray)
            # Renumber gidArray entries into the compacted FDArray.
            sel.gidArray = [keep.index(fd) for fd in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in keep]
            del arr.file, arr.offsets

    # Desubroutinize if asked for
    if options.desubroutinize:
        cff.desubroutinize()

    # Drop hints if not needed
    if not options.hinting:
        self.remove_hints()
    elif not options.desubroutinize:
        self.remove_unused_subroutines()
    return True
@deprecateFunction(
    "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
)
@_add_method(ttLib.getTableClass("CFF "))
def desubroutinize(self):
    """Deprecated thin wrapper; delegates to CFFFontSet.desubroutinize()."""
    self.cff.desubroutinize()
@deprecateFunction(
    "use 'CFFFontSet.remove_hints()' instead", category=DeprecationWarning
)
@_add_method(ttLib.getTableClass("CFF "))
def remove_hints(self):
    """Deprecated thin wrapper; delegates to CFFFontSet.remove_hints()."""
    self.cff.remove_hints()
@deprecateFunction(
    "use 'CFFFontSet.remove_unused_subroutines' instead", category=DeprecationWarning
)
@_add_method(ttLib.getTableClass("CFF "))
def remove_unused_subroutines(self):
    """Deprecated thin wrapper; delegates to CFFFontSet.remove_unused_subroutines()."""
    self.cff.remove_unused_subroutines()
venv\Lib\site-packages\fontTools\subset\svg.py
from __future__ import annotations
import re
from functools import lru_cache
from itertools import chain, count
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple
try:
from lxml import etree
except ImportError:
# lxml is required for subsetting SVG, but we prefer to delay the import error
# until subset_glyphs() is called (i.e. if font to subset has an 'SVG ' table)
etree = None
from fontTools import ttLib
from fontTools.subset.util import _add_method
from fontTools.ttLib.tables.S_V_G_ import SVGDocument
__all__ = ["subset_glyphs"]

# Matches element ids of the form "glyphNNN"; group 1 is the glyph id digits.
GID_RE = re.compile(r"^glyph(\d+)$")

# XML namespace prefixes used when compiling XPath expressions below.
NAMESPACES = {
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
}
# Clark-notation (namespace-qualified) name of the xlink:href attribute.
XLINK_HREF = f'{{{NAMESPACES["xlink"]}}}href'
# TODO(antrotype): Replace with functools.cache once we are 3.9+
@lru_cache(maxsize=None)
def xpath(path):
    """Compile *path* once and cache the resulting XPath callable, so it can
    be reused across multiple elements."""
    compiled = etree.XPath(path, namespaces=NAMESPACES)
    return compiled
def group_elements_by_id(tree: etree.Element) -> Dict[str, etree.Element]:
    """Map each 'id' attribute value to its element, anywhere in the tree.

    The root element itself is included:
    https://github.com/fonttools/fonttools/issues/2548
    """
    elements_by_id = {}
    for element in xpath("//svg:*[@id]")(tree):
        elements_by_id[element.attrib["id"]] = element
    return elements_by_id
def parse_css_declarations(style_attr: str) -> Dict[str, str]:
    """Parse an SVG 'style' attribute into a property -> value dict.

    https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/style
    https://developer.mozilla.org/en-US/docs/Web/CSS/Syntax#css_declarations

    Raises:
        ValueError: if a non-empty declaration does not contain exactly
            one ':' separator.
    """
    declarations = {}
    for chunk in style_attr.split(";"):
        if chunk.count(":") == 1:
            name, _, value = chunk.partition(":")
            declarations[name.strip()] = value.strip()
        elif chunk.strip():
            raise ValueError(f"Invalid CSS declaration syntax: {chunk}")
    return declarations
def iter_referenced_ids(tree: etree.Element) -> Iterator[str]:
# Yield all the ids that can be reached via references from this element tree.
# We currently support xlink:href (as used by
venv\Lib\site-packages\fontTools\subset\util.py
"""Private utility methods used by the subset modules"""
def _add_method(*clazzes):
"""Returns a decorator function that adds a new method to one or
more classes."""
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done:
continue # Support multiple names of a clazz
done.append(clazz)
assert clazz.__name__ != "DefaultTable", "Oops, table class not found."
assert not hasattr(
clazz, method.__name__
), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def _uniq_sort(l):
return sorted(set(l))
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod
from __future__ import annotations
from fontTools import config
from fontTools.misc.roundTools import otRound
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from fontTools.ttLib.tables.otBase import USE_HARFBUZZ_REPACKER
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.pens.basePen import NullPen
from fontTools.misc.loggingTools import Timer
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.subset.util import _add_method, _uniq_sort
from fontTools.subset.cff import *
from fontTools.subset.svg import *
from fontTools.varLib import varStore, multiVarStore # For monkey-patching
from fontTools.ttLib.tables._n_a_m_e import NameRecordVisitor, makeName
from fontTools.unicodedata import mirrored
import sys
import struct
import array
import logging
from collections import Counter, defaultdict
from functools import reduce
from types import MethodType
# One-line usage summary; interpolated into the module docstring below.
__usage__ = "fonttools subset font-file [glyph...] [--option=value]..."
__doc__ = (
"""\
fonttools subset -- OpenType font subsetter and optimizer
fonttools subset is an OpenType font subsetter and optimizer, based on
fontTools. It accepts any TT- or CFF-flavored OpenType (.otf or .ttf)
or WOFF (.woff) font file. The subsetted glyph set is based on the
specified glyphs or characters, and specified OpenType layout features.
The tool also performs some size-reducing optimizations, aimed for using
subset fonts as webfonts. Individual optimizations can be enabled or
disabled, and are enabled by default when they are safe.
Usage: """
+ __usage__
+ """
At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
--text, --text-file, --unicodes, or --unicodes-file, must be specified.
Args:
font-file
The input font file.
glyph
Specify one or more glyph identifiers to include in the subset. Must be
PS glyph names, or the special string '*' to keep the entire glyph set.
Initial glyph set specification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These options populate the initial glyph set. Same option can appear
multiple times, and the results are accummulated.
--gids=[,...]
Specify comma/whitespace-separated list of glyph IDs or ranges as decimal
numbers. For example, --gids=10-12,14 adds glyphs with numbers 10, 11,
12, and 14.
--gids-file=
Like --gids but reads from a file. Anything after a '#' on any line is
ignored as comments.
--glyphs=[,...]
Specify comma/whitespace-separated PS glyph names to add to the subset.
Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc
that are accepted on the command line. The special string '*' will keep
the entire glyph set.
--glyphs-file=
Like --glyphs but reads from a file. Anything after a '#' on any line
is ignored as comments.
--text=
Specify characters to include in the subset, as UTF-8 string.
--text-file=
Like --text but reads from a file. Newline character are not added to
the subset.
--unicodes=[,...]
Specify comma/whitespace-separated list of Unicode codepoints or
ranges as hex numbers, optionally prefixed with 'U+', 'u', etc.
For example, --unicodes=41-5a,61-7a adds ASCII letters, so does
the more verbose --unicodes=U+0041-005A,U+0061-007A.
The special strings '*' will choose all Unicode characters mapped
by the font.
--unicodes-file=
Like --unicodes, but reads from a file. Anything after a '#' on any
line in the file is ignored as comments.
--ignore-missing-glyphs
Do not fail if some requested glyphs or gids are not available in
the font.
--no-ignore-missing-glyphs
Stop and fail if some requested glyphs or gids are not available
in the font. [default]
--ignore-missing-unicodes [default]
Do not fail if some requested Unicode characters (including those
indirectly specified using --text or --text-file) are not available
in the font.
--no-ignore-missing-unicodes
Stop and fail if some requested Unicode characters are not available
in the font.
Note the default discrepancy between ignoring missing glyphs versus
unicodes. This is for historical reasons and in the future
--no-ignore-missing-unicodes might become default.
Other options
^^^^^^^^^^^^^
For the other options listed below, to see the current value of the option,
pass a value of '?' to it, with or without a '='. In some environments,
you might need to escape the question mark, like this: '--glyph-names\\?'.
Examples::
$ fonttools subset --glyph-names?
Current setting for 'glyph-names' is: False
$ fonttools subset --name-IDs=?
Current setting for 'name-IDs' is: [0, 1, 2, 3, 4, 5, 6]
$ fonttools subset --hinting? --no-hinting --hinting?
Current setting for 'hinting' is: True
Current setting for 'hinting' is: False
Output options
^^^^^^^^^^^^^^
--output-file=
The output font file. If not specified, the subsetted font
will be saved in as font-file.subset.
--flavor=
Specify flavor of output font file. May be 'woff' or 'woff2'.
Note that WOFF2 requires the Brotli Python extension, available
at https://github.com/google/brotli
--with-zopfli
Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 %
smaller than pure zlib, but the compression speed is much slower.
The Zopfli Python bindings are available at:
https://pypi.python.org/pypi/zopfli
--harfbuzz-repacker
By default, we serialize GPOS/GSUB using the HarfBuzz Repacker when
uharfbuzz can be imported and is successful, otherwise fall back to
the pure-python serializer. Set the option to force using the HarfBuzz
Repacker (raises an error if uharfbuzz can't be found or fails).
--no-harfbuzz-repacker
Always use the pure-python serializer even if uharfbuzz is available.
Glyph set expansion
^^^^^^^^^^^^^^^^^^^
These options control how additional glyphs are added to the subset.
--retain-gids
Retain glyph indices; just empty glyphs not needed in-place.
--notdef-glyph
Add the '.notdef' glyph to the subset (ie, keep it). [default]
--no-notdef-glyph
Drop the '.notdef' glyph unless specified in the glyph set. This
saves a few bytes, but is not possible for Postscript-flavored
fonts, as those require '.notdef'. For TrueType-flavored fonts,
this works fine as long as no unsupported glyphs are requested
from the font.
--notdef-outline
Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is
used when glyphs not supported by the font are to be shown. It is not
needed otherwise.
--no-notdef-outline
When including a '.notdef' glyph, remove its outline. This saves
a few bytes. [default]
--recommended-glyphs
Add glyphs 0, 1, 2, and 3 to the subset, as recommended for
TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'.
Some legacy software might require this, but no modern system does.
--no-recommended-glyphs
Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in
glyph set. [default]
--no-layout-closure
Do not expand glyph set to add glyphs produced by OpenType layout
features. Instead, OpenType layout features will be subset to only
rules that are relevant to the otherwise-specified glyph set.
--layout-features[+|-]=[,...]
Specify (=), add to (+=) or exclude from (-=) the comma-separated
set of OpenType layout feature tags that will be preserved.
Glyph variants used by the preserved features are added to the
specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs',
'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt',
'rlig', 'rvrn', and all features required for script shaping are
preserved. To see the full list, try '--layout-features=?'.
Use '*' to keep all features.
Multiple --layout-features options can be provided if necessary.
Examples:
--layout-features+=onum,pnum,ss01
* Keep the default set of features and 'onum', 'pnum', 'ss01'.
--layout-features-='mark','mkmk'
* Keep the default set of features but drop 'mark' and 'mkmk'.
--layout-features='kern'
* Only keep the 'kern' feature, drop all others.
--layout-features=''
* Drop all features.
--layout-features='*'
* Keep all features.
--layout-features+=aalt --layout-features-=vrt2
* Keep default set of features plus 'aalt', but drop 'vrt2'.
--layout-scripts[+|-]=